staticinlinevoid assert_vma_held_evict(conststruct i915_vma *vma)
{ /* * We may be forced to unbind when the vm is dead, to clean it up. * This is the only exception to the requirement of the object lock * being held.
*/ if (kref_read(&vma->vm->ref))
assert_object_held_shared(vma->obj);
}
/* * Exclude global GTT VMA from holding a GT wakeref * while active, otherwise GPU never goes idle.
*/ if (!i915_vma_is_ggtt(vma)) { /* * Since we and our _retire() counterpart can be * called asynchronously, storing a wakeref tracking * handle inside struct i915_vma is not safe, and * there is no other good place for that. Hence, * use untracked variants of intel_gt_pm_get/put().
*/
intel_gt_pm_get_untracked(vma->vm->gt);
}
if (!i915_vma_is_ggtt(vma)) { /* * Since we can be called from atomic contexts, * use an async variant of intel_gt_pm_put().
*/
intel_gt_pm_put_async_untracked(vma->vm->gt);
}
/* * If the view already exists in the tree, another thread * already created a matching vma, so return the older instance * and dispose of ours.
*/
cmp = i915_vma_compare(pos, vm, view); if (cmp < 0)
p = &rb->rb_right; elseif (cmp > 0)
p = &rb->rb_left; else goto err_unlock;
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma.tree);
if (i915_vma_is_ggtt(vma)) /* * We put the GGTT vma at the start of the vma-list, followed * by the ppGGTT vma. This allows us to break early when * iterating over only the GGTT vma for an object, see * for_each_ggtt_vma()
*/
list_add(&vma->obj_link, &obj->vma.list); else
list_add_tail(&vma->obj_link, &obj->vma.list);
/** * i915_vma_instance - return the singleton instance of the VMA * @obj: parent &struct drm_i915_gem_object to be mapped * @vm: address space in which the mapping is located * @view: additional mapping requirements * * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with * the same @view characteristics. If a match is not found, one is created. * Once created, the VMA is kept until either the object is freed, or the * address space is closed. * * Returns the vma, or an error pointer.
*/ struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj, struct i915_address_space *vm, conststruct i915_gtt_view *view)
{ struct i915_vma *vma;
/* * We are about the bind the object, which must mean we have already * signaled the work to potentially clear/move the pages underneath. If * something went wrong at that stage then the object should have * unknown_state set, in which case we need to skip the bind.
*/ if (i915_gem_object_has_unknown_state(vw->obj)) return;
/** * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. * @vma: VMA to map * @pat_index: PAT index to set in PTE * @flags: flags like global or local mapping * @work: preallocated worker for allocating and binding the PTE * @vma_res: pointer to a preallocated vma resource. The resource is either * consumed or freed. * * DMA addresses are taken from the scatter-gather table of this object (or of * this VMA in case of non-default GGTT views) and PTE entries set up. * Note that DMA addresses are also the only part of the SG table we care about.
*/ int i915_vma_bind(struct i915_vma *vma, unsignedint pat_index,
u32 flags, struct i915_vma_work *work, struct i915_vma_resource *vma_res)
{
u32 bind_flags;
u32 vma_flags; int ret;
/* * Note we only want to chain up to the migration fence on * the pages (not the object itself). As we don't track that, * yet, we have to use the exclusive fence instead. * * Also note that we do not want to track the async vma as * part of the obj->resv->excl_fence as it only affects * execution and not content or object's backing store lifetime.
*/
prev = i915_active_set_exclusive(&vma->active, &work->base.dma); if (prev) {
__i915_sw_fence_await_dma_fence(&work->base.chain,
prev,
&work->cb);
dma_fence_put(prev);
}
work->base.dma.error = 0; /* enable the queue_work() */
work->obj = i915_gem_object_get(vma->obj);
} else {
ret = i915_gem_object_wait_moving_fence(vma->obj, true); if (ret) {
i915_vma_resource_free(vma->resource);
vma->resource = NULL;
ptr = READ_ONCE(vma->iomap); if (ptr == NULL) { /* * TODO: consider just using i915_gem_object_pin_map() for lmem * instead, which already supports mapping non-contiguous chunks * of pages, that way we can also drop the * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
*/ if (i915_gem_object_is_lmem(vma->obj)) {
ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
vma->obj->base.size);
} elseif (i915_vma_is_map_and_fenceable(vma)) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
i915_vma_offset(vma),
i915_vma_size(vma));
} else {
ptr = (void __iomem *)
i915_gem_object_pin_map(vma->obj, I915_MAP_WC); if (IS_ERR(ptr)) {
err = PTR_ERR(ptr); goto err;
}
ptr = page_pack_bits(ptr, 1);
}
if (ptr == NULL) {
err = -ENOMEM; goto err;
}
if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { if (page_unmask_bits(ptr))
__i915_gem_object_release_map(vma->obj); else
io_mapping_unmap(ptr);
ptr = vma->iomap;
}
}
__i915_vma_pin(vma);
err = i915_vma_pin_fence(vma); if (err) goto err_unpin;
i915_vma_set_ggtt_write(vma);
/* NB Access through the GTT requires the device to be awake. */ return page_mask_bits(ptr);
/* * On some machines we have to be careful when putting differing types * of snoopable memory together to avoid the prefetcher crossing memory * domains and dying. During vm initialisation, we decide whether or not * these constraints apply and set the drm_mm.color_adjust * appropriately.
*/ if (!i915_vm_has_cache_coloring(vma->vm)) returntrue;
/* Only valid to be called on an already inserted vma */
GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(list_empty(&node->node_list));
other = list_prev_entry(node, node_list); if (i915_node_color_differs(other, color) &&
!drm_mm_hole_follows(other)) returnfalse;
other = list_next_entry(node, node_list); if (i915_node_color_differs(other, color) &&
!drm_mm_hole_follows(node)) returnfalse;
returntrue;
}
/** * i915_vma_insert - finds a slot for the vma in its address space * @vma: the vma * @ww: An optional struct i915_gem_ww_ctx * @size: requested size in bytes (can be larger than the VMA) * @alignment: required alignment * @flags: mask of PIN_* flags to use * * First we try to allocate some free space that meets the requirements for * the VMA. Failing that, if the flags permit, it will evict an old VMA, * preferably the oldest idle entry to make room for the new VMA. * * Returns: * 0 on success, negative error code otherwise.
*/ staticint
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags)
{ unsignedlong color, guard;
u64 start, end; int ret;
/*
 * NOTE(review): this chunk is mangled by extraction ("staticint",
 * "unsignedlong" are fused tokens) and appears incomplete: 'start' and
 * 'end' are used below but never assigned here — the code deriving them
 * (vm->total, mappable/zone-4G clamping) seems to be missing. Verify
 * against the upstream file before relying on this text.
 */
guard = vma->guard; /* retain guard across rebinds */ if (flags & PIN_OFFSET_GUARD) {
GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
} /* * As we align the node upon insertion, but the hardware gets * node.start + guard, the easiest way to make that work is * to make the guard a multiple of the alignment size.
*/
guard = ALIGN(guard, alignment);
/* * If binding the object/GGTT view requires more space than the entire * aperture has, reject it early before evicting everything in a vain * attempt to find space.
*/ if (size > end - 2 * guard) {
drm_dbg(vma->obj->base.dev, "Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
size, flags & PIN_MAPPABLE ? "mappable" : "total", end); return -ENOSPC;
}
/* Cache coloring only matters for vms that define color_adjust. */
color = 0;
if (i915_vm_has_cache_coloring(vma->vm))
color = vma->obj->pat_index;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK; if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end)) return -EINVAL; /* * The caller knows not of the guard added by others and * requests for the offset of the start of its buffer * to be fixed, which may not be the same as the position * of the vma->node due to the guard pages.
*/ if (offset < guard || offset + size > end - guard) return -ENOSPC;
/* Reserve the exact range, widened by the guard pages on both sides. */
ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
size + 2 * guard,
offset - guard,
color, flags); if (ret) return ret;
} else {
size += 2 * guard; /* * We only support huge gtt pages through the 48b PPGTT, * however we also don't want to force any alignment for * objects which need to be tightly packed into the low 32bits. * * Note that we assume that GGTT are limited to 4GiB for the * foreseeable future. See also i915_ggtt_offset().
*/ if (upper_32_bits(end - 1) &&
vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
!HAS_64K_PAGES(vma->vm->i915)) { /* * We can't mix 64K and 4K PTEs in the same page-table * (2M block), and so to avoid the ugliness and * complexity of coloring we opt for just aligning 64K * objects to 2M.
*/
u64 page_alignment =
rounddown_pow_of_two(vma->page_sizes.sg |
I915_GTT_PAGE_SIZE_2M);
/* * Check we don't expand for the limited Global GTT * (mappable aperture is even more precious!). This * also checks that we exclude the aliasing-ppgtt.
*/
GEM_BUG_ON(i915_vma_is_ggtt(vma));
alignment = max(alignment, page_alignment);
if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
}
ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
size, alignment, color,
start, end, flags); if (ret) return ret;
/* * And finally now the object is completely decoupled from this * vma, we can drop its hold on the backing storage and allow * it to be reaped by the shrinker.
*/
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
/*
 * NOTE(review): upstream moves the vma onto vm->bound_list at the end
 * of a successful insert; the unbound_list move (and its "decoupled"
 * comment) looks spliced in from the eviction path. The function also
 * seems truncated: no trailing GEM_BUG_ONs or "return 0;" are visible
 * before the closing brace. Confirm against upstream before use.
 */
}
src_idx = src_stride * (height - 1) + column + offset; for (row = 0; row < height; row++) {
st->nents++; /* * We don't need the pages, but need to initialize * the entries so the sg list can be happily traversed. * The only thing we need are DMA addresses.
*/
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) =
i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
src_idx -= src_stride;
}
left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
if (!left) continue;
st->nents++;
/* * The DE ignores the PTEs for the padding tiles, the sg entry * here is just a convenience to indicate how many padding PTEs * to insert at this spot.
*/
sg_set_page(sg, NULL, left, 0);
sg_dma_address(sg) = 0;
sg_dma_len(sg) = left;
sg = sg_next(sg);
}
/* * The DE ignores the PTEs for the padding tiles, the sg entry * here is just a convenience to indicate how many padding PTEs * to insert at this spot.
*/
sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) = 0;
sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
if (alignment_pad)
sg = add_padding_pages(alignment_pad, st, sg);
for (row = 0; row < height; row++) { unsignedint left = width * I915_GTT_PAGE_SIZE;
while (left) {
dma_addr_t addr; unsignedint length;
/* * We don't need the pages, but need to initialize * the entries so the sg list can be happily traversed. * The only thing we need are DMA addresses.
*/
/* * The vma->pages are only valid within the lifespan of the borrowed * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so * must be the vma->pages. A simple rule is that vma->pages must only * be accessed when the obj->mm.pages are pinned.
*/
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
case I915_GTT_VIEW_ROTATED:
pages =
intel_rotate_pages(&vma->gtt_view.rotated, vma->obj); break;
case I915_GTT_VIEW_REMAPPED:
pages =
intel_remap_pages(&vma->gtt_view.remapped, vma->obj); break;
case I915_GTT_VIEW_PARTIAL:
pages = intel_partial_pages(&vma->gtt_view, vma->obj); break;
}
if (IS_ERR(pages)) {
drm_err(&vma->vm->i915->drm, "Failed to get pages for VMA view type %u (%ld)!\n",
vma->gtt_view.type, PTR_ERR(pages)); return PTR_ERR(pages);
}
vma->pages = pages;
return 0;
}
I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{ int err;
if (atomic_add_unless(&vma->pages_count, 1, 0)) return 0;
err = i915_gem_object_pin_pages(vma->obj); if (err) return err;
err = __i915_vma_get_pages(vma); if (err) goto err_unpin;
/* * Before we release the pages that were bound by this vma, we * must invalidate all the TLBs that may still have a reference * back to our physical address. It only needs to be done once, * so after updating the PTE to point away from the pages, record * the most recent TLB invalidation seqno, and if we have not yet * flushed the TLBs upon release, perform a full invalidation.
*/
for_each_gt(gt, vm->i915, id)
WRITE_ONCE(tlb[id],
intel_gt_next_invalidate_tlb_full(gt));
}
staticvoid __vma_put_pages(struct i915_vma *vma, unsignedint count)
{ /* We allocate under vma_get_pages, so beware the shrinker */
GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
if (atomic_sub_return(count, &vma->pages_count) == 0) { if (vma->pages != vma->obj->mm.pages) {
sg_free_table(vma->pages);
kfree(vma->pages);
}
vma->pages = NULL;
/* The upper portion of pages_count is the number of bindings */
count = atomic_read(&vma->pages_count);
count >>= I915_VMA_PAGES_BIAS;
GEM_BUG_ON(!count);
/* First try and grab the pin without rebinding the vma */ if (try_qad_pin(vma, flags)) return 0;
err = i915_vma_get_pages(vma); if (err) return err;
/* * In case of a global GTT, we must hold a runtime-pm wakeref * while global PTEs are updated. In other cases, we hold * the rpm reference while the VMA is active. Since runtime * resume may require allocations, which are forbidden inside * vm->mutex, get the first rpm wakeref outside of the mutex.
*/
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) { /* lock VM */
err = i915_vm_lock_objects(vma->vm, ww); if (err) goto err_rpm;
work = i915_vma_work(); if (!work) {
err = -ENOMEM; goto err_rpm;
}
work->vm = vma->vm;
err = i915_gem_object_get_moving_fence(vma->obj, &moving); if (err) goto err_rpm;
dma_fence_work_chain(&work->base, moving);
/* Allocate enough page directories to used PTE */ if (vma->vm->allocate_va_range) {
err = i915_vm_alloc_pt_stash(vma->vm,
&work->stash,
vma->size); if (err) goto err_fence;
err = i915_vm_map_pt_stash(vma->vm, &work->stash); if (err) goto err_fence;
}
}
/* * Differentiate between user/kernel vma inside the aliasing-ppgtt. * * We conflate the Global GTT with the user's vma when using the * aliasing-ppgtt, but it is still vitally important to try and * keep the use cases distinct. For example, userptr objects are * not allowed inside the Global GTT as that will cause lock * inversions when we have to evict them the mmu_notifier callbacks - * but they are allowed to be part of the user ppGTT which can never * be mapped. As such we try to give the distinct users of the same * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt * and i915_ppgtt separate]. * * NB this may cause us to mask real lock inversions -- while the * code is safe today, lockdep may not be able to spot future * transgressions.
*/
err = mutex_lock_interruptible_nested(&vma->vm->mutex,
!(flags & PIN_GLOBAL)); if (err) goto err_vma_res;
/* No more allocations allowed now we hold vm->mutex */
if (unlikely(i915_vma_is_closed(vma))) {
err = -ENOENT; goto err_unlock;
}
/* There should only be at most 2 active bindings (user, global) */
GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
err_remove: if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
i915_vma_detach(vma);
drm_mm_remove_node(&vma->node);
}
err_active:
i915_active_release(&vma->active);
err_unlock:
mutex_unlock(&vma->vm->mutex);
err_vma_res:
i915_vma_resource_free(vma_res);
err_fence: if (work) { /* * When pinning VMA to GGTT on CHV or BXT with VTD enabled, * commit VMA binding asynchronously to avoid risk of lock * inversion among reservation_ww locks held here and * cpu_hotplug_lock acquired from stop_machine(), which we * wrap around GGTT updates when running in those environments.
*/ if (i915_vma_is_ggtt(vma) &&
intel_vm_no_concurrent_access_wa(vma->vm->i915))
dma_fence_work_commit(&work->base); else
dma_fence_work_commit_imm(&work->base);
}
err_rpm:
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
i915_vma_put_pages(vma); return err;
}
/*
 * i915_vma_pin - lock the backing object and pin the vma under a ww context.
 * Retries on -EDEADLK via the standard i915_gem_ww_ctx backoff dance.
 */
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{ struct i915_gem_ww_ctx ww; int err;
i915_gem_ww_ctx_init(&ww, true);
retry:
err = i915_gem_object_lock(vma->obj, &ww); if (!err)
err = i915_vma_pin_ww(vma, &ww, size, alignment, flags); if (err == -EDEADLK) {
/* ww deadlock: drop all locks, back off, and retry the whole sequence */
err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry;
}
i915_gem_ww_ctx_fini(&ww);
/*
 * NOTE(review): everything from here down references 'gt', 'ggtt' and
 * 'vm', none of which are declared in this function — this tail appears
 * spliced in from __i915_ggtt_pin()'s eviction/retry loop. Upstream
 * i915_vma_pin() simply ends with "return err;" at this point.
 */
if (err != -ENOSPC) { if (!err) {
err = i915_vma_wait_for_bind(vma); if (err)
i915_vma_unpin(vma);
} return err;
}
/* Unlike i915_vma_pin, we don't take no for an answer! */
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
flush_idle_contexts(gt); if (mutex_lock_interruptible(&vm->mutex) == 0) { /* * We pass NULL ww here, as we don't want to unbind * locked objects when called from execbuf when pinning * is removed. This would probably regress badly.
*/
i915_gem_evict_vm(vm, NULL, NULL);
mutex_unlock(&vm->mutex);
}
/* NOTE(review): "} while (1);" below has no matching "do {" in this chunk. */
} while (1);
}
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u32 align, unsignedint flags)
{ struct i915_gem_ww_ctx _ww; int err;
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
if (ww) return __i915_ggtt_pin(vma, ww, align, flags);
/** * i915_ggtt_clear_scanout - Clear scanout flag for all objects ggtt vmas * @obj: i915 GEM object * This function clears scanout flags for objects ggtt vmas. These flags are set * when object is pinned for display use and this function to clear them all is * targeted to be called by frontbuffer tracking code when the frontbuffer is * about to be released.
*/ void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
{ struct i915_vma *vma;
staticvoid __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{ /* * We defer actually closing, unbinding and destroying the VMA until * the next idle point, or if the object is freed in the meantime. By * postponing the unbind, we allow for it to be resurrected by the * client, avoiding the work required to rebind the VMA. This is * advantageous for DRI, where the client/server pass objects * between themselves, temporarily opening a local VMA to the * object, and then closing it again. The same object is then reused * on the next frame (or two, depending on the depth of the swap queue) * causing us to rebind the VMA once more. This ends up being a lot * of wasted work for the steady state.
*/
GEM_BUG_ON(i915_vma_is_closed(vma));
list_add(&vma->closed_link, >->closed_vma);
}
/* * i915_vma_destroy_locked - Remove all weak reference to the vma and put * the initial reference. * * This function should be called when it's decided the vma isn't needed * anymore. The caller must assure that it doesn't race with another lookup * plus destroy, typically by taking an appropriate reference. * * Current callsites are * - __i915_gem_object_pages_fini() * - __i915_vm_close() - Blocks the above function by taking a reference on * the object. * - __i915_vma_parked() - Blocks the above functions by taking a reference * on the vm and a reference on the object. Also takes the object lock so * destruction from __i915_vma_parked() can be blocked by holding the * object lock. Since the object lock is only allowed from within i915 with * an object refcount, holding the object lock also implicitly blocks the * vma freeing from __i915_gem_object_pages_fini(). * * Because of locks taken during destruction, a vma is also guaranteed to * stay alive while the following locks are held if it was looked up while * holding one of the locks: * - vm->mutex * - obj->vma.lock * - gt->closed_lock
*/ void i915_vma_destroy_locked(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->mutex);
/* As the GT is held idle, no vma can be reopened as we destroy them */
list_for_each_entry_safe(vma, next, &closed, closed_link) { struct drm_i915_gem_object *obj = vma->obj; struct i915_address_space *vm = vma->vm;
if (i915_gem_object_trylock(obj, NULL)) {
INIT_LIST_HEAD(&vma->closed_link);
i915_vma_destroy(vma);
i915_gem_object_unlock(obj);
} else { /* back you go.. */
spin_lock_irq(>->closed_lock);
list_add(&vma->closed_link, >->closed_vma);
spin_unlock_irq(>->closed_lock);
}
int _i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, struct dma_fence *fence, unsignedint flags)
{ struct drm_i915_gem_object *obj = vma->obj; int err;
assert_object_held(obj);
GEM_BUG_ON(!vma->pages);
if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE); if (unlikely(err)) return err;
}
err = __i915_vma_move_to_active(vma, rq); if (unlikely(err)) return err;
/* * Reserve fences slot early to prevent an allocation after preparing * the workload and associating fences with dma_resv.
*/ if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) { struct dma_fence *curr; int idx;
if (flags & EXEC_OBJECT_WRITE) { struct intel_frontbuffer *front;
front = i915_gem_object_get_frontbuffer(obj); if (unlikely(front)) { if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
i915_active_add_request(&front->write, rq);
intel_frontbuffer_put(front);
}
}
if (fence) { struct dma_fence *curr; enum dma_resv_usage usage; int idx;
if (i915_vma_is_map_and_fenceable(vma)) { /* Force a pagefault for domain tracking on next user access */
i915_vma_revoke_mmap(vma);
/* * Check that we have flushed all writes through the GGTT * before the unbind, other due to non-strict nature of those * indirect writes they may end up referencing the GGTT PTE * after the unbind. * * Note that we may be concurrently poking at the GGTT_WRITE * bit from set-domain, as we mark all GGTT vma associated * with an object. We know this is for another vma, as we * are currently unbinding this one -- so if this vma will be * reused, it will be refaulted and have its dirty bit set * before the next write.
*/
i915_vma_flush_writes(vma);
/* release the fence reg _after_ flushing */
i915_vma_revoke_fence(vma);
/* Object backend must be async capable. */
GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
/* If vm is not open, unbind is a nop. */
vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
kref_read(&vma->vm->ref);
vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
if (!async) { if (unbind_fence) {
dma_fence_wait(unbind_fence, false);
dma_fence_put(unbind_fence);
unbind_fence = NULL;
}
vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
}
/* * Binding itself may not have completed until the unbind fence signals, * so don't drop the pages until that happens, unless the resource is * async_capable.
*/
vma_unbind_pages(vma); return unbind_fence;
}
/*
 * __i915_vma_unbind - tear down a vma's binding (caller holds vm->mutex).
 *
 * NOTE(review): this body is a splice of two upstream functions. It is
 * declared to return 'int', yet below it returns NULL, ERR_PTR(...) and
 * 'fence' (which is never declared here) — those returns belong to the
 * async variant, __i915_vma_unbind_async(), which returns a dma_fence
 * pointer. Reconcile with upstream before trusting this text.
 */
int __i915_vma_unbind(struct i915_vma *vma)
{ int ret;
if (i915_vma_is_pinned(vma)) {
/* Someone still holds a pin; report who allocated it and bail. */
vma_print_allocator(vma, "is pinned"); return -EAGAIN;
}
/* * After confirming that no one else is pinning this vma, wait for * any laggards who may have crept in during the wait (through * a residual pin skipping the vm->mutex) to complete.
*/
ret = i915_vma_sync(vma); if (ret) return ret;
/* NOTE(review): pointer returns in an int function start here (see header). */
if (!drm_mm_node_allocated(&vma->node)) return NULL;
if (i915_vma_is_pinned(vma) ||
&vma->obj->mm.rsgt->table != vma->resource->bi.pages) return ERR_PTR(-EAGAIN);
/* * We probably need to replace this with awaiting the fences of the * object's dma_resv when the vma active goes away. When doing that * we need to be careful to not add the vma_resource unbind fence * immediately to the object's dma_resv, because then unbinding * the next vma from the object, in case there are many, will * actually await the unbinding of the previous vmas, which is * undesirable.
*/ if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
I915_ACTIVE_AWAIT_EXCL |
I915_ACTIVE_AWAIT_ACTIVE) < 0) { return ERR_PTR(-EBUSY);
}
fence = __i915_vma_evict(vma, true);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return fence;
}
int i915_vma_unbind(struct i915_vma *vma)
{ struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref = NULL; int err;
assert_object_held_shared(vma->obj);
/* Optimistic wait before taking the mutex */
err = i915_vma_sync(vma); if (err) return err;
if (!drm_mm_node_allocated(&vma->node)) return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned"); return -EAGAIN;
}
if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) /* XXX not always required: nop_clear_range */
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); if (err) goto out_rpm;
/* * We need the dma-resv lock since we add the * unbind fence to the dma-resv object.
*/
assert_object_held(obj);
if (!drm_mm_node_allocated(&vma->node)) return 0;
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned"); return -EAGAIN;
}
if (!obj->mm.rsgt) return -EBUSY;
err = dma_resv_reserve_fences(obj->base.resv, 2); if (err) return -EBUSY;
/* * It would be great if we could grab this wakeref from the * async unbind work if needed, but we can't because it uses * kmalloc and it's in the dma-fence signalling critical path.
*/ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
[Website boilerplate — not part of the kernel source above. Translated from German:]
The information on this web page has been compiled carefully and to the best
of our knowledge. However, no guarantee is made of the completeness,
correctness, or quality of the information provided.
Note:
The syntax highlighting and the measurement are still experimental.