/*
 * NOTE(review): fragment — the enclosing function's opening (and the
 * err_unpin label targeted below) are outside this chunk; presumably the
 * vma pin/activate path. Confirm against the full file.
 */
err = i915_active_acquire(&vma->active);
if (err)
	goto err_unpin;

/*
 * And mark it as a globally pinned object to let the shrinker know
 * it cannot reclaim the object until we release it.
 */
i915_vma_make_unshrinkable(vma);
vma->obj->mm.dirty = true;
/**
 * __intel_context_do_pin_ww - pin a context under an outer ww transaction
 * @ce: the context to pin
 * @ww: ww acquire context used to lock the backing objects
 *
 * Locks the context's backing objects (timeline hwsp, ring, context state),
 * pre-pins them, and takes the first pin on @ce if it is not already pinned;
 * subsequent calls merely bump the pin count.
 *
 * Return: 0 on success, negative error code otherwise. May return -EDEADLK
 * from the ww locking, in which case the caller is expected to back off the
 * transaction and retry.
 */
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevent a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */
	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_ctx_unpin;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_post_unpin;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_release;

	intel_engine_pm_might_get(ce->engine);

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		/*
		 * FIX: the backend pin must consume the vaddr produced by
		 * pre_pin() before the pin becomes visible. Previously vaddr
		 * was computed but never used and ce->ops->pin() was never
		 * invoked, so pin_count was published for a context whose
		 * backend mapping had never been established.
		 */
		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	trace_intel_context_do_pin(ce);

	/*
	 * Success deliberately falls through the unwind labels below: the
	 * transient references taken for the pin handshake are dropped, while
	 * handoff keeps the backend pin alive.
	 */
err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_release:
	i915_active_release(&ce->active);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}
int __intel_context_do_pin(struct intel_context *ce)
{ struct i915_gem_ww_ctx ww; int err;
/* * Once released, we may asynchronously drop the active reference. * As that may be the only reference keeping the context alive, * take an extra now so that it is not freed before we finish * dereferencing it.
*/
intel_context_get(ce);
intel_context_active_release(ce);
trace_intel_context_do_unpin(ce);
intel_context_put(ce);
}
/*
 * NOTE(review): fragment — this chunk resumes inside an enclosing function
 * whose definition is not visible here (looks like the context activation
 * path plus GuC state init; confirm against the full file).
 */
/* everything should already be activated by intel_context_pre_pin() */
GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
__intel_ring_pin(ce->ring);

__intel_timeline_pin(ce->timeline);

if (ce->state) {
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
	__i915_vma_pin(ce->state);
	/* keep the shrinker away from the pinned context image */
	i915_vma_make_unshrinkable(ce->state);
}

/*
 * Initialize fence to be complete as this is expected to be complete
 * unless there is a pending schedule disable outstanding.
 */
i915_sw_fence_init(&ce->guc_state.blocked,
		   sw_fence_dummy_notify);
i915_sw_fence_commit(&ce->guc_state.blocked);
/*
 * Hook @rq into @ce's activity tracking so the context image and its
 * timeline remain pinned until @rq retires. Only valid for requests that
 * remotely modify @ce, i.e. @rq was not submitted on @ce itself.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *timeline = ce->timeline;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != timeline) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		int err = i915_active_fence_set(&timeline->last_request, rq);

		if (err)
			return err;
	}

	/*
	 * Guarantee context image and the timeline remains pinned until the
	 * modifying request is retired by setting the ce activity tracker.
	 *
	 * But we only need to take one pin on the account of it. Or in other
	 * words transfer the pinned ce object to tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));

	return i915_active_add_request(&ce->active, rq);
}
/*
 * NOTE(review): fragment — these statements live inside a function whose
 * opening is above this chunk (a request path that re-juggles the timeline
 * mutex under lockdep; confirm upstream).
 *
 * timeline->mutex should be the inner lock, but is used as outer lock.
 * Hack around this to shut up lockdep in selftests..
 */
lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
/*
 * We search the parent list to find an active request on the submitted
 * context. The parent list contains the requests for all the contexts
 * in the relationship so we have to do a compare of each request's
 * context.
 *
 * NOTE(review): fragment — the enclosing function's header is above this
 * chunk; 'parent', 'rq', 'active' and 'flags' are declared there. The
 * closing brace below belongs to that function.
 */
spin_lock_irqsave(&parent->guc_state.lock, flags);
list_for_each_entry_reverse(rq, &parent->guc_state.requests,
			    sched.link) {
	/* Skip requests that belong to siblings in the relationship */
	if (rq->context != ce)
		continue;
	/* Walking newest->oldest: first completed request ends the scan */
	if (i915_request_completed(rq))
		break;

	active = rq;
}
/* Take a reference under the lock before the request can be retired */
if (active)
	active = i915_request_get_rcu(active);
spin_unlock_irqrestore(&parent->guc_state.lock, flags);

return active;
}
/*
 * intel_context_bind_parent_child - link @child under @parent for
 * parallel (multi-context) submission.
 *
 * NOTE(review): the function body is truncated in this chunk — the actual
 * linking of the contexts and the closing brace are not visible here;
 * confirm against the full file.
 */
void intel_context_bind_parent_child(struct intel_context *parent,
				     struct intel_context *child)
{
	/*
	 * Callers responsibility to validate that this function is used
	 * correctly but we use GEM_BUG_ON here ensure that they do.
	 */
	GEM_BUG_ON(intel_context_is_pinned(parent));
	GEM_BUG_ON(intel_context_is_child(parent));
	GEM_BUG_ON(intel_context_is_pinned(child));
	GEM_BUG_ON(intel_context_is_child(child));
	GEM_BUG_ON(intel_context_is_parent(child));
The information on this web page has been carefully compiled to the best of
our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.