do { struct intel_timeline *next = xchg(&tl->retire, NULL);
/* * Our goal here is to retire _idle_ timelines as soon as * possible (as they are idle, we do not expect userspace * to be cleaning up anytime soon). * * If the timeline is currently locked, either it is being * retired elsewhere or about to be!
*/ if (mutex_trylock(&tl->mutex)) {
retire_requests(tl);
mutex_unlock(&tl->mutex);
}
intel_timeline_put(tl);
GEM_BUG_ON(!next);
tl = ptr_mask_bits(next, 1);
} while (tl);
}
/* * We open-code a llist here to include the additional tag [BIT(0)] * so that we know when the timeline is already on a * retirement queue: either this engine or another.
*/
if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */ returnfalse;
intel_timeline_get(tl);
first = READ_ONCE(engine->retire); do
tl->retire = ptr_pack_bits(first, 1, 1); while (!try_cmpxchg(&engine->retire, &first, tl));
return !first;
}
/*
 * intel_engine_add_retire - hand @tl to @engine for deferred retirement
 *
 * Pushes the timeline onto the engine's retirement list and, if it was
 * not already queued anywhere, kicks the retirement worker so that idle
 * requests are reaped off the softirq/atomic path.
 */
void intel_engine_add_retire(struct intel_engine_cs *engine,
			     struct intel_timeline *tl)
{
	/*
	 * Virtual engines come and go with their contexts; we don't deal
	 * well with the engine disappearing beneath us, so insist on a
	 * physical engine here.
	 */
	GEM_BUG_ON(intel_engine_is_virtual(engine));

	/*
	 * Only the addition that finds the list empty needs to schedule
	 * the worker; later additions piggyback on the pending work item.
	 */
	if (add_retire(engine, tl))
		queue_work(engine->i915->unordered_wq, &engine->retire_work);
}
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout, long *remaining_timeout)
{ struct intel_gt_timelines *timelines = >->timelines; struct intel_timeline *tl, *tn; unsignedlong active_count = 0;
LIST_HEAD(free);
flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { if (!mutex_trylock(&tl->mutex)) {
active_count++; /* report busy to caller, try again? */ continue;
}
intel_timeline_get(tl);
GEM_BUG_ON(!atomic_read(&tl->active_count));
atomic_inc(&tl->active_count); /* pin the list element */
spin_unlock(&timelines->lock);
if (timeout > 0) { struct dma_fence *fence;
fence = i915_active_fence_get(&tl->last_request); if (fence) {
mutex_unlock(&tl->mutex);
/* Retirement is best effort */ if (!mutex_trylock(&tl->mutex)) {
active_count++; goto out_active;
}
}
}
if (!retire_requests(tl))
active_count++;
mutex_unlock(&tl->mutex);
out_active: spin_lock(&timelines->lock);
/* Resume list iteration after reacquiring spinlock */
list_safe_reset_next(tl, tn, link); if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
/* Defer the final release to after the spinlock */ if (refcount_dec_and_test(&tl->kref.refcount)) {
GEM_BUG_ON(atomic_read(&tl->active_count));
list_add(&tl->link, &free);
}
}
spin_unlock(&timelines->lock);
void intel_gt_fini_requests(struct intel_gt *gt)
{ /* Wait until the work is marked as finished before unloading! */
cancel_delayed_work_sync(>->requests.retire_work);
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.