/*
 * Start out close to the 32b fence rollover point, so we can
 * catch bugs with fence comparisons.
 */
fctx->last_fence = 0xffffff00;
fctx->completed_fence = fctx->last_fence;
/* mirror the initial value into the GPU-visible fence location */
*fctx->fenceptr = fctx->last_fence;
/**
 * msm_fence_completed - test whether a fence seqno has been reached
 * @fctx: the fence context the seqno belongs to
 * @fence: the 32-bit fence sequence number to test
 *
 * Signed-difference comparison so the check stays correct across
 * 32-bit seqno rollover.
 */
bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
	/*
	 * Note: Check completed_fence first, as fenceptr is in a write-combine
	 * mapping, so it will be more expensive to read.
	 */
	if ((int32_t)(fctx->completed_fence - fence) >= 0)
		return true;

	return (int32_t)(*fctx->fenceptr - fence) >= 0;
}
/*
 * Record a newly completed fence seqno and cancel any pending
 * deadline-boost timer that it satisfies.
 *
 * Called from irq handler and workqueue (in recover path).
 */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
	/* fix: original had "unsignedlong flags;" — missing space, not valid C */
	unsigned long flags;

	spin_lock_irqsave(&fctx->spinlock, flags);
	/* only advance completed_fence; rollover-safe comparison */
	if (fence_after(fence, fctx->completed_fence))
		fctx->completed_fence = fence;
	/* the deadline fence is now met, no need for the boost timer */
	if (msm_fence_completed(fctx, fctx->next_deadline_fence))
		hrtimer_cancel(&fctx->deadline_timer);
	spin_unlock_irqrestore(&fctx->spinlock, flags);
}
/*
 * Set timer to trigger boost 3ms before deadline, or
 * if we are already less than 3ms before the deadline
 * schedule boost work immediately.
 */
deadline = ktime_sub(deadline, ms_to_ktime(3));
/*
 * Until this point, the fence was just some pre-allocated memory,
 * no-one should have taken a reference to it yet.
 */
WARN_ON(kref_read(&fence->refcount));
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.