/*
 * __intel_wakeref_get_first - take the first reference to @wf
 * @wf: the wakeref to acquire
 *
 * Acquire a runtime-pm wakeref up front (this may sleep, so it happens
 * before taking wf->mutex), then, if we are the first user, transfer the
 * wakeref into @wf and run the ops->get() callback under the mutex.
 * On callback failure the stashed wakeref is reclaimed and released.
 * On success the user count is incremented.
 *
 * Returns: 0 on success, or the error from wf->ops->get() on failure.
 */
int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);

	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);

	if (!atomic_read(&wf->count)) {
		INTEL_WAKEREF_BUG_ON(wf->wakeref);
		wf->wakeref = wakeref;
		wakeref = NULL; /* ownership transferred to wf */

		ret = wf->ops->get(wf);
		if (ret) {
			/*
			 * Reclaim the wakeref so the common exit path
			 * below releases it, and wake any waiters that
			 * were watching wf->wakeref.
			 */
			wakeref = xchg(&wf->wakeref, NULL);
			wake_up_var(&wf->wakeref);
			goto unlock;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);

unlock:
	mutex_unlock(&wf->mutex);
	/* Non-NULL unless ownership was transferred to wf above. */
	if (wakeref)
		intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);

	return ret;
}
/*
 * NOTE(review): fragment — the header of the enclosing function (a
 * wakeref put path taking @wf and a @flags word) is outside this chunk;
 * confirm against the full file.
 *
 * Assume we are not in process context (e.g. called from a softirq or
 * timer callback) and so cannot sleep: if the caller asked for an
 * asynchronous put, or the mutex is contended, defer the release to the
 * workqueue instead of blocking here.
 */
if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
	/* Optional delay, in jiffies, encoded in the flags bitfield. */
	mod_delayed_work(wf->i915->unordered_wq, &wf->work,
			 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
	return;
}
/*
 * NOTE(review): fragment — the header of the enclosing function (an
 * auto-expiring wakeref helper, presumably taking @wf and @timeout and
 * declaring the local @flags used by the irqsave below) is not visible
 * in this chunk; confirm against the full file.
 */
/* A zero timeout means "cancel": tear down any pending auto-release. */
if (!timeout) {
	/*
	 * timer_delete_sync() returns true if the timer was still
	 * pending; in that case run the elided callback by hand so the
	 * reference it would have dropped stays balanced — TODO confirm
	 * against wakeref_auto_timeout().
	 */
	if (timer_delete_sync(&wf->timer))
		wakeref_auto_timeout(&wf->timer);
	return;
}

/* Our mission is that we only extend an already active wakeref */
assert_rpm_wakelock_held(&wf->i915->runtime_pm);

/*
 * Fast path: bump the count if it is already non-zero. Otherwise take
 * the lock and re-check (double-checked under wf->lock) before arming
 * a fresh wakeref.
 */
if (!refcount_inc_not_zero(&wf->count)) {
	spin_lock_irqsave(&wf->lock, flags);
	if (!refcount_inc_not_zero(&wf->count)) {
		INTEL_WAKEREF_BUG_ON(wf->wakeref);
		/* "if_in_use": only extend, never power up from idle. */
		wf->wakeref =
			intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm);
		refcount_set(&wf->count, 1);
	}
	spin_unlock_irqrestore(&wf->lock, flags);
}

/*
 * If we extend a pending timer, we will only get a single timer
 * callback and so need to cancel the local inc by running the
 * elided callback to keep the wf->count balanced.
 */
if (mod_timer(&wf->timer, jiffies + timeout))
	wakeref_auto_timeout(&wf->timer);
}
/*
 * NOTE(review): fragment — the header of the enclosing function (a
 * leak-report printer; @dir, @p, @buf, @buf_size, @count, @sb and @se
 * are declared in the invisible prologue) is outside this chunk.
 */
/* GFP_NOWAIT: no sleeping — presumably callable from atomic context
 * (TODO confirm); silently give up on allocation failure. */
buf = kmalloc(buf_size, GFP_NOWAIT);
if (!buf)
	return;

/* Snapshot the tracker report into buf; nothing to print if empty. */
count = ref_tracker_dir_snprint(dir, buf, buf_size);
if (!count)
	goto free;

/* printk does not like big buffers, so we split it line by line. */
for (sb = buf; *sb; sb = se + 1) {
	/* strchrnul() points at the '\n' or at the trailing NUL. */
	se = strchrnul(sb, '\n');
	drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
	if (!*se)
		break;
}

/* snprintf-style return value: count >= buf_size means truncation. */
if (count >= buf_size)
	drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
		   count + 1 - buf_size);

free:
kfree(buf);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.