staticbool can_release_pages(struct drm_i915_gem_object *obj)
{ /* Consider only shrinkable objects. */ if (!i915_gem_object_is_shrinkable(obj)) returnfalse;
/* * We can only return physical pages to the system if we can either * discard the contents (because the user has marked them as being * purgeable) or if we can move their contents out to swap.
*/ return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
/*
 * NOTE(review): fragment — the opening of the enclosing helper (presumably
 * an "if (obj->ops->shrink)" guarded writeback helper taking @obj and
 * @flags) is missing from this extract; the lines below are only its tail.
 * TODO: confirm against the upstream file before editing.
 */
/* Translate shrinker control flags into per-object shrink flags. */
if (!(flags & I915_SHRINK_ACTIVE))
/* Caller did not ask for active objects: do not stall on the GPU. */
shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;
if (flags & I915_SHRINK_WRITEBACK)
/* Caller allows immediate writeback of the pages to swap. */
shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;
/* Delegate the actual release to the object's backend. */
return obj->ops->shrink(obj, shrink_flags);
}
/* Objects without a ->shrink() backend released nothing. */
return 0;
}
/*
 * NOTE(review): this function is corrupted in this extract — whitespace is
 * fused ("unsignedlong") and several statements are missing (see the inline
 * notes below). The code is documented as-is rather than "repaired" by
 * guesswork; restore it from the upstream source.
 */
/** * i915_gem_shrink - Shrink buffer object caches * @ww: i915 gem ww acquire ctx, or NULL * @i915: i915 device * @target: amount of memory to make available, in pages * @nr_scanned: optional output for number of pages scanned (incremental) * @shrink: control flags for selecting cache types * * This function is the main interface to the shrinker. It will try to release * up to @target pages of main memory backing storage from buffer objects. * Selection of the specific caches can be done with @flags. This is e.g. useful * when purgeable objects should be removed from caches preferentially. * * Note that it's not guaranteed that released amount is actually available as * free system memory - the pages might still be in-used to due to other reasons * (like cpu mmaps) or the mm core has reused them before we could grab them. * Therefore code that needs to explicitly shrink buffer objects caches (e.g. to * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all(). * * Also note that any kind of pinning (both per-vma address space pins and * backing storage pins at the buffer object level) result in the shrinker code * having to skip the object. * * Returns: * The number of pages of backing storage actually released.
*/ unsignedlong
i915_gem_shrink(struct i915_gem_ww_ctx *ww, struct drm_i915_private *i915, unsignedlong target, unsignedlong *nr_scanned, unsignedint shrink)
/* Table of object lists to scan, gated by the caller's @shrink flags. */
{ conststruct { struct list_head *list; unsignedint bit;
} phases[] = {
/* Purgeable objects are always fair game, whatever flags were given. */
{ &i915->mm.purge_list, ~0u },
{
&i915->mm.shrink_list,
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
},
/* Sentinel terminating the phase walk below. */
{ NULL, 0 },
}, *phase;
intel_wakeref_t wakeref = NULL; unsignedlong count = 0; unsignedlong scanned = 0; int err = 0, i = 0; struct intel_gt *gt;
/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */ bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
trace_i915_gem_shrink(i915, target, shrink);
/* * Unbinding of objects will require HW access; Let us not wake the * device just to recover a little memory. If absolutely necessary, * we will force the wake during oom-notifier.
*/ if (shrink & I915_SHRINK_BOUND) {
/* If the device is already asleep, skip bound objects entirely. */
wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); if (!wakeref)
shrink &= ~I915_SHRINK_BOUND;
}
/* * When shrinking the active list, we should also consider active * contexts. Active contexts are pinned until they are retired, and * so can not be simply unbound to retire and unpin their pages. To * shrink the contexts, we must wait until the gpu is idle and * completed its switch to the kernel context. In short, we do * not have a good mechanism for idling a specific context, but * what we can do is give them a kick so that we do not keep idle * contexts around longer than is necessary.
*/ if (shrink & I915_SHRINK_ACTIVE) {
for_each_gt(gt, i915, i) /* Retire requests to unpin all idle contexts */
intel_gt_retire_requests(gt);
}
/* * As we may completely rewrite the (un)bound list whilst unbinding * (due to retiring requests) we have to strictly process only * one element of the list at the time, and recheck the list * on every iteration. * * In particular, we must hold a reference whilst removing the * object as we may end up waiting for and/or retiring the objects. * This might release the final reference (held by the active list) * and result in the object being freed from under us. This is * similar to the precautions the eviction code must take whilst * removing objects. * * Also note that although these lists do not hold a reference to * the object we can safely grab one here: The final object * unreferencing and the bound_list are both protected by the * dev->struct_mutex and so we won't ever be able to observe an * object on the bound_list with a reference count equals 0.
*/ for (phase = phases; phase->list; phase++) { struct list_head still_in_list; struct drm_i915_gem_object *obj; unsignedlong flags;
/* Skip phases the caller's control flags did not select. */
if ((shrink & phase->bit) == 0) continue;
INIT_LIST_HEAD(&still_in_list);
/* * We serialize our access to unreferenced objects through * the use of the struct_mutex. While the objects are not * yet freed (due to RCU then a workqueue) we still want * to be able to shrink their pages, so they remain on * the unbound/bound list until actually freed.
*/
spin_lock_irqsave(&i915->mm.obj_lock, flags); while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
mm.link))) {
/* Park the object on a private list so the walk can restart. */
list_move_tail(&obj->mm.link, &still_in_list);
if (shrink & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mm.mapping)) continue;
if (!(shrink & I915_SHRINK_ACTIVE) &&
i915_gem_object_is_framebuffer(obj)) continue;
if (!can_release_pages(obj)) continue;
/* Object may be mid-free; only proceed if still referenced. */
if (!kref_get_unless_zero(&obj->base.refcount)) continue;
/*
 * NOTE(review): upstream drops i915->mm.obj_lock
 * (spin_unlock_irqrestore) before taking the object lock below;
 * that unlock appears to have been lost from this extract —
 * TODO confirm against upstream.
 */
/* May arrive from get_pages on another bo */ if (!ww) { if (!i915_gem_object_trylock(obj, NULL)) goto skip;
} else {
err = i915_gem_object_lock(obj, ww); if (err) goto skip;
}
/*
 * NOTE(review): the actual page release (unbind/put_pages/
 * writeback), the count/scanned accounting, the object
 * unlock/put, and the "skip:" label targeted by the gotos
 * above are all missing here — as written those gotos
 * reference an undefined label and "scanned" is never
 * incremented. Restore from upstream before use.
 */
spin_lock_irqsave(&i915->mm.obj_lock, flags); if (err) break;
}
/* Return every surviving object to its phase list, order kept. */
list_splice_tail(&still_in_list, phase->list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags); if (err) break;
}
/* Balance the conditional wakeref taken for bound objects above. */
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
if (err) return err;
/* Report pages scanned incrementally, as the kerneldoc states. */
if (nr_scanned)
*nr_scanned += scanned; return count;
}
/** * i915_gem_shrink_all - Shrink buffer object caches completely * @i915: i915 device * * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all * caches completely. It also first waits for and retires all outstanding * requests to also be able to release backing storage for active objects. * * This should only be used in code to intentionally quiescent the gpu or as a * last-ditch effort when memory seems to have run out. * * Returns: * The number of pages of backing storage actually released.
*/ unsignedlong i915_gem_shrink_all(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref; unsignedlong freed = 0;
/*
 * NOTE(review): the body of i915_gem_shrink_all() (wakeref acquisition
 * and the i915_gem_shrink() call) is missing from this extract. The
 * lines that follow belong to two different functions — the shrinker
 * count callback (batch-size heuristic) and the oom notifier — fused
 * together by the extraction. TODO: restore from upstream.
 */
/* * Update our preferred vmscan batch size for the next pass. * Our rough guess for an effective batch size is roughly 2 * available GEM objects worth of pages. That is we don't want * the shrinker to fire, until it is worth the cost of freeing an * entire GEM object.
*/ if (num_objects) { unsignedlong avg = 2 * count / num_objects;
/* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not * being pointed to by hardware.
*/
/* NOTE(review): oom-notifier fragment — tallies evictable vs pinned pages. */
available = unevictable = 0;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT; else
available += obj->base.size >> PAGE_SHIFT;
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
/* Only log when the purge achieved (or could achieve) something. */
if (freed_pages || available)
pr_info("Purging GPU memory, %lu pages freed, " "%lu pages still pinned, %lu pages left available.\n",
freed_pages, unevictable, available);
/** * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By * default all object types that support shrinking(see IS_SHRINKABLE), will also * make the object visible to the shrinker after allocating the system memory * pages. * @obj: The GEM object. * * This is typically used for special kernel internal objects that can't be * easily processed by the shrinker, like if they are perma-pinned.
*/ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{ struct drm_i915_private *i915 = obj_to_i915(obj); unsignedlong flags;
/* * We can only be called while the pages are pinned or when * the pages are released. If pinned, we should only be called * from a single caller under controlled conditions; and on release * only one caller may release us. Neither the two may cross.
/* Fast path: already pinned for shrinking purposes, just bump the pin. */
*/ if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0)) return;
/*
 * NOTE(review): truncated — the slow path (removing the object from the
 * shrink list under i915->mm.obj_lock and updating the shrink counters)
 * and this function's closing brace are missing from this extract.
 */
/** * __i915_gem_object_make_shrinkable - Move the object to the tail of the * shrinkable list. Objects on this list might be swapped out. Used with * WILLNEED objects. * @obj: The GEM object. * * DO NOT USE. This is intended to be called on very special objects that don't * yet have mm.pages, but are guaranteed to have potentially reclaimable pages * underneath.
*/ void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
___i915_gem_object_make_shrinkable(obj,
&obj_to_i915(obj)->mm.shrink_list);
}
/** * __i915_gem_object_make_purgeable - Move the object to the tail of the * purgeable list. Objects on this list might be swapped out. Used with * DONTNEED objects. * @obj: The GEM object. * * DO NOT USE. This is intended to be called on very special objects that don't * yet have mm.pages, but are guaranteed to have potentially reclaimable pages * underneath.
*/ void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
___i915_gem_object_make_shrinkable(obj,
&obj_to_i915(obj)->mm.purge_list);
}
/**
 * i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list. Objects on this list might be swapped out. Used with
 * WILLNEED objects.
 * @obj: The GEM object.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	/* The contract requires backing pages; catch violations loudly. */
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	__i915_gem_object_make_shrinkable(obj);
}
/**
 * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable
 * list. Used with DONTNEED objects. Unlike with shrinkable objects, the
 * shrinker will attempt to discard the backing pages, instead of trying to swap
 * them out.
 * @obj: The GEM object.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	/* The contract requires backing pages; catch violations loudly. */
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	__i915_gem_object_make_purgeable(obj);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.