/* Returns the next zone at or below highest_zoneidx in a zonelist */ struct zoneref *__next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx,
nodemask_t *nodes)
{ /* * Find the next suitable zone to use for the allocation. * Only filter based on nodemask if it's set
*/ if (unlikely(nodes == NULL)) while (zonelist_zone_idx(z) > highest_zoneidx)
z++; else while (zonelist_zone_idx(z) > highest_zoneidx ||
(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
z++;
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]); /* * The "Unevictable LRU" is imaginary: though its size is maintained, * it is never scanned, and unevictable pages are not threaded on it * (so that their lru fields can be reused to hold mlock_count). * Poison its list head, so that any operations on it would crash.
*/
list_del(&lruvec->lists[LRU_UNEVICTABLE]);
lru_gen_init_lruvec(lruvec);
}
#ifdefined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{ unsignedlong old_flags, flags; int last_cpupid;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.