/*
 * Page order with-respect-to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
/*
 * Fix: "#ifdefined" and "#elifdefined" are not valid preprocessor
 * directives; they must be "#if defined" / "#elif defined" (or
 * #ifdef) for this to compile at all.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif
list_del(&page->lru); /* * Convert free pages into post allocation pages, so * that we can free them via __free_page.
*/
mark_allocated(page, order, __GFP_MOVABLE);
__free_pages(page, order); if (pfn > high_pfn)
high_pfn = pfn;
}
} return high_pfn;
}
#ifdef CONFIG_COMPACTION
/* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6
/* * Compaction is deferred when compaction fails to result in a page * allocation success. 1 << compact_defer_shift, compactions are skipped up * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
*/ staticvoid defer_compaction(struct zone *zone, int order)
{
zone->compact_considered = 0;
zone->compact_defer_shift++;
if (order < zone->compact_order_failed)
zone->compact_order_failed = order;
if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
/* Returns true if compaction should be skipped this time */ staticbool compaction_deferred(struct zone *zone, int order)
{ unsignedlong defer_limit = 1UL << zone->compact_defer_shift;
if (order < zone->compact_order_failed) returnfalse;
/* Avoid possible overflow */ if (++zone->compact_considered >= defer_limit) {
zone->compact_considered = defer_limit; returnfalse;
}
trace_mm_compaction_deferred(zone, order);
returntrue;
}
/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order, bool alloc_success)
{
	/* A confirmed allocation success wipes the defer state entirely. */
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}

	/* Record that allocations of this order are now expected to work. */
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}
/* Returns true if restarting compaction after many failures */ staticbool compaction_restarting(struct zone *zone, int order)
{ if (order < zone->compact_order_failed) returnfalse;
/* Returns true if the pageblock should be scanned for pages to isolate. */ staticinlinebool isolation_suitable(struct compact_control *cc, struct page *page)
{ if (cc->ignore_skip_hint) returntrue;
#ifdef CONFIG_SPARSEMEM
/*
 * If the PFN falls into an offline section, return the start PFN of the
 * next online section. If the PFN falls into an online section or if
 * there is no next online section, return 0.
 */
static unsigned long skip_offline_sections(unsigned long start_pfn)
{
	unsigned long nr = pfn_to_section_nr(start_pfn);

	/* Already inside an online section: nothing to skip. */
	if (online_section_nr(nr))
		return 0;

	/* Scan forward for the first online section, if any. */
	for (nr++; nr <= __highest_present_section_nr; nr++) {
		if (online_section_nr(nr))
			return section_nr_to_pfn(nr);
	}

	return 0;
}
/* * If the PFN falls into an offline section, return the end PFN of the * next online section in reverse. If the PFN falls into an online section * or if there is no next online section in reverse, return 0.
*/ staticunsignedlong skip_offline_sections_reverse(unsignedlong start_pfn)
{ unsignedlong start_nr = pfn_to_section_nr(start_pfn);
if (!start_nr || online_section_nr(start_nr)) return 0;
while (start_nr-- > 0) { if (online_section_nr(start_nr)) return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION;
}
/* * Compound pages of >= pageblock_order should consistently be skipped until * released. It is always pointless to compact pages of such order (if they are * migratable), and the pageblocks they occupy cannot contain any free pages.
*/ staticbool pageblock_skip_persistent(struct page *page)
{ if (!PageCompound(page)) returnfalse;
page = compound_head(page);
if (compound_order(page) >= pageblock_order) returntrue;
if (!page) returnfalse; if (zone != page_zone(page)) returnfalse; if (pageblock_skip_persistent(page)) returnfalse;
/* * If skip is already cleared do no further checking once the * restart points have been set.
*/ if (check_source && check_target && !get_pageblock_skip(page)) returntrue;
/* * If clearing skip for the target scanner, do not select a * non-movable pageblock as the starting point.
*/ if (!check_source && check_target &&
get_pageblock_migratetype(page) != MIGRATE_MOVABLE) returnfalse;
/* Ensure the start of the pageblock or zone is online and valid */
block_pfn = pageblock_start_pfn(pfn);
block_pfn = max(block_pfn, zone->zone_start_pfn);
block_page = pfn_to_online_page(block_pfn); if (block_page) {
page = block_page;
pfn = block_pfn;
}
/* Ensure the end of the pageblock or zone is online and valid */
block_pfn = pageblock_end_pfn(pfn) - 1;
block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
end_page = pfn_to_online_page(block_pfn); if (!end_page) returnfalse;
/* * Only clear the hint if a sample indicates there is either a * free page or an LRU page in the block. One or other condition * is necessary for the block to be a migration source/target.
*/ do { if (check_source && PageLRU(page)) {
clear_pageblock_skip(page); returntrue;
}
if (check_target && PageBuddy(page)) {
clear_pageblock_skip(page); returntrue;
}
page += (1 << PAGE_ALLOC_COSTLY_ORDER);
} while (page <= end_page);
returnfalse;
}
/* * This function is called to clear all cached information on pageblocks that * should be skipped for page isolation when the migrate and free page scanner * meet.
*/ staticvoid __reset_isolation_suitable(struct zone *zone)
{ unsignedlong migrate_pfn = zone->zone_start_pfn; unsignedlong free_pfn = zone_end_pfn(zone) - 1; unsignedlong reset_migrate = free_pfn; unsignedlong reset_free = migrate_pfn; bool source_set = false; bool free_set = false;
/* Only flush if a full compaction finished recently */ if (!zone->compact_blockskip_flush) return;
zone->compact_blockskip_flush = false;
/* * Walk the zone and update pageblock skip information. Source looks * for PageLRU while target looks for PageBuddy. When the scanner * is found, both PageBuddy and PageLRU are checked as the pageblock * is suitable as both source and target.
*/ for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
free_pfn -= pageblock_nr_pages) {
cond_resched();
/* Leave no distance if no suitable block was reset */ if (reset_migrate >= reset_free) {
zone->compact_cached_migrate_pfn[0] = migrate_pfn;
zone->compact_cached_migrate_pfn[1] = migrate_pfn;
zone->compact_cached_free_pfn = free_pfn;
}
}
void reset_isolation_suitable(pg_data_t *pgdat)
{
	int i;

	/* Flush the cached pageblock-skip state of every populated zone. */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		if (populated_zone(zone))
			__reset_isolation_suitable(zone);
	}
}
/* * Sets the pageblock skip bit if it was clear. Note that this is a hint as * locks are not required for read/writers. Returns true if it was already set.
*/ staticbool test_and_set_skip(struct compact_control *cc, struct page *page)
{ bool skip;
/* Do not update if skip hint is being ignored */ if (cc->ignore_skip_hint) returnfalse;
skip = get_pageblock_skip(page); if (!skip && !cc->no_set_skip_hint)
set_pageblock_skip(page);
/* Set for isolation rather than compaction */ if (cc->no_set_skip_hint) return;
pfn = pageblock_end_pfn(pfn);
/* Update where async and sync compaction should restart */ if (pfn > zone->compact_cached_migrate_pfn[0])
zone->compact_cached_migrate_pfn[0] = pfn; if (cc->mode != MIGRATE_ASYNC &&
pfn > zone->compact_cached_migrate_pfn[1])
zone->compact_cached_migrate_pfn[1] = pfn;
}
/* * If no pages were isolated then mark this pageblock to be skipped in the * future. The information is later cleared by __reset_isolation_suitable().
*/ staticvoid update_pageblock_skip(struct compact_control *cc, struct page *page, unsignedlong pfn)
{ struct zone *zone = cc->zone;
/* * Compaction requires the taking of some coarse locks that are potentially * very heavily contended. For async compaction, trylock and record if the * lock is contended. The lock will still be acquired but compaction will * abort when the current block is finished regardless of success rate. * Sync compaction acquires the lock. * * Always returns true which makes it easier to track lock state in callers.
*/ staticbool compact_lock_irqsave(spinlock_t *lock, unsignedlong *flags, struct compact_control *cc)
__acquires(lock)
{ /* Track if the lock is contended in async mode */ if (cc->mode == MIGRATE_ASYNC && !cc->contended) { if (spin_trylock_irqsave(lock, *flags)) returntrue;
cc->contended = true;
}
spin_lock_irqsave(lock, *flags); returntrue;
}
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending.
 * Returns false when compaction can continue.
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	/* Drop the lock unconditionally; it is never regained here. */
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	/* A fatal signal aborts the whole compaction attempt. */
	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();
	return false;
}
/* * Isolate free pages onto a private freelist. If @strict is true, will abort * returning 0 on any invalid PFNs or non-free pages inside of the pageblock * (even though it may still end up isolating some pages).
*/ staticunsignedlong isolate_freepages_block(struct compact_control *cc, unsignedlong *start_pfn, unsignedlong end_pfn, struct list_head *freelist, unsignedint stride, bool strict)
{ int nr_scanned = 0, total_isolated = 0; struct page *page; unsignedlong flags = 0; bool locked = false; unsignedlong blockpfn = *start_pfn; unsignedint order;
/* Strict mode is for isolation, speed is secondary */ if (strict)
stride = 1;
page = pfn_to_page(blockpfn);
/* Isolate free pages. */ for (; blockpfn < end_pfn; blockpfn += stride, page += stride) { int isolated;
/* * Periodically drop the lock (if held) regardless of its * contention, to give chance to IRQs. Abort if fatal signal * pending.
*/ if (!(blockpfn % COMPACT_CLUSTER_MAX)
&& compact_unlock_should_abort(&cc->zone->lock, flags,
&locked, cc)) break;
nr_scanned++;
/* * For compound pages such as THP and hugetlbfs, we can save * potentially a lot of iterations if we skip them at once. * The check is racy, but we can consider only valid values * and the only danger is skipping too much.
*/ if (PageCompound(page)) { constunsignedint order = compound_order(page);
/* If we already hold the lock, we can skip some rechecking. */ if (!locked) {
locked = compact_lock_irqsave(&cc->zone->lock,
&flags, cc);
/* Recheck this is a buddy page under lock */ if (!PageBuddy(page)) goto isolate_fail;
}
/* Found a free page, will break it into order-0 pages */
order = buddy_order(page);
isolated = __isolate_free_page(page, order); if (!isolated) break;
set_page_private(page, order);
/* Record how far we have got within the block */
*start_pfn = blockpfn;
/* * If strict isolation is requested by CMA then check that all the * pages requested were isolated. If there were any failures, 0 is * returned and CMA will fail.
*/ if (strict && blockpfn < end_pfn)
total_isolated = 0;
cc->total_free_scanned += nr_scanned; if (total_isolated)
count_compact_events(COMPACTISOLATED, total_isolated); return total_isolated;
}
/** * isolate_freepages_range() - isolate free pages. * @cc: Compaction control structure. * @start_pfn: The first PFN to start isolating. * @end_pfn: The one-past-last PFN. * * Non-free pages, invalid PFNs, or zone boundaries within the * [start_pfn, end_pfn) range are considered errors, cause function to * undo its actions and return zero. cc->freepages[] are empty. * * Otherwise, function returns one-past-the-last PFN of isolated page * (which may be greater then end_pfn if end fell in a middle of * a free page). cc->freepages[] contain free pages isolated.
*/ unsignedlong
isolate_freepages_range(struct compact_control *cc, unsignedlong start_pfn, unsignedlong end_pfn)
{ unsignedlong isolated, pfn, block_start_pfn, block_end_pfn; int order;
for (order = 0; order < NR_PAGE_ORDERS; order++)
INIT_LIST_HEAD(&cc->freepages[order]);
for (; pfn < end_pfn; pfn += isolated,
block_start_pfn = block_end_pfn,
block_end_pfn += pageblock_nr_pages) { /* Protect pfn from changing by isolate_freepages_block */ unsignedlong isolate_start_pfn = pfn;
/* * pfn could pass the block_end_pfn if isolated freepage * is more than pageblock order. In this case, we adjust * scanning range to right one.
*/ if (pfn >= block_end_pfn) {
block_start_pfn = pageblock_start_pfn(pfn);
block_end_pfn = pageblock_end_pfn(pfn);
}
block_end_pfn = min(block_end_pfn, end_pfn);
if (!pageblock_pfn_to_page(block_start_pfn,
block_end_pfn, cc->zone)) break;
/* * In strict mode, isolate_freepages_block() returns 0 if * there are any holes in the block (ie. invalid PFNs or * non-free pages).
*/ if (!isolated) break;
/* * If we managed to isolate pages, it is always (1 << n) * * pageblock_nr_pages for some non-negative n. (Max order * page may span two pageblocks).
*/
}
/* We don't use freelists for anything. */ return pfn;
}
/* Similar to reclaim, but different enough that they don't share logic */ staticbool too_many_isolated(struct compact_control *cc)
{
pg_data_t *pgdat = cc->zone->zone_pgdat; bool too_many;
/* * Allow GFP_NOFS to isolate past the limit set for regular * compaction runs. This prevents an ABBA deadlock when other * compactors have already isolated to the limit, but are * blocked on filesystem locks held by the GFP_NOFS thread.
*/ if (cc->gfp_mask & __GFP_FS) {
inactive >>= 3;
active >>= 3;
}
/** * skip_isolation_on_order() - determine when to skip folio isolation based on * folio order and compaction target order * @order: to-be-isolated folio order * @target_order: compaction target order * * This avoids unnecessary folio isolations during compaction.
*/ staticbool skip_isolation_on_order(int order, int target_order)
{ /* * Unless we are performing global compaction (i.e., * is_via_compact_memory), skip any folios that are larger than the * target order: we wouldn't be here if we'd have a free folio with * the desired target_order, so migrating this folio would likely fail * later.
*/ if (!is_via_compact_memory(target_order) && order >= target_order) returntrue; /* * We limit memory compaction to pageblocks and won't try * creating free blocks of memory that are larger than that.
*/ return order >= pageblock_order;
}
/** * isolate_migratepages_block() - isolate all migrate-able pages within * a single pageblock * @cc: Compaction control structure. * @low_pfn: The first PFN to isolate * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock * @mode: Isolation mode to be used. * * Isolate all pages that can be migrated from the range specified by * [low_pfn, end_pfn). The range is expected to be within same pageblock. * Returns errno, like -EAGAIN or -EINTR in case e.g signal pending or congestion, * -ENOMEM in case we could not allocate a page, or 0. * cc->migrate_pfn will contain the next pfn to scan. * * The pages are isolated on cc->migratepages list (not required to be empty), * and cc->nr_migratepages is updated accordingly.
*/ staticint
isolate_migratepages_block(struct compact_control *cc, unsignedlong low_pfn, unsignedlong end_pfn, isolate_mode_t mode)
{
pg_data_t *pgdat = cc->zone->zone_pgdat; unsignedlong nr_scanned = 0, nr_isolated = 0; struct lruvec *lruvec; unsignedlong flags = 0; struct lruvec *locked = NULL; struct folio *folio = NULL; struct page *page = NULL, *valid_page = NULL; struct address_space *mapping; unsignedlong start_pfn = low_pfn; bool skip_on_failure = false; unsignedlong next_skip_pfn = 0; bool skip_updated = false; int ret = 0;
cc->migrate_pfn = low_pfn;
/* * Ensure that there are not too many pages isolated from the LRU * list by either parallel reclaimers or compaction. If there are, * delay for some time until fewer pages are isolated
*/ while (unlikely(too_many_isolated(cc))) { /* stop isolation if there are still pages not migrated */ if (cc->nr_migratepages) return -EAGAIN;
/* async migration should just abort */ if (cc->mode == MIGRATE_ASYNC) return -EAGAIN;
/* Time to isolate some pages for migration */ for (; low_pfn < end_pfn; low_pfn++) { bool is_dirty, is_unevictable;
if (skip_on_failure && low_pfn >= next_skip_pfn) { /* * We have isolated all migration candidates in the * previous order-aligned block, and did not skip it due * to failure. We should migrate the pages now and * hopefully succeed compaction.
*/ if (nr_isolated) break;
/* * We failed to isolate in the previous order-aligned * block. Set the new boundary to the end of the * current block. Note we can't simply increase * next_skip_pfn by 1 << order, as low_pfn might have * been incremented by a higher number due to skipping * a compound or a high-order buddy page in the * previous loop iteration.
*/
next_skip_pfn = block_end_pfn(low_pfn, cc->order);
}
/* * Periodically drop the lock (if held) regardless of its * contention, to give chance to IRQs. Abort completely if * a fatal signal is pending.
*/ if (!(low_pfn % COMPACT_CLUSTER_MAX)) { if (locked) {
unlock_page_lruvec_irqrestore(locked, flags);
locked = NULL;
}
if (fatal_signal_pending(current)) {
cc->contended = true;
ret = -EINTR;
goto fatal_pending;
}
cond_resched();
}
nr_scanned++;
page = pfn_to_page(low_pfn);
/* * Check if the pageblock has already been marked skipped. * Only the first PFN is checked as the caller isolates * COMPACT_CLUSTER_MAX at a time so the second call must * not falsely conclude that the block should be skipped.
*/ if (!valid_page && (pageblock_aligned(low_pfn) ||
low_pfn == cc->zone->zone_start_pfn)) { if (!isolation_suitable(cc, page)) {
low_pfn = end_pfn;
folio = NULL; goto isolate_abort;
}
valid_page = page;
}
if (PageHuge(page)) { constunsignedint order = compound_order(page); /* * skip hugetlbfs if we are not compacting for pages * bigger than its order. THPs and other compound pages * are handled below.
*/ if (!cc->alloc_contig) {
if (order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << order) - 1;
nr_scanned += (1UL << order) - 1;
} goto isolate_fail;
} /* for alloc_contig case */ if (locked) {
unlock_page_lruvec_irqrestore(locked, flags);
locked = NULL;
}
folio = page_folio(page);
ret = isolate_or_dissolve_huge_folio(folio, &cc->migratepages);
/* * Fail isolation in case isolate_or_dissolve_huge_folio() * reports an error. In case of -ENOMEM, abort right away.
*/ if (ret < 0) { /* Do not report -EBUSY down the chain */ if (ret == -EBUSY)
ret = 0;
low_pfn += (1UL << order) - 1;
nr_scanned += (1UL << order) - 1; goto isolate_fail;
}
if (folio_test_hugetlb(folio)) { /* * Hugepage was successfully isolated and placed * on the cc->migratepages list.
*/
low_pfn += folio_nr_pages(folio) - 1; goto isolate_success_no_list;
}
/* * Ok, the hugepage was dissolved. Now these pages are * Buddy and cannot be re-allocated because they are * isolated. Fall-through as the check below handles * Buddy pages.
*/
}
/* * Skip if free. We read page order here without zone lock * which is generally unsafe, but the race window is small and * the worst thing that can happen is that we skip some * potential isolation targets.
*/ if (PageBuddy(page)) { unsignedlong freepage_order = buddy_order_unsafe(page);
/* * Without lock, we cannot be sure that what we got is * a valid page order. Consider only values in the * valid order range to prevent low_pfn overflow.
*/ if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << freepage_order) - 1;
nr_scanned += (1UL << freepage_order) - 1;
} continue;
}
/* * Regardless of being on LRU, compound pages such as THP * (hugetlbfs is handled above) are not to be compacted unless * we are attempting an allocation larger than the compound * page size. We can potentially save a lot of iterations if we * skip them at once. The check is racy, but we can consider * only valid values and the only danger is skipping too much.
*/ if (PageCompound(page) && !cc->alloc_contig) { constunsignedint order = compound_order(page);
/* Skip based on page order and compaction target order. */ if (skip_isolation_on_order(order, cc->order)) { if (order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << order) - 1;
nr_scanned += (1UL << order) - 1;
} goto isolate_fail;
}
}
/* * Check may be lockless but that's ok as we recheck later. * It's possible to migrate LRU and non-lru movable pages. * Skip any other type of page
*/ if (!PageLRU(page)) { /* Isolation code will deal with any races. */ if (unlikely(page_has_movable_ops(page)) &&
!PageMovableOpsIsolated(page)) { if (locked) {
unlock_page_lruvec_irqrestore(locked, flags);
locked = NULL;
}
/* * Be careful not to clear PageLRU until after we're * sure the page is not being freed elsewhere -- the * page release code relies on it.
*/
folio = folio_get_nontail_page(page); if (unlikely(!folio)) goto isolate_fail;
/* * Migration will fail if an anonymous page is pinned in memory, * so avoid taking lru_lock and isolating it unnecessarily in an * admittedly racy check.
*/
mapping = folio_mapping(folio); if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio)) goto isolate_fail_put;
/* * Only allow to migrate anonymous pages in GFP_NOFS context * because those do not depend on fs locks.
*/ if (!(cc->gfp_mask & __GFP_FS) && mapping) goto isolate_fail_put;
/* Only take pages on LRU: a check now makes later tests safe */ if (!folio_test_lru(folio)) goto isolate_fail_put;
is_unevictable = folio_test_unevictable(folio);
/* Compaction might skip unevictable pages but CMA takes them */ if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable) goto isolate_fail_put;
/* * To minimise LRU disruption, the caller can indicate with * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages * it will be able to migrate without blocking - clean pages * for the most part. PageWriteback would require blocking.
*/ if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio)) goto isolate_fail_put;
/* * Only folios without mappings or that have * a ->migrate_folio callback are possible to migrate * without blocking. * * Folios from inaccessible mappings are not migratable. * * However, we can be racing with truncation, which can * free the mapping that we need to check. Truncation * holds the folio lock until after the folio is removed * from the page so holding it ourselves is sufficient. * * To avoid locking the folio just to check inaccessible, * assume every inaccessible folio is also unevictable, * which is a cheaper test. If our assumption goes * wrong, it's not a correctness bug, just potentially * wasted cycles.
*/ if (!folio_trylock(folio)) goto isolate_fail_put;
/* * Try get exclusive access under lock. If marked for * skip, the scan is aborted unless the current context * is a rescan to reach the end of the pageblock.
*/ if (!skip_updated && valid_page) {
skip_updated = true; if (test_and_set_skip(cc, valid_page) &&
!cc->finish_pageblock) {
low_pfn = end_pfn; goto isolate_abort;
}
}
/* * Check LRU folio order under the lock
*/ if (unlikely(skip_isolation_on_order(folio_order(folio),
cc->order) &&
!cc->alloc_contig)) {
low_pfn += folio_nr_pages(folio) - 1;
nr_scanned += folio_nr_pages(folio) - 1;
folio_set_lru(folio); goto isolate_fail_put;
}
}
/* The folio is taken off the LRU */ if (folio_test_large(folio))
low_pfn += folio_nr_pages(folio) - 1;
/* * Avoid isolating too much unless this block is being * fully scanned (e.g. dirty/writeback pages, parallel allocation) * or a lock is contended. For contention, isolate quickly to * potentially remove one source of contention.
*/ if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
!cc->finish_pageblock && !cc->contended) {
++low_pfn; break;
}
continue;
isolate_fail_put: /* Avoid potential deadlock in freeing page under lru_lock */ if (locked) {
unlock_page_lruvec_irqrestore(locked, flags);
locked = NULL;
}
folio_put(folio);
isolate_fail: if (!skip_on_failure && ret != -ENOMEM) continue;
/* * We have isolated some pages, but then failed. Release them * instead of migrating, as we cannot form the cc->order buddy * page anyway.
*/ if (nr_isolated) { if (locked) {
unlock_page_lruvec_irqrestore(locked, flags);
locked = NULL;
}
putback_movable_pages(&cc->migratepages);
cc->nr_migratepages = 0;
nr_isolated = 0;
}
if (low_pfn < next_skip_pfn) {
low_pfn = next_skip_pfn - 1; /* * The check near the loop beginning would have updated * next_skip_pfn too, but this is a bit simpler.
*/
next_skip_pfn += 1UL << cc->order;
}
if (ret == -ENOMEM) break;
}
/* * The PageBuddy() check could have potentially brought us outside * the range to be scanned.
*/ if (unlikely(low_pfn > end_pfn))
low_pfn = end_pfn;
folio = NULL;
isolate_abort: if (locked)
unlock_page_lruvec_irqrestore(locked, flags); if (folio) {
folio_set_lru(folio);
folio_put(folio);
}
/* * Update the cached scanner pfn once the pageblock has been scanned. * Pages will either be migrated in which case there is no point * scanning in the near future or migration failed in which case the * failure reason may persist. The block is marked for skipping if * there were no pages isolated in the block or if the block is * rescanned twice in a row.
*/ if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { if (!cc->no_set_skip_hint && valid_page && !skip_updated)
set_pageblock_skip(valid_page);
update_cached_migrate(cc, low_pfn);
}
fatal_pending:
cc->total_migrate_scanned += nr_scanned; if (nr_isolated)
count_compact_events(COMPACTISOLATED, nr_isolated);
cc->migrate_pfn = low_pfn;
return ret;
}
/** * isolate_migratepages_range() - isolate migrate-able pages in a PFN range * @cc: Compaction control structure. * @start_pfn: The first PFN to start isolating. * @end_pfn: The one-past-last PFN. * * Returns -EAGAIN when contented, -EINTR in case of a signal pending, -ENOMEM * in case we could not allocate a page, or 0.
*/ int
isolate_migratepages_range(struct compact_control *cc, unsignedlong start_pfn, unsignedlong end_pfn)
{ unsignedlong pfn, block_start_pfn, block_end_pfn; int ret = 0;
/* Scan block by block. First and last block may be incomplete */
pfn = start_pfn;
block_start_pfn = pageblock_start_pfn(pfn); if (block_start_pfn < cc->zone->zone_start_pfn)
block_start_pfn = cc->zone->zone_start_pfn;
block_end_pfn = pageblock_end_pfn(pfn);
/* Returns true if the page is within a block suitable for migration to */ staticbool suitable_migration_target(struct compact_control *cc, struct page *page)
{ /* If the page is a large free page, then disallow migration */ if (PageBuddy(page)) { int order = cc->order > 0 ? cc->order : pageblock_order;
/* * We are checking page_order without zone->lock taken. But * the only small danger is that we skip a potentially suitable * pageblock, so it's not worth to check order for valid range.
*/ if (buddy_order_unsafe(page) >= order) returnfalse;
}
if (cc->ignore_block_suitable) returntrue;
/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ if (is_migrate_movable(get_pageblock_migratetype(page))) returntrue;
/* * Test whether the free scanner has reached the same or lower pageblock than * the migration scanner, and compaction should thus terminate.
*/ staticinlinebool compact_scanners_met(struct compact_control *cc)
{ return (cc->free_pfn >> pageblock_order)
<= (cc->migrate_pfn >> pageblock_order);
}
/* * Used when scanning for a suitable migration target which scans freelists * in reverse. Reorders the list such as the unscanned pages are scanned * first on the next iteration of the free scanner
*/ staticvoid
move_freelist_head(struct list_head *freelist, struct page *freepage)
{
LIST_HEAD(sublist);
/* * Similar to move_freelist_head except used by the migration scanner * when scanning forward. It's possible for these list operations to * move against each other if they search the free list exactly in * lockstep.
*/ staticvoid
move_freelist_tail(struct list_head *freelist, struct page *freepage)
{
LIST_HEAD(sublist);
/* Full compaction passes in a negative order */ if (cc->order <= 0) return;
/* * If starting the scan, use a deeper search and use the highest * PFN found if a suitable one is not found.
*/ if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
limit = pageblock_nr_pages >> 1;
scan_start = true;
}
/* * Preferred point is in the top quarter of the scan space but take * a pfn from the top half if the search is problematic.
*/
distance = (cc->free_pfn - cc->migrate_pfn);
low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
if (WARN_ON_ONCE(min_pfn > low_pfn))
low_pfn = min_pfn;
/* * Search starts from the last successful isolation order or the next * order to search after a previous failure
*/
cc->search_order = min_t(unsignedint, cc->order - 1, cc->search_order);
if (!page) {
cc->fast_search_fail++; if (scan_start) { /* * Use the highest PFN found above min. If one was * not found, be pessimistic for direct compaction * and use the min mark.
*/ if (highest >= min_pfn) {
page = pfn_to_page(highest);
cc->free_pfn = highest;
} else { if (cc->direct_compaction && pfn_valid(min_pfn)) {
page = pageblock_pfn_to_page(min_pfn,
min(pageblock_end_pfn(min_pfn),
zone_end_pfn(cc->zone)),
cc->zone); if (page && !suitable_migration_target(cc, page))
page = NULL;
/* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them.
*/ staticvoid isolate_freepages(struct compact_control *cc)
{ struct zone *zone = cc->zone; struct page *page; unsignedlong block_start_pfn; /* start of current pageblock */ unsignedlong isolate_start_pfn; /* exact pfn we start at */ unsignedlong block_end_pfn; /* end of current pageblock */ unsignedlong low_pfn; /* lowest pfn scanner is able to scan */ unsignedint stride;
/* Try a small search of the free lists for a candidate */
fast_isolate_freepages(cc); if (cc->nr_freepages) return;
/* * Initialise the free scanner. The starting point is where we last * successfully isolated from, zone-cached value, or the end of the * zone when isolating for the first time. For looping we also need * this pfn aligned down to the pageblock boundary, because we do * block_start_pfn -= pageblock_nr_pages in the for loop. * For ending point, take care when isolating in last pageblock of a * zone which ends in the middle of a pageblock. * The low boundary is the end of the pageblock the migration scanner * is using.
*/
isolate_start_pfn = cc->free_pfn;
block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
zone_end_pfn(zone));
low_pfn = pageblock_end_pfn(cc->migrate_pfn);
stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
/* * Isolate free pages until enough are available to migrate the * pages on cc->migratepages. We stop searching if the migrate * and free page scanners meet or enough free pages are isolated.
*/ for (; block_start_pfn >= low_pfn;
block_end_pfn = block_start_pfn,
block_start_pfn -= pageblock_nr_pages,
isolate_start_pfn = block_start_pfn) { unsignedlong nr_isolated;
/* * This can iterate a massively long zone without finding any * suitable migration targets, so periodically check resched.
*/ if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
cond_resched();
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone); if (!page) { unsignedlong next_pfn;
next_pfn = skip_offline_sections_reverse(block_start_pfn); if (next_pfn)
block_start_pfn = max(next_pfn, low_pfn);
continue;
}
/* Check the block is suitable for migration */ if (!suitable_migration_target(cc, page)) continue;
/* If isolation recently failed, do not retry */ if (!isolation_suitable(cc, page)) continue;
/* Found a block suitable for isolating free pages from. */
nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
block_end_pfn, cc->freepages, stride, false);
/* Update the skip hint if the full pageblock was scanned */ if (isolate_start_pfn == block_end_pfn)
update_pageblock_skip(cc, page, block_start_pfn -
pageblock_nr_pages);
/* Are enough freepages isolated? */ if (cc->nr_freepages >= cc->nr_migratepages) { if (isolate_start_pfn >= block_end_pfn) { /* * Restart at previous pageblock if more * freepages can be isolated next time.
*/
isolate_start_pfn =
block_start_pfn - pageblock_nr_pages;
} break;
} elseif (isolate_start_pfn < block_end_pfn) { /* * If isolation failed early, do not continue * needlessly.
*/ break;
}
/* * Record where the free scanner will restart next time. Either we * broke from the loop and set isolate_start_pfn based on the last * call to isolate_freepages_block(), or we met the migration scanner * and the loop terminated due to isolate_start_pfn < low_pfn
*/
cc->free_pfn = isolate_start_pfn;
}
/* * This is a migrate-callback that "allocates" freepages by taking pages * from the isolated freelists in the block we are migrating to.
*/ staticstruct folio *compaction_alloc_noprof(struct folio *src, unsignedlong data)
/*
 * NOTE(review): tokens such as "staticstruct" and "unsignedlong" are
 * whitespace damage from extraction; upstream reads "static struct" etc.
 */
{ struct compact_control *cc = (struct compact_control *)data; struct folio *dst; int order = folio_order(src); bool has_isolated_pages = false; int start_order; struct page *freepage; unsignedlong size;
/* Find the smallest non-empty isolated freelist able to satisfy 'order'. */
again: for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++) if (!list_empty(&cc->freepages[start_order])) break;
/* no free pages in the list */ if (start_order == NR_PAGE_ORDERS) { if (has_isolated_pages) return NULL;
/* Refill the per-order freelists once, then retry the search above. */
isolate_freepages(cc);
has_isolated_pages = true; goto again;
}
/*
 * NOTE(review): the function body is truncated here by the extraction --
 * the code that removes a page from the chosen freelist, splits it down
 * to 'order', prepares it and returns the destination folio is missing,
 * along with the closing brace. Restore from upstream mm/compaction.c.
 */
/* * This is a migrate-callback that "frees" freepages back to the isolated * freelist. All pages on the freelist are from the same zone, so there is no * special handling needed for NUMA.
*/ staticvoid compaction_free(struct folio *dst, unsignedlong data)
{ struct compact_control *cc = (struct compact_control *)data; int order = folio_order(dst); struct page *page = &dst->page;
if (folio_put_testzero(dst)) {
free_pages_prepare(page, order);
list_add(&dst->lru, &cc->freepages[order]);
cc->nr_freepages += 1 << order;
}
cc->nr_migratepages += 1 << order; /* * someone else has referenced the page, we cannot take it back to our * free list.
*/
}
/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;
/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT;
/*
 * Tunable for proactive compaction. It determines how
 * aggressively the kernel should compact memory in the
 * background. It takes values in the range [0, 100].
 */
static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
/* External fragmentation threshold above which compaction is worthwhile. */
static int sysctl_extfrag_threshold = 500;
/* Write-only trigger: writing 1 compacts all zones (vm.compact_memory). */
static int __read_mostly sysctl_compact_memory;
/* * Briefly search the free lists for a migration source that already has * some free pages to reduce the number of pages that need migration * before a pageblock is free.
*/ staticunsignedlong fast_find_migrateblock(struct compact_control *cc)
/*
 * NOTE(review): "staticunsignedlong"/"unsignedint" below are whitespace
 * damage from extraction; upstream reads "static unsigned long" etc.
 * Returns the pfn the migration scanner should start from; returns the
 * current cc->migrate_pfn unchanged when the fast search is not applicable.
 */
{ unsignedint limit = freelist_scan_limit(cc); unsignedint nr_scanned = 0; unsignedlong distance; unsignedlong pfn = cc->migrate_pfn; unsignedlong high_pfn; int order; bool found_block = false;
/* Skip hints are relied on to avoid repeats on the fast search */ if (cc->ignore_skip_hint) return pfn;
/* * If the pageblock should be finished then do not select a different * pageblock.
*/ if (cc->finish_pageblock) return pfn;
/* * If the migrate_pfn is not at the start of a zone or the start * of a pageblock then assume this is a continuation of a previous * scan restarted due to COMPACT_CLUSTER_MAX.
*/ if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) return pfn;
/* * For smaller orders, just linearly scan as the number of pages * to migrate should be relatively small and does not necessarily * justify freeing up a large block for a small allocation.
*/ if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) return pfn;
/* * Only allow kcompactd and direct requests for movable pages to * quickly clear out a MOVABLE pageblock for allocation. This * reduces the risk that a large movable pageblock is freed for * an unmovable/reclaimable small allocation.
*/ if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) return pfn;
/* * When starting the migration scanner, pick any pageblock within the * first half of the search space. Otherwise try and pick a pageblock * within the first eighth to reduce the chances that a migration * target later becomes a source.
*/
distance = (cc->free_pfn - cc->migrate_pfn) >> 1; if (cc->migrate_pfn != cc->zone->zone_start_pfn)
distance >>= 2;
high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
/*
 * NOTE(review): the extraction is truncated here -- the enclosing
 * for-each-order loop, the zone lock acquisition and the
 * list_for_each_entry() walk over the free lists are missing, which is
 * why 'freelist', 'freepage' and 'free_pfn' appear undeclared below.
 * Restore from upstream mm/compaction.c.
 */
if (nr_scanned++ >= limit) {
move_freelist_tail(freelist, freepage); break;
}
free_pfn = page_to_pfn(freepage); if (free_pfn < high_pfn) { /* * Avoid if skipped recently. Ideally it would * move to the tail but even safe iteration of * the list assumes an entry is deleted, not * reordered.
*/ if (get_pageblock_skip(freepage)) continue;
/* Reorder to so a future search skips recent pages */
move_freelist_tail(freelist, freepage);
/*
 * NOTE(review): more truncation -- the code that records the found
 * block (found_block = true, pfn/high_pfn update, skip-bit set) and
 * releases the lock is missing before this trailing fallback.
 */
/* * If fast scanning failed then use a cached entry for a page block * that had free pages as the basis for starting a linear scan.
*/ if (!found_block) {
cc->fast_search_fail++;
pfn = reinit_migrate_pfn(cc);
} return pfn;
}
/* * Isolate all pages that can be migrated from the first suitable block, * starting at the block pointed to by the migrate scanner pfn within * compact_control.
*/ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
/*
 * NOTE(review): "unsignedlong" below is extraction damage for
 * "unsigned long". Returns ISOLATE_ABORT on fatal contention,
 * otherwise success/none depending on what was isolated.
 */
{ unsignedlong block_start_pfn; unsignedlong block_end_pfn; unsignedlong low_pfn; struct page *page; const isolate_mode_t isolate_mode =
(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); bool fast_find_block;
/* * Start at where we last stopped, or beginning of the zone as * initialized by compact_zone(). The first failure will use * the lowest PFN as the starting point for linear scanning.
*/
low_pfn = fast_find_migrateblock(cc);
block_start_pfn = pageblock_start_pfn(low_pfn); if (block_start_pfn < cc->zone->zone_start_pfn)
block_start_pfn = cc->zone->zone_start_pfn;
/* * fast_find_migrateblock() has already ensured the pageblock is not * set with a skipped flag, so to avoid the isolation_suitable check * below again, check whether the fast search was successful.
*/
fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
/* Only scan within a pageblock boundary */
block_end_pfn = pageblock_end_pfn(low_pfn);
/* * Iterate over whole pageblocks until we find the first suitable. * Do not cross the free scanner.
*/ for (; block_end_pfn <= cc->free_pfn;
fast_find_block = false,
cc->migrate_pfn = low_pfn = block_end_pfn,
block_start_pfn = block_end_pfn,
block_end_pfn += pageblock_nr_pages) {
/* * This can potentially iterate a massively long zone with * many pageblocks unsuitable, so periodically check if we * need to schedule.
*/ if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
cond_resched();
page = pageblock_pfn_to_page(block_start_pfn,
block_end_pfn, cc->zone); if (!page) { unsignedlong next_pfn;
/*
 * NOTE(review): extraction truncated the !page branch here -- the
 * skip_offline_sections() handling, the 'continue' and the branch's
 * closing brace are missing, leaving the braces unbalanced.
 */
/* * If isolation recently failed, do not retry. Only check the * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock * to be visited multiple times. Assume skip was checked * before making it "skip" so other compaction instances do * not scan the same block.
*/ if ((pageblock_aligned(low_pfn) ||
low_pfn == cc->zone->zone_start_pfn) &&
!fast_find_block && !isolation_suitable(cc, page)) continue;
/* * For async direct compaction, only scan the pageblocks of the * same migratetype without huge pages. Async direct compaction * is optimistic to see if the minimum amount of work satisfies * the allocation. The cached PFN is updated as it's possible * that all remaining blocks between source and target are * unsuitable and the compaction scanners fail to meet.
*/ if (!suitable_migration_source(cc, page)) {
update_cached_migrate(cc, block_end_pfn); continue;
}
/* Perform the isolation */ if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
isolate_mode)) return ISOLATE_ABORT;
/* * Either we isolated something and proceed with migration. Or * we failed and compact_zone should decide if we should * continue or not.
*/ break;
}
/*
 * NOTE(review): the function tail is truncated -- the final
 * "return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;" and
 * the function's closing brace are missing from this copy.
 */
/* * Determine whether kswapd is (or recently was!) running on this node. * * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't * zero it.
*/ staticbool kswapd_is_running(pg_data_t *pgdat)
{ bool running;
/*
 * A zone's fragmentation score is the external fragmentation wrt to the
 * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
 */
static unsigned int fragmentation_score_zone(struct zone *zone)
{
	return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
}
/* * A weighted zone's fragmentation score is the external fragmentation * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It * returns a value in the range [0, 100]. * * The scaling factor ensures that proactive compaction focuses on larger * zones like ZONE_NORMAL, rather than smaller, specialized zones like * ZONE_DMA32. For smaller zones, the score value remains close to zero, * and thus never exceeds the high threshold for proactive compaction.
*/ staticunsignedint fragmentation_score_zone_weighted(struct zone *zone)
{ unsignedlong score;
/*
 * NOTE(review): the function body is truncated here by the extraction --
 * only the 'score' declaration survived; the computation scaling
 * fragmentation_score_zone() by the zone's size, the return statement
 * and the closing brace are missing. Restore from upstream
 * mm/compaction.c ("staticunsignedint"/"unsignedlong" are also
 * whitespace damage for "static unsigned int"/"unsigned long").
 */
/*
 * The per-node proactive (background) compaction process is started by its
 * corresponding kcompactd thread when the node's fragmentation score
 * exceeds the high threshold. The compaction process remains active till
 * the node's score falls below the low threshold, or one of the back-off
 * conditions is met.
 *
 * Returns the node score: the sum of the size-weighted fragmentation
 * scores of all populated zones on the node.
 */
static unsigned int fragmentation_score_node(pg_data_t *pgdat)
{
	unsigned int score = 0;
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone;

		zone = &pgdat->node_zones[zoneid];
		/* Skip zones with no managed memory. */
		if (!populated_zone(zone))
			continue;
		score += fragmentation_score_zone_weighted(zone);
	}

	return score;
}
/*
 * NOTE(review): the following span is the interior of a function (it
 * appears to be __compact_finished()); its signature and the declarations
 * of 'ret', 'order' and 'migratetype' were lost in extraction, and the
 * tail is cut off at the extractor's size limit. Annotations only; do not
 * compile as-is.
 */
/* Compaction run completes if the migrate and free scanner meet */ if (compact_scanners_met(cc)) { /* Let the next compaction start anew. */
reset_cached_positions(cc->zone);
/* * Mark that the PG_migrate_skip information should be cleared * by kswapd when it goes to sleep. kcompactd does not set the * flag itself as the decision to be clear should be directly * based on an allocation request.
*/ if (cc->direct_compaction)
cc->zone->compact_blockskip_flush = true;
if (cc->whole_zone) return COMPACT_COMPLETE; else return COMPACT_PARTIAL_SKIPPED;
}
if (cc->proactive_compaction) { int score, wmark_low;
pg_data_t *pgdat;
pgdat = cc->zone->zone_pgdat; if (kswapd_is_running(pgdat)) return COMPACT_PARTIAL_SKIPPED;
/*
 * NOTE(review): truncation -- the assignments computing 'score' and
 * 'wmark_low' (fragmentation score vs. proactive watermark) are missing
 * here, so both are read uninitialized in this copy.
 */
if (score > wmark_low)
ret = COMPACT_CONTINUE; else
ret = COMPACT_SUCCESS;
goto out;
}
if (is_via_compact_memory(cc->order)) return COMPACT_CONTINUE;
/* * Always finish scanning a pageblock to reduce the possibility of * fallbacks in the future. This is particularly important when * migration source is unmovable/reclaimable but it's not worth * special casing.
*/ if (!pageblock_aligned(cc->migrate_pfn)) return COMPACT_CONTINUE;
/* * When defrag_mode is enabled, make kcompactd target * watermarks in whole pageblocks. Because they can be stolen * without polluting, no further fallback checks are needed.
*/ if (defrag_mode && !cc->direct_compaction) { if (__zone_watermark_ok(cc->zone, cc->order,
high_wmark_pages(cc->zone),
cc->highest_zoneidx, cc->alloc_flags,
zone_page_state(cc->zone,
NR_FREE_PAGES_BLOCKS))) return COMPACT_SUCCESS;
return COMPACT_CONTINUE;
}
/* Direct compactor: Is a suitable page free? */
ret = COMPACT_NO_SUITABLE_PAGE; for (order = cc->order; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &cc->zone->free_area[order];
/* Job done if page is free of the right migratetype */ if (!free_area_empty(area, migratetype)) return COMPACT_SUCCESS;
#ifdef CONFIG_CMA /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ if (migratetype == MIGRATE_MOVABLE &&
!free_area_empty(area, MIGRATE_CMA)) return COMPACT_SUCCESS; #endif /* * Job done if allocation would steal freepages from * other migratetype buddy lists.
*/
/*
 * NOTE(review): the remainder of this function (and of the file) is cut
 * off here by the extractor ("maximum size reached"). Restore from
 * upstream mm/compaction.c.
 */
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.27 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.