/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
        return !folio_test_swapbacked(folio);
}
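/*
 * Illustrative sketch, not part of the kernel source: the integer returned by
 * folio_is_file_lru() is meant to be used directly as an index/multiplier,
 * e.g. to pick the base LRU list for a folio. The helper name below is made
 * up for illustration; lru_gen_update_size() further down uses the same
 * "type * LRU_INACTIVE_FILE" pattern.
 */
static inline enum lru_list example_base_lru_list(struct folio *folio)
{
        int type = folio_is_file_lru(folio);    /* 0 = anon/swap backed, 1 = file backed */

        /* the file LRUs sit right after the anon LRUs in enum lru_list */
        return type * LRU_INACTIVE_FILE;
}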
/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

        __folio_clear_lru(folio);

        /* this shouldn't happen, so leave the flags to bad_page() */
        if (folio_test_active(folio) && folio_test_unevictable(folio))
                return;

        __folio_clear_active(folio);
        __folio_clear_unevictable(folio);
}
/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
        enum lru_list lru;

        VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

        if (folio_test_unevictable(folio))
                return LRU_UNEVICTABLE;

        lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
        if (folio_test_active(folio))
                lru += LRU_ACTIVE;

        return lru;
}

static inline int folio_lru_refs(struct folio *folio)
{
        unsigned long flags = READ_ONCE(folio->flags);

        if (!(flags & BIT(PG_referenced)))
                return 0;
        /*
         * Return the total number of accesses including PG_referenced. Also see
         * the comment on LRU_REFS_FLAGS.
         */
        return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
        unsigned long max_seq = READ_ONCE(lruvec->lrugen.max_seq);

        VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

        /* see the comment on MIN_NR_GENS */
        return gen == lru_gen_from_seq(max_seq) ||
               gen == lru_gen_from_seq(max_seq - 1);
}
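/*
 * Illustrative sketch (assumption, not kernel source): lru_gen_is_active()
 * folds the multi-gen state back into the classic active/inactive split, so
 * a folio in one of the two youngest generations is accounted as active.
 * example_gen_lru_list() is a made-up name; the pattern mirrors the
 * "type * LRU_INACTIVE_FILE" accounting used below.
 */
static inline enum lru_list example_gen_lru_list(struct lruvec *lruvec,
                                                 struct folio *folio, int gen)
{
        int type = folio_is_file_lru(folio);
        enum lru_list lru = type * LRU_INACTIVE_FILE;

        if (lru_gen_is_active(lruvec, gen))
                lru += LRU_ACTIVE;

        return lru;
}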
static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
                                       int old_gen, int new_gen)
{
        int type = folio_is_file_lru(folio);
        int zone = folio_zonenum(folio);
        int delta = folio_nr_pages(folio);
        enum lru_list lru = type * LRU_INACTIVE_FILE;
        struct lru_gen_folio *lrugen = &lruvec->lrugen;
static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_add_folio(lruvec, folio, true))
                return;

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        /* This is not expected to be used on LRU_UNEVICTABLE */
        list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma)
{
        struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

        if (anon_name)
                new_vma->anon_name = anon_vma_name_reuse(anon_name);
}
static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
        /*
         * Not using anon_vma_name because it generates a warning if mmap_lock
         * is not held, which might be the case here.
         */
        anon_vma_name_put(vma->anon_name);
}
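/*
 * Illustrative sketch, not a real call site: dup_anon_vma_name() takes an
 * extra reference on the shared name via anon_vma_name_reuse(), and
 * free_anon_vma_name() drops it again via anon_vma_name_put(), so copying a
 * VMA and destroying the copy leaves the refcount balanced. The helper name
 * is made up for illustration.
 */
static inline void example_copy_then_destroy(struct vm_area_struct *orig_vma,
                                             struct vm_area_struct *new_vma)
{
        /* caller holds the mmap_lock of orig_vma's mm */
        dup_anon_vma_name(orig_vma, new_vma);   /* +1 reference, if named */
        free_anon_vma_name(new_vma);            /* -1 reference */
}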
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);

        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *      atomic_inc(&mm->tlb_flush_pending);
         *      spin_lock(&ptl);
         *      ...
         *      set_pte_at();
         *      spin_unlock(&ptl);
         *
         *                              spin_lock(&ptl)
         *                              mm_tlb_flush_pending();
         *                              ....
         *                              spin_unlock(&ptl);
         *
         *                              flush_tlb_range();
         *                              atomic_dec(&mm->tlb_flush_pending);
         *
         * Where the increment is constrained by the PTL unlock, it thus
         * ensures that the increment is visible if the PTE modification is
         * visible. After all, if there is no PTE modification, nobody cares
         * about TLB flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}
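/*
 * Illustrative sketch of the pairing described above (an assumption for
 * illustration, not a real kernel call site): the increment is published
 * before the PTE changes made under the PTL, and the decrement only happens
 * once the flush has completed.
 */
static inline void example_change_prot_range(struct mm_struct *mm,
                                             struct vm_area_struct *vma,
                                             unsigned long start,
                                             unsigned long end)
{
        inc_tlb_flush_pending(mm);

        /* ... take the PTL, modify the PTEs for [start, end), drop the PTL ... */

        flush_tlb_range(vma, start, end);
        dec_tlb_flush_pending(mm);
}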
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}
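/*
 * Illustrative sketch (an assumption, not the real unmap teardown logic):
 * a caller that learns via mm_tlb_flush_nested() that another thread is also
 * flushing the same mm cannot trust its own range to cover all stale entries,
 * so it widens the flush. The helper name is made up for illustration.
 */
static inline void example_finish_unmap(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long start, unsigned long end)
{
        if (mm_tlb_flush_nested(mm))
                flush_tlb_mm(mm);               /* overlap possible: flush it all */
        else
                flush_tlb_range(vma, start, end);

        dec_tlb_flush_pending(mm);
}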
#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
                swp_entry_t entry, struct vm_area_struct *dst_vma)
{
        pte_marker srcm = pte_marker_get(entry);
        /* Always copy error entries. */
        pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);

        /* Only copy PTE markers if UFFD register matches. */
        if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
                dstm |= PTE_MARKER_UFFD_WP;

        return dstm;
}
#endif
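/*
 * Illustrative sketch (an assumption, not a real copy path): per the comment
 * above, a caller that hits a marker entry while copying page tables asks
 * copy_pte_marker() what survives the copy and installs the result with
 * make_pte_marker(). The helper name is made up for illustration.
 */
static inline void example_copy_marker_pte(swp_entry_t entry,
                                           struct vm_area_struct *dst_vma,
                                           unsigned long addr, pte_t *dst_pte)
{
        pte_marker marker = copy_pte_marker(entry, dst_vma);

        if (marker)
                set_pte_at(dst_vma->vm_mm, addr, dst_pte,
                           make_pte_marker(marker));
}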
/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed. E.g., when the pte is cleared the caller should have taken care of
 * the tlb flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte, and if they see it, they'll fault and serialize at the pgtable
 * lock.
 *
 * Returns true if an uffd-wp pte was installed, false otherwise.
 */
static inline bool
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
                              pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
        bool arm_uffd_pte = false;

        /* The current status of the pte should be "cleared" before calling */
        WARN_ON_ONCE(!pte_none(ptep_get(pte)));

        /*
         * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
         * thing, because when zapping either it means it's dropping the
         * page, or in TTU where the present pte will be quickly replaced
         * with a swap pte. There's no way of leaking the bit.
         */
        if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
                return false;

        /* A uffd-wp wr-protected normal pte */
        if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
                arm_uffd_pte = true;

        /*
         * A uffd-wp wr-protected swap pte. Note: this should even cover an
         * existing pte marker with uffd-wp bit set.
         */
        if (unlikely(pte_swp_uffd_wp_any(pteval)))
                arm_uffd_pte = true;
        if (unlikely(arm_uffd_pte)) {
                set_pte_at(vma->vm_mm, addr, pte,
                           make_pte_marker(PTE_MARKER_UFFD_WP));
                return true;
        }
#endif
        return false;
}

static inline bool vma_has_recency(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
                return false;

        if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
                return false;

        return true;
}
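/*
 * Illustrative caller sketch (an assumption, not a real zap path): the PTE is
 * cleared first, under the PTL, and only then is the uffd-wp marker re-armed
 * so the write-protection is not silently lost. The helper name is made up
 * for illustration.
 */
static inline void example_zap_one_pte(struct vm_area_struct *vma,
                                       unsigned long addr, pte_t *pte)
{
        /* caller holds the PTL; flushing the TLB for the old PTE is its job too */
        pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);

        pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
}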
#endif