if (!pte_same(ptep_get(ptep), entry))
__set_pte_at(vma->vm_mm, ptep, entry); /* * update_mmu_cache will unconditionally execute, handling both * the case that the PTE changed and the spurious fault case.
*/ returntrue;
svvptc: if (!pte_same(ptep_get(ptep), entry)) {
__set_pte_at(vma->vm_mm, ptep, entry); /* Here only not svadu is impacted */
flush_tlb_page(vma, address); returntrue;
}
returnfalse;
}
/*
 * ptep_test_and_clear_young - test and clear the accessed ("young") bit
 * of the PTE at @ptep.
 *
 * @vma:     the VMA the mapping belongs to (unused here, kept for the
 *           generic pgtable API signature)
 * @address: virtual address of the mapping (unused here, same reason)
 * @ptep:    pointer to the page table entry to update
 *
 * Returns 0 if the PTE is not young; otherwise atomically clears
 * _PAGE_ACCESSED and returns the bit's previous (nonzero) value.
 *
 * The initial pte_young() check avoids a needless atomic
 * read-modify-write on the common path where the accessed bit is
 * already clear.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep)
{
	if (!pte_young(ptep_get(ptep)))
		return 0;

	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp))); /* * When leaf PTE entries (regular pages) are collapsed into a leaf * PMD entry (huge page), a valid non-leaf PTE is converted into a * valid leaf PTE at the level 1 page table. Since the sfence.vma * forms that specify an address only apply to leaf PTEs, we need a * global flush here. collapse_huge_page() assumes these flushes are * eager, so just do the fence here.
*/
flush_tlb_mm(vma->vm_mm); return pmd;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.