/*
 * Per-mm huge-page PTE accounting: runs only when the _PAGE_PMD_HUGE bit
 * differs between the new pmd and the old (orig) pmd, i.e. on a
 * huge <-> non-huge transition.
 *
 * NOTE(review): this is an interior fragment of a larger function (the
 * enclosing definition and the declarations of 'pmd', 'orig' and 'mm'
 * are outside this chunk) -- confirm against the full file.
 */
if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
	/*
	 * Note that this routine only sets pmds for THP pages.
	 * Hugetlb pages are handled elsewhere. We need to check
	 * for huge zero page. Huge zero pages are like hugetlb
	 * pages in that there is no RSS, but there is the need
	 * for TSB entries. So, huge zero page counts go into
	 * hugetlb_pte_count.
	 */
	if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
		/* New mapping is huge: bump the matching counter. */
		if (is_huge_zero_pmd(pmd))
			mm->context.hugetlb_pte_count++;
		else
			mm->context.thp_pte_count++;
	} else {
		/* Old mapping was huge and is going away: drop it. */
		if (is_huge_zero_pmd(orig))
			mm->context.hugetlb_pte_count--;
		else
			mm->context.thp_pte_count--;
	}

	/* Do not try to allocate the TSB hash table if we
	 * don't have one already. We have various locks held
	 * and thus we'll end up doing a GFP_KERNEL allocation
	 * in an atomic context.
	 *
	 * Instead, we let the first TLB miss on a hugepage
	 * take care of this.
	 */
}
/*
 * NOTE(review): this span appears to splice two distinct fragments
 * together -- an unclosed "!pmd_none(orig)" path (whose 'addr',
 * 'orig_pte' and 'exec' locals are never used below), followed by what
 * looks like the tail of a pmdp_establish()-style routine (atomic pmd
 * swap via cmpxchg64, then accounting, then return). Verify against the
 * full, unmangled file before relying on this structure.
 */
if (!pmd_none(orig)) {
	/* Align down to the huge-page boundary covered by this pmd. */
	addr &= HPAGE_MASK;
	if (pmd_trans_huge(orig)) {
		pte_t orig_pte = __pte(pmd_val(orig));
		bool exec = pte_exec(orig_pte);

		/*
		 * Atomically install the new pmd: retry until the
		 * compare-and-exchange observes an unchanged *pmdp.
		 */
		do {
			old = *pmdp;
		} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);

		/* Update per-mm THP/hugetlb counters for the transition. */
		__set_pmd_acct(vma->vm_mm, address, old, pmd);

		return old;
	}
/*
 * pmdp_invalidate - invalidate a huge pmd prior to splitting a THP.
 * @vma:     VMA covering @address.
 * @address: virtual address mapped by @pmdp.
 * @pmdp:    pointer to the pmd entry being invalidated.
 *
 * Returns the previous pmd value.
 *
 * This routine is only called when splitting a THP.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old, entry;

	/*
	 * NOTE(review): in this chunk 'entry' is read below without any
	 * visible initialization, and the function body is truncated (no
	 * return / closing brace in view). The initialization of 'entry'
	 * from *pmdp and the trailing establish/flush/return presumably
	 * live in the part of the file not shown here -- confirm against
	 * the full source before relying on this fragment.
	 */

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_pmd(entry))
		(vma->vm_mm)->context.thp_pte_count--;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.