	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

again:
	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case caller
	 * proceeds to loop over next ptes, and finds a match later.
	 * Though, in most cases, page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					     pvmw->address, pmdvalp, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such
		 * page is not CPU accessible and thus is mapped as
		 * a special swap entry, nonetheless it still does
		 * count as a valid regular mapping for the page
		 * (and is accounted as such in page maps count).
		 *
		 * So handle this special case as if it was a normal
		 * page mapping ie lock CPU page table and return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}

	spin_lock(*ptlp);
	if (unlikely(!pmd_same(*pmdvalp, pmdp_get_lockless(pvmw->pmd)))) {
		pte_unmap_unlock(pvmw->pte, *ptlp);
		goto again;
	}
	pvmw->ptl = *ptlp;

	return true;
}
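
/*
 * Illustrative sketch, not part of the original file: the pfn-range
 * overlap test that check_pte(), documented below, is specified to
 * perform once it has extracted a pfn from the PTE.  The helper name
 * and parameter names are assumptions made purely for illustration.
 */
static inline bool pfn_overlaps_walk_range(unsigned long pfn,
					   unsigned long pte_nr,
					   unsigned long walk_pfn,
					   unsigned long walk_nr)
{
	/* The PTE maps [pfn, pfn + pte_nr); the walk looks for [walk_pfn, walk_pfn + walk_nr). */
	if (pfn + pte_nr - 1 < walk_pfn)
		return false;	/* PTE range ends before the walk range starts */
	if (pfn > walk_pfn + walk_nr - 1)
		return false;	/* PTE range starts after the walk range ends */
	return true;
}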

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
 * for checking
 * @pte_nr: the number of small pages described by @pvmw->pte.
 *
 * page_vma_mapped_walk() found a place where pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
 * entry that points to [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 *
 * Otherwise, return false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
 * adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem. Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw, pages_per_huge_page(hstate)))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    thp_vma_suitable_order(vma, pvmw->address,
						   PMD_ORDER) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw, &pmde, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw, 1))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

		if (!pvmw->ptl) {
			spin_lock(ptl);
			if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) {
				pte_unmap_unlock(pvmw->pte, ptl);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->ptl = ptl;
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
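
/*
 * Illustrative sketch, not part of the original file: the caller pattern
 * that the kernel-doc above describes.  The function name and the idea of
 * merely counting matching entries are assumptions for illustration; a
 * real caller (e.g. in mm/rmap.c) would act on each entry instead.
 */
static unsigned int count_mappings_in_vma(struct folio *folio,
					  struct vm_area_struct *vma,
					  unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = folio_pfn(folio),
		.nr_pages = folio_nr_pages(folio),
		.vma = vma,
		.address = address,
	};
	unsigned int mapped = 0;

	/*
	 * Each successful iteration returns with pvmw.ptl held and pvmw.pte
	 * (or pvmw.pmd for a PMD-mapped THP) pointing at a matching entry.
	 * When the walk finally returns false, it has already dropped the
	 * lock and unmapped the PTE.
	 */
	while (page_vma_mapped_walk(&pvmw))
		mapped++;

	/*
	 * A caller that breaks out of the loop before the walk returns
	 * false must call page_vma_mapped_walk_done(&pvmw) itself.
	 */
	return mapped;
}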

#ifdef CONFIG_MEMORY_FAILURE
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Return: The address the page is mapped at if the page is in the range
 * covered by the VMA and present in the page table. If the page is
 * outside the VMA or not present, returns -EFAULT.
 * Only valid for normal file or anonymous VMAs.
 */
unsigned long page_mapped_in_vma(const struct page *page, struct vm_area_struct *vma)
{
	const struct folio *folio = page_folio(page);
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};