/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */
/*
 * Build-time heads-up: with this config combination last_cpupid cannot be
 * packed into page flags, so the page frame grows instead.
 */
#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
	/* Only meaningful when userfaultfd write-protect is armed on the VMA. */
	if (!userfaultfd_wp(vmf->vma))
		return false;
	/* orig_pte is only valid if the fault path captured it. */
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_marker_uffd_wp(vmf->orig_pte);
}
/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif
#ifndef arch_wants_old_prefaulted_pte
/*
 * Default policy for the age of prefaulted PTEs when the architecture does
 * not override it: make them 'young'.
 */
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif
/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	/* Cache the PFN of the shared zero page for later zero-pfn checks. */
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);
/* Emit the rss_stat tracepoint for counter @member of @mm. */
void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
	trace_rss_stat(mm, member);
}
/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	/* Detach the PTE page from the pmd, then queue it for freeing. */
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}
/** * free_pgd_range - Unmap and free page tables in the range * @tlb: the mmu_gather containing pending TLB flush info * @addr: virtual address start * @end: virtual address end * @floor: lowest address boundary * @ceiling: highest address boundary * * This function tears down all user-level page tables in the * specified virtual address range [@addr..@end). It is part of * the memory unmap flow.
*/ void free_pgd_range(struct mmu_gather *tlb, unsignedlong addr, unsignedlong end, unsignedlong floor, unsignedlong ceiling)
{
pgd_t *pgd; unsignedlong next;
/* * The next few lines have given us lots of grief... * * Why are we testing PMD* at this top level? Because often * there will be no work to do at all, and we'd prefer not to * go all the way down to the bottom just to discover that. * * Why all these "- 1"s? Because 0 represents both the bottom * of the address space and the top of it (using -1 for the * top wouldn't help much: the masks would do the wrong thing). * The rule is that addr 0 and floor 0 refer to the bottom of * the address space, but end 0 and ceiling 0 refer to the top * Comparisons need to use "end - 1" and "ceiling - 1" (though * that end 0 case should be mythical). * * Wherever addr is brought up or ceiling brought down, we must * be careful to reject "the opposite 0" before it confuses the * subsequent tests. But what about where end is brought down * by PMD_SIZE below? no, end can't go down to 0 there. * * Whereas we round start (addr) and ceiling down, by different * masks at different levels, in order to test whether a table * now has no other vmas using it, so can be freed, we don't * bother to round floor or end up - the tests don't need that.
*/
addr &= PMD_MASK; if (addr < floor) {
addr += PMD_SIZE; if (!addr) return;
} if (ceiling) {
ceiling &= PMD_MASK; if (!ceiling) return;
} if (end - 1 > ceiling - 1)
end -= PMD_SIZE; if (addr > end - 1) return; /* * We add page table cache pages with PAGE_SIZE, * (see pte_free_tlb()), flush the tlb if we need
*/
tlb_change_page_size(tlb, PAGE_SIZE);
pgd = pgd_offset(tlb->mm, addr); do {
next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue;
free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
}
do { unsignedlong addr = vma->vm_start; struct vm_area_struct *next;
/* * Note: USER_PGTABLES_CEILING may be passed as ceiling and may * be 0. This will underflow and is okay.
*/
next = mas_find(mas, ceiling - 1); if (unlikely(xa_is_zero(next)))
next = NULL;
/* * Hide vma from rmap and truncate_pagecache before freeing * pgtables
*/ if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
mm_inc_nr_ptes(mm); /* * Ensure all pte setup (eg. pte page lock and page clearing) are * visible before the pte is made visible to other CPUs by being * put into page tables. * * The other side of the story is the pointer chasing in the page * table walking code (when walking the page table without locking; * ie. most of the time). Fortunately, these data accesses consist * of a chain of data-dependent loads, meaning most CPUs (alpha * being the notable exception) will already guarantee loads are * seen in-order. See the alpha page table accessors for the * smp_rmb() barriers in page table walking code.
*/
smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
pmd_populate(mm, pmd, *pte);
*pte = NULL;
}
spin_unlock(ptl);
}
/*
 * Allocate a user PTE page table and try to install it at @pmd.
 *
 * pmd_install() consumes @new (NULLs it) when it populates the pmd; if
 * another thread raced and populated it first, the unused table is freed.
 * Returns 0 on success, -ENOMEM if allocation fails.
 */
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
}
/*
 * Allocate a kernel PTE page table and install it at @pmd under
 * init_mm.page_table_lock, freeing it if another thread won the race.
 * Returns 0 on success, -ENOMEM if allocation fails.
 */
int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		smp_wmb(); /* See comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}
/* Fold a per-walk rss[] delta vector into @mm's RSS counters. */
static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}
/* * This function is called to print an error when a bad pte * is found. For example, we might have a PFN-mapped pte in * a region that doesn't allow it. * * The calling function must still handle the error.
*/ staticvoid print_bad_pte(struct vm_area_struct *vma, unsignedlong addr,
pte_t pte, struct page *page)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping;
pgoff_t index; staticunsignedlong resume; staticunsignedlong nr_shown; staticunsignedlong nr_unshown;
/* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second.
*/ if (nr_shown == 60) { if (time_before(jiffies, resume)) {
nr_unshown++; return;
} if (nr_unshown) {
pr_alert("BUG: Bad page map: %lu messages suppressed\n",
nr_unshown);
nr_unshown = 0;
}
nr_shown = 0;
} if (nr_shown++ == 0)
resume = jiffies + 60 * HZ;
/* * vm_normal_page -- This function gets the "struct page" associated with a pte. * * "Special" mappings do not wish to be associated with a "struct page" (either * it doesn't exist, or it exists but they don't want to touch it). In this * case, NULL is returned here. "Normal" mappings do have a struct page. * * There are 2 broad cases. Firstly, an architecture may define a pte_special() * pte bit, in which case this function is trivial. Secondly, an architecture * may not have a spare pte bit, which requires a more complicated scheme, * described below. * * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a * special mapping (even if there are underlying and valid "struct pages"). * COWed pages of a VM_PFNMAP are always normal. * * The way we recognize COWed pages within VM_PFNMAP mappings is through the * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit * set, and the vm_pgoff will point to the first PFN mapped: thus every special * mapping will always honor the rule * * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) * * And for normal mappings this is false. * * This restricts such mappings to be a linear translation from virtual address * to pfn. To get around this restriction, we allow arbitrary mappings so long * as the vma is not a COW mapping; in that case, we know that all ptes are * special (because none can have been COWed). * * * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. * * VM_MIXEDMAP mappings can likewise contain memory with or without "struct * page" backing, however the difference is that _all_ pages with a struct * page (that is, those where pfn_valid is true) are refcounted and considered * normal pages by the VM. The only exception are zeropages, which are * *never* refcounted. * * The disadvantage is that pages are refcounted (which can be slower and * simply not an option for some PFNMAP users). 
The advantage is that we * don't have to follow the strict linearity rule of PFNMAP mappings in * order to support COWable mappings. *
*/ struct page *vm_normal_page(struct vm_area_struct *vma, unsignedlong addr,
pte_t pte)
{ unsignedlong pfn = pte_pfn(pte);
if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) { if (likely(!pte_special(pte))) goto check_pfn; if (vma->vm_ops && vma->vm_ops->find_special_page) return vma->vm_ops->find_special_page(vma, addr); if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; if (is_zero_pfn(pfn)) return NULL;
/* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist.
*/
out:
VM_WARN_ON_ONCE(is_zero_pfn(pfn)); return pfn_to_page(pfn);
}
if (page) return page_folio(page); return NULL;
} #endif
/** * restore_exclusive_pte - Restore a device-exclusive entry * @vma: VMA covering @address * @folio: the mapped folio * @page: the mapped folio page * @address: the virtual address * @ptep: pte pointer into the locked page table mapping the folio page * @orig_pte: pte value at @ptep * * Restore a device-exclusive non-swap entry to an ordinary present pte. * * The folio and the page table must be locked, and MMU notifiers must have * been called to invalidate any (exclusive) device mappings. * * Locking the folio makes sure that anybody who just converted the pte to * a device-exclusive entry can map it into the device to make forward * progress without others converting it back until the folio was unlocked. * * If the folio lock ever becomes an issue, we can stop relying on the folio * lock; it might make some scenarios with heavy thrashing less likely to * make forward progress, but these scenarios might not be valid use cases. * * Note that the folio lock does not protect against all cases of concurrent * page table modifications (e.g., MADV_DONTNEED, mprotect), so device drivers * must use MMU notifiers to sync against any concurrent changes.
*/ staticvoid restore_exclusive_pte(struct vm_area_struct *vma, struct folio *folio, struct page *page, unsignedlong address,
pte_t *ptep, pte_t orig_pte)
{
pte_t pte;
/*
 * NOTE(review): the body appears truncated in this excerpt — `pte` is
 * declared but never constructed or installed at @ptep; only the MMU
 * cache update below remains. Verify against the upstream source.
 */
/* * No need to invalidate - it was non-present before. However * secondary CPUs may have mappings that need invalidating.
*/
update_mmu_cache(vma, address, ptep);
}
/* * Tries to restore an exclusive pte if the page lock can be acquired without * sleeping.
*/ staticint try_restore_exclusive_pte(struct vm_area_struct *vma, unsignedlong addr, pte_t *ptep, pte_t orig_pte)
{ struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte)); struct folio *folio = page_folio(page);
/* * copy one vm_area from one task to the other. Assumes the page tables * already present in the new task to be cleared in the whole range * covered by this vma.
*/
if (likely(!non_swap_entry(entry))) { if (swap_duplicate(entry) < 0) return -EIO;
/* make sure dst_mm is on swapoff's mmlist. */ if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock); if (list_empty(&dst_mm->mmlist))
list_add(&dst_mm->mmlist,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
} /* Mark the swap entry as shared. */ if (pte_swp_exclusive(orig_pte)) {
pte = pte_swp_clear_exclusive(orig_pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
rss[MM_SWAPENTS]++;
} elseif (is_migration_entry(entry)) {
folio = pfn_swap_entry_folio(entry);
rss[mm_counter(folio)]++;
if (!is_readable_migration_entry(entry) &&
is_cow_mapping(vm_flags)) { /* * COW mappings require pages in both parent and child * to be set to read. A previously exclusive entry is * now shared.
*/
entry = make_readable_migration_entry(
swp_offset(entry));
pte = swp_entry_to_pte(entry); if (pte_swp_soft_dirty(orig_pte))
pte = pte_swp_mksoft_dirty(pte); if (pte_swp_uffd_wp(orig_pte))
pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
} elseif (is_device_private_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
folio = page_folio(page);
/* * Update rss count even for unaddressable pages, as * they should treated just like normal pages in this * respect. * * We will likely want to have some new rss counters * for unaddressable pages, at some point. But for now * keep things as they are.
*/
folio_get(folio);
rss[mm_counter(folio)]++; /* Cannot fail as these pages cannot get pinned. */
folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);
/* * We do not preserve soft-dirty information, because so * far, checkpoint/restore is the only feature that * requires that. And checkpoint/restore does not work * when a device driver is involved (you cannot easily * save and restore device driver state).
*/ if (is_writable_device_private_entry(entry) &&
is_cow_mapping(vm_flags)) {
entry = make_readable_device_private_entry(
swp_offset(entry));
pte = swp_entry_to_pte(entry); if (pte_swp_uffd_wp(orig_pte))
pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
} elseif (is_device_exclusive_entry(entry)) { /* * Make device exclusive entries present by restoring the * original entry then copying as for a present pte. Device * exclusive entries currently only support private writable * (ie. COW) mappings.
*/
VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags)); if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte)) return -EBUSY; return -ENOENT;
} elseif (is_pte_marker_entry(entry)) {
pte_marker marker = copy_pte_marker(entry, dst_vma);
/* * Copy a present and normal page. * * NOTE! The usual case is that this isn't required; * instead, the caller can just increase the page refcount * and re-use the pte the traditional way. * * And if we need a pre-allocated page but don't yet have * one, return a negative error to let the preallocation * code know so that it can do so outside the page table * lock.
*/ staticinlineint
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, unsignedlong addr, int *rss, struct folio **prealloc, struct page *page)
{ struct folio *new_folio;
pte_t pte;
/* Without a preallocated folio we cannot copy here; caller must retry. */
new_folio = *prealloc; if (!new_folio) return -EAGAIN;
/* * We have a prealloc page, all good! Take it * over and copy the page & arm it.
*/
/* Machine-check-aware copy: fail with -EHWPOISON if the source is bad. */
if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma)) return -EHWPOISON;
/*
 * NOTE(review): relative to the header comment, steps such as marking
 * the new folio uptodate, rmap/refcount accounting, and clearing
 * *prealloc are not visible in this excerpt — confirm against upstream.
 */
/* All done, just insert the new page copy in the child */
pte = folio_mk_pte(new_folio, dst_vma->vm_page_prot);
pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma); if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte))) /* Uffd-wp needs to be delivered to dest pte as well */
pte = pte_mkuffd_wp(pte);
set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); return 0;
}
/* If it's a COW mapping, write protect it both processes. */ if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
wrprotect_ptes(src_mm, addr, src_pte, nr);
pte = pte_wrprotect(pte);
}
/* If it's a shared mapping, mark it clean in the child. */ if (src_vma->vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
if (!userfaultfd_wp(dst_vma))
pte = pte_clear_uffd_wp(pte);
/* * Copy one present PTE, trying to batch-process subsequent PTEs that map * consecutive pages of the same folio by copying them as well. * * Returns -EAGAIN if one preallocated page is required to copy the next PTE. * Otherwise, returns the number of copied PTEs (at least 1).
*/ staticinlineint
copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsignedlong addr, int max_nr, int *rss, struct folio **prealloc)
{
fpb_t flags = FPB_MERGE_WRITE; struct page *page; struct folio *folio; int err, nr;
page = vm_normal_page(src_vma, addr, pte); if (unlikely(!page)) goto copy_pte;
folio = page_folio(page);
/* * If we likely have to copy, just don't bother with batching. Make * sure that the common "small folio" case is as fast as possible * by keeping the batching logic separate.
*/ if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) { if (!(src_vma->vm_flags & VM_SHARED))
flags |= FPB_RESPECT_DIRTY; if (vma_soft_dirty_enabled(src_vma))
flags |= FPB_RESPECT_SOFT_DIRTY;
folio_get(folio); if (folio_test_anon(folio)) { /* * If this page may have been pinned by the parent process, * copy the page immediately for the child so that we'll always * guarantee the pinned page won't be randomly replaced in the * future.
*/ if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) { /* Page may be pinned, we have to copy. */
folio_put(folio);
err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
addr, rss, prealloc, page); return err ? err : 1;
}
rss[MM_ANONPAGES]++;
VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
} else {
folio_dup_file_rmap_pte(folio, page, dst_vma);
rss[mm_counter_file(folio)]++;
}
/* * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the * error handling here, assume that exclusive mmap_lock on dst and src * protects anon from unexpected THP transitions; with shmem and file * protected by mmap_lock-less collapse skipping areas with anon_vma * (whereas vma_needs_copy() skips areas without anon_vma). A rework * can remove such assumptions later, but this is good enough for now.
*/
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); if (!dst_pte) {
ret = -ENOMEM; goto out;
}
/* * We already hold the exclusive mmap_lock, the copy_pte_range() and * retract_page_tables() are using vma->anon_vma to be exclusive, so * the PTE page is stable, and there is no need to get pmdval and do * pmd_same() check.
*/
src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval,
&src_ptl); if (!src_pte) {
pte_unmap_unlock(dst_pte, dst_ptl); /* ret == 0 */ goto out;
}
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
orig_src_pte = src_pte;
orig_dst_pte = dst_pte;
arch_enter_lazy_mmu_mode();
do {
nr = 1;
/* * We are holding two locks at this point - either of them * could generate latencies in another task on another CPU.
*/ if (progress >= 32) {
progress = 0; if (need_resched() ||
spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) break;
}
ptent = ptep_get(src_pte); if (pte_none(ptent)) {
progress++; continue;
} if (unlikely(!pte_present(ptent))) {
ret = copy_nonpresent_pte(dst_mm, src_mm,
dst_pte, src_pte,
dst_vma, src_vma,
addr, rss); if (ret == -EIO) {
entry = pte_to_swp_entry(ptep_get(src_pte)); break;
} elseif (ret == -EBUSY) { break;
} elseif (!ret) {
progress += 8; continue;
}
ptent = ptep_get(src_pte);
VM_WARN_ON_ONCE(!pte_present(ptent));
/* * Device exclusive entry restored, continue by copying * the now present pte.
*/
WARN_ON_ONCE(ret != -ENOENT);
} /* copy_present_ptes() will clear `*prealloc' if consumed */
max_nr = (end - addr) / PAGE_SIZE;
ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte,
ptent, addr, max_nr, rss, &prealloc); /* * If we need a pre-allocated page for this pte, drop the * locks, allocate, and try again. * If copy failed due to hwpoison in source page, break out.
*/ if (unlikely(ret == -EAGAIN || ret == -EHWPOISON)) break; if (unlikely(prealloc)) { /* * pre-alloc page cannot be reused by next time so as * to strictly follow mempolicy (e.g., alloc_page_vma() * will allocate page according to address). This * could only happen if one pinned pte changed.
*/
folio_put(prealloc);
prealloc = NULL;
}
nr = ret;
progress += 8 * nr;
} while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
addr != end);
dst_pud = pud_alloc(dst_mm, dst_p4d, addr); if (!dst_pud) return -ENOMEM;
src_pud = pud_offset(src_p4d, addr); do {
next = pud_addr_end(addr, end); if (pud_trans_huge(*src_pud)) { int err;
VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
err = copy_huge_pud(dst_mm, src_mm,
dst_pud, src_pud, addr, src_vma); if (err == -ENOMEM) return -ENOMEM; if (!err) continue; /* fall through */
} if (pud_none_or_clear_bad(src_pud)) continue; if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
addr, next)) return -ENOMEM;
} while (dst_pud++, src_pud++, addr = next, addr != end); return 0;
}
dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); if (!dst_p4d) return -ENOMEM;
src_p4d = p4d_offset(src_pgd, addr); do {
next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(src_p4d)) continue; if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
addr, next)) return -ENOMEM;
} while (dst_p4d++, src_p4d++, addr = next, addr != end); return 0;
}
/* * Return true if the vma needs to copy the pgtable during this fork(). Return * false when we can speed up fork() by allowing lazy page faults later until * when the child accesses the memory range.
*/ staticbool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{ /* * Always copy pgtables when dst_vma has uffd-wp enabled even if it's * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable * contains uffd-wp protection information, that's something we can't * retrieve from page cache, and skip copying will lose those info.
*/ if (userfaultfd_wp(dst_vma)) returntrue;
if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) returntrue;
if (src_vma->anon_vma) returntrue;
/* * Don't copy ptes where a page fault will fill them correctly. Fork * becomes much lighter when there are big shared or private readonly * mappings. The tradeoff is that copy_page_range is more efficient * than faulting.
*/ returnfalse;
}
if (is_vm_hugetlb_page(src_vma)) return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
/* * We need to invalidate the secondary MMU mappings only when * there could be a permission downgrade on the ptes of the * parent mm. And a permission downgrade will only happen if * is_cow_mapping() returns true.
*/
is_cow = is_cow_mapping(src_vma->vm_flags);
if (is_cow) {
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
0, src_mm, addr, end);
mmu_notifier_invalidate_range_start(&range); /* * Disabling preemption is not needed for the write side, as * the read side doesn't spin, but goes to the mmap_lock. * * Use the raw variant of the seqcount_t write API to avoid * lockdep complaining about preemptibility.
*/
vma_assert_write_locked(src_vma);
raw_write_seqcount_begin(&src_mm->write_protect_seq);
}
ret = 0;
dst_pgd = pgd_offset(dst_mm, addr);
src_pgd = pgd_offset(src_mm, addr); do {
next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(src_pgd)) continue; if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
addr, next))) {
ret = -ENOMEM; break;
}
} while (dst_pgd++, src_pgd++, addr = next, addr != end);
if (is_cow) {
raw_write_seqcount_end(&src_mm->write_protect_seq);
mmu_notifier_invalidate_range_end(&range);
} return ret;
}
/* Whether we should zap all COWed (private) pages too */ staticinlinebool should_zap_cows(struct zap_details *details)
{ /* By default, zap all pages */ if (!details || details->reclaim_pt) returntrue;
/* Or, we zap COWed pages only if the caller wants to */ return details->even_cows;
}
/* Decides whether we should zap this folio with the folio pointer specified */ staticinlinebool should_zap_folio(struct zap_details *details, struct folio *folio)
{ /* If we can make a decision without *folio.. */ if (should_zap_cows(details)) returntrue;
/* Otherwise we should only zap non-anon folios */ return !folio_test_anon(folio);
}
staticinlinebool zap_drop_markers(struct zap_details *details)
{ if (!details) returnfalse;
/* * This function makes sure that we'll replace the none pte with an uffd-wp * swap special pte marker when necessary. Must be with the pgtable lock held. * * Returns true if uffd-wp ptes was installed, false otherwise.
*/ staticinlinebool
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsignedlong addr, pte_t *pte, int nr, struct zap_details *details, pte_t pteval)
{ bool was_installed = false;
#ifdef CONFIG_PTE_MARKER_UFFD_WP /* Zap on anonymous always means dropping everything */ if (vma_is_anonymous(vma)) returnfalse;
if (zap_drop_markers(details)) returnfalse;
for (;;) { /* the PFN in the PTE is irrelevant. */ if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
was_installed = true; if (--nr == 0) break;
pte++;
addr += PAGE_SIZE;
} #endif return was_installed;
}
/* * Zap or skip at least one present PTE, trying to batch-process subsequent * PTEs that map consecutive pages of the same folio. * * Returns the number of processed (skipped or zapped) PTEs (at least 1).
*/ staticinlineint zap_present_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_t *pte, pte_t ptent, unsignedint max_nr, unsignedlong addr, struct zap_details *details, int *rss, bool *force_flush, bool *force_break, bool *any_skipped)
{ struct mm_struct *mm = tlb->mm; struct folio *folio; struct page *page; int nr;
/*
 * NOTE(review): `folio` and `page` are read below but never assigned in
 * this excerpt — the page/folio lookup step (and its special-page
 * handling) appears to be missing. Confirm against the upstream source.
 */
/* * Make sure that the common "small folio" case is as fast as possible * by keeping the batching logic separate.
*/ if (unlikely(folio_test_large(folio) && max_nr != 1)) {
nr = folio_pte_batch(folio, pte, ptent, max_nr);
zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
addr, details, rss, force_flush,
force_break, any_skipped); return nr;
}
zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
details, rss, force_flush, force_break, any_skipped); return 1;
}
staticinlineint zap_nonpresent_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_t *pte, pte_t ptent, unsignedint max_nr, unsignedlong addr, struct zap_details *details, int *rss, bool *any_skipped)
{
swp_entry_t entry; int nr = 1;
if (unlikely(!should_zap_folio(details, folio))) return 1; /* * Both device private/exclusive mappings should only * work with anonymous page so far, so we don't need to * consider uffd-wp bit when zap. For more information, * see zap_install_uffd_wp_if_needed().
*/
WARN_ON_ONCE(!vma_is_anonymous(vma));
rss[mm_counter(folio)]--;
folio_remove_rmap_pte(folio, page, vma);
folio_put(folio);
} elseif (!non_swap_entry(entry)) { /* Genuine swap entries, hence a private anon pages */ if (!should_zap_cows(details)) return 1;
if (!should_zap_folio(details, folio)) return 1;
rss[mm_counter(folio)]--;
} elseif (pte_marker_entry_uffd_wp(entry)) { /* * For anon: always drop the marker; for file: only * drop the marker if explicitly requested.
*/ if (!vma_is_anonymous(vma) && !zap_drop_markers(details)) return 1;
} elseif (is_guard_swp_entry(entry)) { /* * Ordinary zapping should not remove guard PTE * markers. Only do so if we should remove PTE markers * in general.
*/ if (!zap_drop_markers(details)) return 1;
} elseif (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { if (!should_zap_cows(details)) return 1;
} else { /* We should have covered all the swap entry types */
pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
WARN_ON_ONCE(1);
}
clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm);
*any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode(); do { bool any_skipped = false;
if (need_resched()) {
direct_reclaim = false; break;
}
nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
&force_flush, &force_break, &any_skipped); if (any_skipped)
can_reclaim_pt = false; if (unlikely(force_break)) {
addr += nr * PAGE_SIZE;
direct_reclaim = false; break;
}
} while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
/* * Fast path: try to hold the pmd lock and unmap the PTE page. * * If the pte lock was released midway (retry case), or if the attempt * to hold the pmd lock failed, then we need to recheck all pte entries * to ensure they are still none, thereby preventing the pte entries * from being repopulated by another thread.
*/ if (can_reclaim_pt && direct_reclaim && addr == end)
direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
/* Do the actual TLB flush before dropping ptl */ if (force_flush) {
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_rmaps(tlb, vma);
}
pte_unmap_unlock(start_pte, ptl);
/* * If we forced a TLB flush (either due to running out of * batch buffers or because we needed to flush dirty TLB * entries before releasing the ptl), free the batched * memory too. Come back again if we didn't do everything.
*/ if (force_flush)
tlb_flush_mmu(tlb);
if (start >= vma->vm_end) return;
end = min(vma->vm_end, end_addr); if (end <= vma->vm_start) return;
if (vma->vm_file)
uprobe_munmap(vma, start, end);
if (start != end) { if (unlikely(is_vm_hugetlb_page(vma))) { /* * It is undesirable to test vma->vm_file as it * should be non-null for valid hugetlb area. * However, vm_file will be NULL in the error * cleanup path of mmap_region. When * hugetlbfs ->mmap method fails, * mmap_region() nullifies vma->vm_file * before calling this function to clean up. * Since no pte has actually been setup, it is * safe to do nothing in this case.
*/ if (vma->vm_file) {
zap_flags_t zap_flags = details ?
details->zap_flags : 0;
__unmap_hugepage_range(tlb, vma, start, end,
NULL, zap_flags);
}
} else
unmap_page_range(tlb, vma, start, end, details);
}
}
/** * unmap_vmas - unmap a range of memory covered by a list of vma's * @tlb: address of the caller's struct mmu_gather * @mas: the maple state * @vma: the starting vma * @start_addr: virtual address at which to start unmapping * @end_addr: virtual address at which to end unmapping * @tree_end: The maximum index to check * @mm_wr_locked: lock flag * * Unmap all pages in the vma list. * * Only addresses between `start' and `end' will be unmapped. * * The VMA list must be sorted in ascending virtual address order. * * unmap_vmas() assumes that the caller will flush the whole unmapped address * range after unmap_vmas() returns. So the only responsibility here is to * ensure that any thus-far unmapped pages are flushed before unmap_vmas() * drops the lock and schedules.
*/ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *vma, unsignedlong start_addr, unsignedlong end_addr, unsignedlong tree_end, bool mm_wr_locked)
{ struct mmu_notifier_range range; struct zap_details details = {
.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP, /* Careful - we need to zap private pages too! */
.even_cows = true,
};
/** * zap_page_range_single_batched - remove user pages in a given range * @tlb: pointer to the caller's struct mmu_gather * @vma: vm_area_struct holding the applicable pages * @address: starting address of pages to remove * @size: number of bytes to remove * @details: details of shared cache invalidation * * @tlb shouldn't be NULL. The range must fit into one VMA. If @vma is for * hugetlb, @tlb is flushed and re-initialized by this function.
*/ void zap_page_range_single_batched(struct mmu_gather *tlb, struct vm_area_struct *vma, unsignedlong address, unsignedlong size, struct zap_details *details)
{ constunsignedlong end = address + size; struct mmu_notifier_range range;
VM_WARN_ON_ONCE(!tlb || tlb->mm != vma->vm_mm);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, end);
hugetlb_zap_begin(vma, &range.start, &range.end);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range); /* * unmap 'address-end' not 'range.start-range.end' as range * could have been expanded for hugetlb pmd sharing.
*/
unmap_single_vma(tlb, vma, address, end, details, false);
mmu_notifier_invalidate_range_end(&range); if (is_vm_hugetlb_page(vma)) { /* * flush tlb and free resources before hugetlb_zap_end(), to * avoid concurrent page faults' allocation failure.
*/
tlb_finish_mmu(tlb);
hugetlb_zap_end(vma, details);
tlb_gather_mmu(tlb, vma->vm_mm);
}
}
/** * zap_page_range_single - remove user pages in a given range * @vma: vm_area_struct holding the applicable pages * @address: starting address of pages to zap * @size: number of bytes to zap * @details: details of shared cache invalidation * * The range must fit into one VMA.
*/ void zap_page_range_single(struct vm_area_struct *vma, unsignedlong address, unsignedlong size, struct zap_details *details)
{ struct mmu_gather tlb;
/** * zap_vma_ptes - remove ptes mapping the vma * @vma: vm_area_struct holding ptes to be zapped * @address: starting address of pages to zap * @size: number of bytes to zap * * This function only unmaps ptes assigned to VM_PFNMAP vmas. * * The entire address range must be fully contained within the vma. *
*/ void zap_vma_ptes(struct vm_area_struct *vma, unsignedlong address, unsignedlong size)
{ if (!range_in_vma(vma, address, address + size) ||
!(vma->vm_flags & VM_PFNMAP)) return;
if (!pmd) return NULL; return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
staticbool vm_mixed_zeropage_allowed(struct vm_area_struct *vma)
{
VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP); /* * Whoever wants to forbid the zeropage after some zeropages * might already have been mapped has to scan the page tables and * bail out on any zeropages. Zeropages in COW mappings can * be unshared using FAULT_FLAG_UNSHARE faults.
*/ if (mm_forbids_zeropage(vma->vm_mm)) returnfalse; /* zeropages in COW mappings are common and unproblematic. */ if (is_cow_mapping(vma->vm_flags)) returntrue; /* Mappings that do not allow for writable PTEs are unproblematic. */ if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) returntrue; /* * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could * find the shared zeropage and longterm-pin it, which would * be problematic as soon as the zeropage gets replaced by a different * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would * now differ to what GUP looked up. FSDAX is incompatible to * FOLL_LONGTERM and VM_IO is incompatible to GUP completely (see * check_vma_flags).
*/ return vma->vm_ops && vma->vm_ops->pfn_mkwrite &&
(vma_is_fsdax(vma) || vma->vm_flags & VM_IO);
}
/* Allocate the PTE if necessary; takes PMD lock once only. */
ret = -ENOMEM; if (pte_alloc(mm, pmd)) goto out;
while (pages_to_write_in_pmd) { int pte_idx = 0; constint batch_size = min_t(int, pages_to_write_in_pmd, 8);
start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); if (!start_pte) {
ret = -EFAULT; goto out;
} for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { int err = insert_page_in_batch_locked(vma, pte,
addr, pages[curr_page_idx], prot); if (unlikely(err)) {
pte_unmap_unlock(start_pte, pte_lock);
ret = err;
remaining_pages_total -= pte_idx; goto out;
}
addr += PAGE_SIZE;
++curr_page_idx;
}
pte_unmap_unlock(start_pte, pte_lock);
pages_to_write_in_pmd -= batch_size;
remaining_pages_total -= batch_size;
} if (remaining_pages_total) goto more;
ret = 0;
out:
*num = remaining_pages_total; return ret;
}
/** * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. * @vma: user vma to map to * @addr: target start user address of these pages * @pages: source kernel pages * @num: in: number of pages to map. out: number of pages that were *not* * mapped. (0 means all pages were successfully mapped). * * Preferred over vm_insert_page() when inserting multiple pages. * * In case of error, we may have mapped a subset of the provided * pages. It is the caller's responsibility to account for this case. * * The same restrictions apply as in vm_insert_page().
*/ int vm_insert_pages(struct vm_area_struct *vma, unsignedlong addr, struct page **pages, unsignedlong *num)
{ constunsignedlong end_addr = addr + (*num * PAGE_SIZE) - 1;
if (addr < vma->vm_start || end_addr >= vma->vm_end) return -EFAULT; if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vm_flags_set(vma, VM_MIXEDMAP);
} /* Defer page refcount checking till we're about to map that page. */ return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_pages);
/** * vm_insert_page - insert single page into user vma * @vma: user vma to map to * @addr: target user address of this page * @page: source kernel page
/*
 * NOTE(review): the following text is website boilerplate accidentally
 * captured with this source excerpt; it is not part of the kernel code.
 * Original German disclaimer, translated: "The information on this website
 * has been compiled carefully and to the best of our knowledge. However,
 * neither completeness, nor correctness, nor quality of the information
 * provided is guaranteed. Note: the colored syntax display is still
 * experimental."
 */