// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with freeing of page tables,
 * along with common page table handling code.
 *
 *  Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}
/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * this on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte, unsigned long addr)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (!is_kernel_addr(addr))
			return 1;
	}
	return 0;
}
static struct folio *maybe_pte_to_folio(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page_folio(page);
}
#ifdef CONFIG_PPC_BOOK3S
/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
					     cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct folio *folio = maybe_pte_to_folio(pte);

		if (!folio)
			return pte;
		if (!test_bit(PG_dcache_clean, &folio->flags)) {
			flush_dcache_icache_folio(folio);
			set_bit(PG_dcache_clean, &folio->flags);
		}
	}
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */
/* Embedded type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 *
 * This is also called once per folio, so only work with folio->flags here.
 */
static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
	struct folio *folio;

	if (radix_enabled())
		return pte;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return set_pte_filter_hash(pte, addr);

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte, addr))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	folio = maybe_pte_to_folio(pte);
	if (unlikely(!folio))
		return pte;
	/* If the page is clean, we move on */
	if (test_bit(PG_dcache_clean, &folio->flags))
		return pte;
	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_folio(folio);
		set_bit(PG_dcache_clean, &folio->flags);
		return pte;
}
	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}
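
/*
 * Illustrative sketch, not part of the original file: how the filter above
 * treats the first (non-exec) touch of a user text page on a non-hash MMU.
 * The helper name and its caller context are hypothetical.
 */
static pte_t __maybe_unused example_first_touch_filter(pte_t pte,
						       unsigned long uaddr)
{
	/*
	 * For a present, non-special user page whose folio is not yet
	 * PG_dcache_clean, and outside an exec fault, set_pte_filter()
	 * returns the PTE with _PAGE_EXEC stripped. The permission is
	 * recovered on the first exec fault, once I$/D$ are coherent.
	 */
	return set_pte_filter(pte, uaddr);
}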
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct folio *folio;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return pte;
	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out.
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;
#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault(),
	 * but let's make sure of it.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */
/* If you set _PAGE_EXEC on weird pages you're on your own */
	folio = maybe_pte_to_folio(pte);
	if (unlikely(!folio))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_dcache_clean, &folio->flags))
		goto bail;
/* Clean the page and set PG_dcache_clean */
flush_dcache_icache_folio(folio);
set_bit(PG_dcache_clean, &folio->flags);
bail:
	return pte_mkexec(pte);
}
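
/*
 * Illustrative sketch, not from the original source: the exec-fault side of
 * the lazy coherency scheme above. example_exec_fault_fixup() is
 * hypothetical and simply mirrors what ptep_set_access_flags() does below.
 */
static pte_t __maybe_unused example_exec_fault_fixup(pte_t pte,
						     struct vm_area_struct *vma)
{
	/*
	 * On an exec fault with _PAGE_EXEC clear and dirty == 0, the filter
	 * flushes the folio's dcache into the icache, sets PG_dcache_clean
	 * and hands back the PTE with exec permission restored.
	 */
	return set_access_flags_filter(pte, vma, 0);
}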
/*
 * set_ptes stores a range of Linux PTEs into the Linux page table.
 */
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
	      pte_t pte, unsigned int nr)
{
	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called. Filter the pte value and use the filtered value
	 * to set up all the PTEs in the range.
*/
pte = set_pte_filter(pte, addr);
	/*
	 * We don't need to call arch_enter/leave_lazy_mmu_mode()
	 * because we expect set_ptes to only be used on PTEs that are
	 * not present and not hw_valid. Hence there is no translation
	 * cache flush involved that would need to be batched.
	 */
	for (;;) {
		/*
		 * Make sure the hardware valid bit is not set. We don't do
		 * a TLB flush for this update.
*/
VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
/* Perform the setting of the PTE */
		__set_pte_at(mm, addr, ptep, pte, 0);
		if (--nr == 0)
			break;
ptep++;
addr += PAGE_SIZE;
pte = pte_next_pfn(pte);
}
}
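
/*
 * Usage sketch, not part of the original file: map a physically contiguous
 * 4-page buffer with a single set_ptes() call. The helper name and
 * parameters are hypothetical; the caller must hold the PTE page lock.
 */
static void __maybe_unused example_set_four_ptes(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep, pte_t first_pte)
{
	/*
	 * first_pte describes page 0 only; set_ptes() advances the pfn via
	 * pte_next_pfn() for each later entry, so a single template PTE
	 * plus a count covers the whole range.
	 */
	set_ptes(mm, addr, ptep, first_pte, 4);
}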
void unmap_kernel_page(unsigned long va)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep = pte_offset_kernel(pmdp, va);

	pte_clear(&init_mm, va, ptep);
flush_tlb_kernel_range(va, va + PAGE_SIZE);
}
/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, pte_t entry, int dirty)
{
	int changed;

entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
assert_pte_locked(vma->vm_mm, address);
__ptep_set_access_flags(vma, ptep, entry,
address, mmu_virtual_psize);
	}
	return changed;
}
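
/*
 * Usage sketch, not from the original file: a minor-fault style update that
 * marks a PTE young and dirty. example_make_young_dirty() is hypothetical.
 */
static int __maybe_unused example_make_young_dirty(struct vm_area_struct *vma,
						   unsigned long address,
						   pte_t *ptep)
{
	pte_t entry = pte_mkyoung(pte_mkdirty(*ptep));

	/*
	 * Returns 1 only if the PTE actually changed; the caller would then
	 * typically call update_mmu_cache() so the hardware picks up the new
	 * flags. The PTE lock must be held, as the assert above enforces.
	 */
	return ptep_set_access_flags(vma, address, ptep, entry, 1);
}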
#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
*/
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif
#else
		/*
		 * Not used on non-book3s64 platforms.
		 * 8xx compares it with mmu_virtual_psize to
		 * know if it is a huge page or not.
*/
		psize = MMU_PAGE_COUNT;
#endif
__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}
#endif /* CONFIG_HUGETLB_PAGE */
#if defined(CONFIG_PPC_8xx)
#if defined(CONFIG_SPLIT_PTE_PTLOCKS) || defined(CONFIG_SPLIT_PMD_PTLOCKS)
/* We need the same lock to protect the PMD table and the two PTE tables. */
#error "8M hugetlb folios are incompatible with split page table locks"
#endif
static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val)
{
	pte_basic_t *entry = (pte_basic_t *)ptep;
	int num, i;

	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a TLB flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	num = number_of_cells_per_pte(pmd, val, 1);

	for (i = 0; i < num; i++, entry++, val += SZ_4K)
		*entry = val;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		     pte_t pte, unsigned long sz)
{
	pmd_t *pmdp = pmd_off(mm, addr);

	pte = set_pte_filter(pte, addr);

	if (sz == SZ_8M) { /* Flag both PMD entries as 8M and fill both page tables */
*pmdp = __pmd(pmd_val(*pmdp) | _PMD_PAGE_8M);
		*(pmdp + 1) = __pmd(pmd_val(*(pmdp + 1)) | _PMD_PAGE_8M);

		__set_huge_pte_at(pmdp, pte_offset_kernel(pmdp, 0), pte_val(pte));
		__set_huge_pte_at(pmdp, pte_offset_kernel(pmdp + 1, 0), pte_val(pte) + SZ_4M);
	} else {
		__set_huge_pte_at(pmdp, ptep, pte_val(pte));
	}
}
#endif /* CONFIG_PPC_8xx */

#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (mm == &init_mm)
		return;
pgd = mm->pgd + pgd_index(addr);
BUG_ON(pgd_none(*pgd));
p4d = p4d_offset(pgd, addr);
BUG_ON(p4d_none(*p4d));
pud = pud_offset(p4d, addr);
BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * When khugepaged collapses normal pages into a hugepage, it first
	 * sets the pmd to none to force page fault/gup to take mmap_lock.
	 * After the pmd is set to none, we do a pte_clear which runs this
	 * assertion, so if we find the pmd none, just return.
	 */
	if (pmd_none(*pmd))
		return;
pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
BUG_ON(!pte);
assert_spin_locked(ptl);
pte_unmap(pte);
}
#endif /* CONFIG_DEBUG_VM */
/*
 * We have 3 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but paca->irq_soft_mask = IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t *pgdp;
#ifdef CONFIG_PPC64
p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
#endif
pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	unsigned pdshift;
if (hpage_shift)
*hpage_shift = 0;
if (is_thp)
*is_thp = false;
	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or page unmap. The returned pte_t * is still not
	 * stable, so it should be checked there for the above conditions.
	 * The top level is an exception because it is folded into p4d.
	 *
	 * On PPC32, P4D/PUD/PMD are folded into PGD, so go straight to
	 * the PMD level.
*/
	pgdp = pgdir + pgd_index(ea);
#ifdef CONFIG_PPC64
p4dp = p4d_offset(pgdp, ea);
p4d = READ_ONCE(*p4dp);
pdshift = P4D_SHIFT;
	if (p4d_none(p4d))
		return NULL;
if (p4d_leaf(p4d)) {
		ret_pte = (pte_t *)p4dp;
		goto out;
}
	/*
	 * Even if we end up with an unmap, the pgtable will not
	 * be freed, because we do an RCU free and here we are
	 * IRQ-disabled.
*/
pdshift = PUD_SHIFT;
pudp = pud_offset(&p4d, ea);
pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return NULL;
if (pud_leaf(pud)) {
		ret_pte = (pte_t *)pudp;
		goto out;
}
	pdshift = PMD_SHIFT;
	pmdp = pmd_offset(&pud, ea);
#else
	pdshift = PMD_SHIFT;
	pmdp = pmd_offset(pud_offset(p4d_offset(pgdp, ea), ea), ea);
#endif
	pmd = READ_ONCE(*pmdp);

	/*
	 * A hugepage collapse is captured by this condition, see
	 * pmdp_collapse_flush.
	 */
	if (pmd_none(pmd))
		return NULL;
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * A hugepage split is captured by this condition, see
	 * pmdp_invalidate.
	 *
	 * Huge page modification can be caught here too.
	 */
	if (pmd_is_serializing(pmd))
		return NULL;
#endif
	if (pmd_trans_huge(pmd)) {
		if (is_thp)
*is_thp = true;
		ret_pte = (pte_t *)pmdp;
		goto out;
}
if (pmd_leaf(pmd)) {
		ret_pte = (pte_t *)pmdp;
		goto out;
}
return pte_offset_kernel(&pmd, ea);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
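
/*
 * Usage sketch, not part of the original file: look up the pfn backing an
 * address in a given mm. __find_linux_pte() must run with interrupts
 * disabled so the RCU-freed page tables cannot be torn down mid-walk.
 * example_lookup_pfn() and its locals are hypothetical.
 */
static unsigned long __maybe_unused example_lookup_pfn(struct mm_struct *mm,
						       unsigned long ea)
{
	unsigned long flags, pfn = 0;
	unsigned int hpage_shift;
	bool is_thp;
	pte_t *ptep;

	local_irq_save(flags);	/* keep the walk safe against RCU teardown */
	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &hpage_shift);
	if (ptep && pte_present(*ptep))
		pfn = pte_pfn(*ptep);
	local_irq_restore(flags);

	return pfn;
}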