/* * Reserve CMA areas for the largest supported gigantic * huge page when requested. Any other smaller gigantic * huge pages could still be served from those areas.
*/ #ifdef CONFIG_CMA void __init arm64_hugetlb_cma_reserve(void)
{ int order;
if (pud_sect_supported())
order = PUD_SHIFT - PAGE_SHIFT; else
order = CONT_PMD_SHIFT - PAGE_SHIFT;
if (!pte_present(orig_pte) || !pte_cont(orig_pte)) return orig_pte;
ncontig = find_num_contig(mm, addr, ptep, &pgsize); for (i = 0; i < ncontig; i++, ptep++) {
pte_t pte = __ptep_get(ptep);
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
} return orig_pte;
}
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step: it clears every entry in the
 * contiguous set and returns the head PTE with the dirty/young bits of
 * all cleared entries folded in.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			      unsigned long addr,
			      pte_t *ptep,
			      unsigned long pgsize,
			      unsigned long ncontig)
{
	pte_t pte, tmp_pte;
	bool present;

	pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
	present = pte_present(pte);
	while (--ncontig) {
		ptep++;
		tmp_pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
		if (present) {
			/* Accumulate dirty/young from every cleared entry. */
			if (pte_dirty(tmp_pte))
				pte = pte_mkdirty(pte);
			if (pte_young(tmp_pte))
				pte = pte_mkyoung(pte);
		}
	}
	return pte;
}
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			unsigned long addr,
			pte_t *ptep,
			unsigned long pgsize,
			unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__ptep_get_and_clear_anysz(mm, ptep, pgsize);

	/*
	 * NOTE(review): the TLB invalidation and the closing brace were lost
	 * in extraction; without a flush, stale contiguous translations could
	 * survive the break step. Restored per upstream — confirm the exact
	 * flush helper used by the tree this file came from.
	 */
	__flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, true);
}
/*
 * Install a huge PTE mapping of size @sz at @addr, writing every slot of a
 * contiguous set when @sz maps to one. Follows Break-Before-Make when
 * replacing a valid contiguous mapping.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz)
{
	size_t pgsize;
	int i;
	int ncontig;

	ncontig = num_contig_ptes(sz, &pgsize);

	/* Non-present entries: write each slot individually and bail out. */
	if (!pte_present(pte)) {
		for (i = 0; i < ncontig; i++, ptep++)
			__set_ptes_anysz(mm, ptep, pte, 1, pgsize);
		return;
	}

	/* Only need to "break" if transitioning valid -> valid. */
	if (pte_cont(pte) && pte_valid(__ptep_get(ptep)))
		clear_flush(mm, addr, ptep, pgsize, ncontig);

	/*
	 * NOTE(review): the final write-back and closing brace were lost in
	 * extraction; restored so the new mapping is actually installed —
	 * verify against the upstream arm64 source.
	 */
	__set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
}
/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy between dirty or young.
 *
 * Returns 1 if any flag differs from the installed entries, 0 otherwise.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	/* Write permission is compared against the head entry only. */
	if (pte_write(pte) != pte_write(__ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = __ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}
/*
 * Update access flags (dirty/young) and write permission for a huge
 * mapping. Contiguous ranges must be fully broken (Break-Before-Make)
 * before any entry can change. Returns non-zero if the entries changed.
 */
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig;
	size_t pgsize = 0;
	struct mm_struct *mm = vma->vm_mm;
	pte_t orig_pte;

	VM_WARN_ON(!pte_present(pte));

	/* Non-contiguous mappings take the regular single-entry path. */
	if (!pte_cont(pte))
		return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	/*
	 * NOTE(review): everything below this point was lost in extraction;
	 * reconstructed from the upstream arm64 implementation — verify
	 * against the tree this file came from.
	 */
	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	__set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
	return 1;
}
staticint __init hugetlbpage_init(void)
{ /* * HugeTLB pages are supported on maximum four page table * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base * page size, corresponding to hugetlb_add_hstate() calls * here. * * HUGE_MAX_HSTATE should at least match maximum supported * HugeTLB page sizes on the platform. Any new addition to * supported HugeTLB page sizes will also require changing * HUGE_MAX_HSTATE as well.
*/
BUILD_BUG_ON(HUGE_MAX_HSTATE < 4); if (pud_sect_supported())
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) { /* * Break-before-make (BBM) is required for all user space mappings * when the permission changes from executable to non-executable * in cases where cpu is affected with errata #2645198.
*/ if (pte_user_exec(__ptep_get(ptep))) return huge_ptep_clear_flush(vma, addr, ptep);
} return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.