/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KSM_H #define __LINUX_KSM_H /* * Memory merging support. * * This code enables dynamic sharing of identical pages found in different * memory areas, even if they are not shared by fork().
*/
#ifdef CONFIG_KSM
/* Handle MADV_MERGEABLE / MADV_UNMERGEABLE for [start, end) of @vma. */
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, vm_flags_t *vm_flags);

/* Compute the KSM-relevant vm_flags for a new mapping of @file in @mm. */
vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
			 vm_flags_t vm_flags);
/* Opt the whole mm in to / out of "merge any" mode (prctl PR_SET_MEMORY_MERGE). */
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
/* Fully disable KSM for @mm, unmerging any already-merged pages. */
int ksm_disable(struct mm_struct *mm);
/* Register/unregister @mm with the ksmd scanner (internal entry points). */
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
/*
 * Propagate KSM registration from @oldmm to the child @mm on fork.
 *
 * NOTE(review): the extracted source was truncated/garbled here (an
 * unclosed if-block and a stray ksm_zero_pages read); reconstructed as
 * the minimal body consistent with the visible MMF_VM_MERGEABLE guard —
 * confirm against the original header.
 */
static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* Adding mm to ksm is best effort on fork. */
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		__ksm_enter(mm);
}
/*
 * Re-register the fresh mm with ksmd on execve() when the process has
 * opted in to merging any VMA (MMF_VM_MERGE_ANY).
 * Returns 0 on success or the error from __ksm_enter().
 */
static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}
/* Unregister @mm from ksmd on process exit, if it was ever registered. */
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}
/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.