/*
 * Fixed-size batch (up to 8) of VMAs gathered so they can be unlinked from
 * their backing file together.
 */
struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};
/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTE to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};
/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees are
 * made to the contents of this structure after a merge operation has completed.
 */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist in the case of a proposed new VMA being
	 * merged, or it may be an existing VMA.
	 *
	 * next may be assigned by the caller.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;

	/* This is the VMA we ultimately target to become the merged VMA. */
	struct vm_area_struct *target;

	/*
	 * Initially, the start, end, pgoff fields are provided by the caller
	 * and describe the proposed new VMA range, whether modifying an
	 * existing VMA (which will be 'middle'), or adding a new one.
	 *
	 * During the merge process these fields are updated to describe the new
	 * range _including those VMAs which will be merged_.
	 */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;

	/* Flags which callers can use to modify merge behaviour: */

	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	bool just_expand :1;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;

	/*
	 * If set, skip uprobe_mmap upon merged vma.
	 */
	bool skip_vma_uprobe :1;

	/* Internal flags set during merge process: */

	/*
	 * Internal flag indicating the merge increases vmg->middle->vm_start
	 * (and thereby, vmg->prev->vm_end).
	 */
	bool __adjust_middle_start :1;

	/*
	 * Internal flag indicating the merge decreases vmg->next->vm_start
	 * (and thereby, vmg->middle->vm_end).
	 */
	bool __adjust_next_start :1;

	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->middle.
	 */
	bool __remove_middle :1;

	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->next.
	 */
	bool __remove_next :1;
};
/*
 * Temporary helper functions for file systems which wrap an invocation of
 * f_op->mmap() but which might have an underlying file system which implements
 * f_op->mmap_prepare().
 */

/* Copy the fields set in a vm_area_desc by .mmap_prepare() back into the VMA. */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	/*
	 * Since we're invoking .mmap_prepare() despite having a partially
	 * established VMA, we must take care to handle setting fields
	 * correctly.
	 */

	/* Mutable fields. Populated with initial state. */
	vma->vm_pgoff = desc->pgoff;
	/* Only churn the file/flags when the descriptor actually changed them. */
	if (vma->vm_file != desc->file)
		vma_set_file(vma, desc->file);
	if (vma->vm_flags != desc->vm_flags)
		vm_flags_set(vma, desc->vm_flags);
	vma->vm_page_prot = desc->page_prot;
	/*
	 * NOTE(review): this function appears truncated in the original text —
	 * confirm against upstream whether further desc fields (e.g. vm_ops,
	 * private data) should also be copied here.
	 */
}
/*
 * Should write faults upgrade PTEs to writable one at a time, rather than the
 * whole mapping being mapped writable up front?
 */
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);

	return !!(vma->vm_flags & VM_WRITE);
}
/*
 * Retrieve the next VMA and rewind the iterator to end of the previous VMA, or
 * if no previous VMA, to index 0.
 *
 * Returns the next VMA (or NULL); if @pprev is non-NULL, stores the previous
 * VMA (or NULL) through it.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	/*
	 * The original text was truncated here: without these lines the
	 * function never uses @pprev and falls off the end of a non-void
	 * function (undefined behaviour). Restore the documented contract.
	 */
	if (pprev)
		*pprev = prev;

	return next;
}
/*
 * NOTE(review): the following website disclaimer (German) was accidentally
 * appended to this file by the tool that produced it; it is not C code and
 * should be removed. Translation: "The information on this website has been
 * carefully compiled to the best of our knowledge. However, neither the
 * completeness, correctness, nor quality of the information provided is
 * guaranteed. Note: the colored syntax highlighting and the measurement are
 * still experimental."
 */