/* Include so header guard set. */
#include "../../../mm/vma.h"

/* When true, the stubbed vma_iter_prealloc() below reports -ENOMEM. */
static bool fail_prealloc;

/* Then override vma_iter_prealloc() so we can choose to fail it. */
#define vma_iter_prealloc(vmi, vma) \
	(fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
/* Helper function to allocate a VMA and link it to the tree. */ staticstruct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, unsignedlong start, unsignedlong end,
pgoff_t pgoff,
vm_flags_t vm_flags)
{ struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags);
if (vma == NULL) return NULL;
if (attach_vma(mm, vma)) {
detach_free_vma(vma); return NULL;
}
/* * Reset this counter which we use to track whether writes have * begun. Linking to the tree will have caused this to be incremented, * which means we will get a false positive otherwise.
*/
vma->vm_lock_seq = UINT_MAX;
return vma;
}
/* Helper function which provides a wrapper around a merge new VMA operation. */ staticstruct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{ struct vm_area_struct *vma; /* * For convenience, get prev and next VMAs. Which the new VMA operation * requires.
*/
vmg->next = vma_next(vmg->vmi);
vmg->prev = vma_prev(vmg->vmi);
vma_iter_next_range(vmg->vmi);
vma = vma_merge_new_range(vmg); if (vma)
vma_assert_attached(vma);
return vma;
}
/*
 * Helper function which provides a wrapper around a merge existing VMA
 * operation.
 *
 * Asserts any merged VMA is attached. Returns the merged VMA or NULL.
 */
static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma;

	vma = vma_merge_existing_range(vmg);
	if (vma)
		vma_assert_attached(vma);

	return vma;
}
/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 *
 * Returns zero on success, or a negative error code from vma_expand().
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
	return vma_expand(vmg);
}
/* * Helper function to reset merge state the associated VMA iterator to a * specified new range.
*/ staticvoid vmg_set_range(struct vma_merge_struct *vmg, unsignedlong start, unsignedlong end, pgoff_t pgoff, vm_flags_t vm_flags)
{
vma_iter_set(vmg->vmi, start);
/* Helper function to set both the VMG range and its anon_vma. */
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg,
				   unsigned long start, unsigned long end,
				   pgoff_t pgoff, vm_flags_t vm_flags,
				   struct anon_vma *anon_vma)
{
	vmg_set_range(vmg, start, end, pgoff, vm_flags);
	vmg->anon_vma = anon_vma;
}
/* * Helper function to try to merge a new VMA. * * Update vmg and the iterator for it and try to merge, otherwise allocate a new * VMA, link it to the maple tree and return it.
*/ staticstruct vm_area_struct *try_merge_new_vma(struct mm_struct *mm, struct vma_merge_struct *vmg, unsignedlong start, unsignedlong end,
pgoff_t pgoff, vm_flags_t vm_flags, bool *was_merged)
{ struct vm_area_struct *merged;
/*
 * Helper function to reset the dummy anon_vma to indicate it has not been
 * duplicated.
 */
static void reset_dummy_anon_vma(void)
{
	dummy_anon_vma.was_cloned = false;
	dummy_anon_vma.was_unlinked = false;
}
/* * Helper function to remove all VMAs and destroy the maple tree associated with * a virtual address space. Returns a count of VMAs in the tree.
*/ staticint cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
{ struct vm_area_struct *vma; int count = 0;
/* Helper function to determine if VMA has had vma_start_write() performed. */ staticbool vma_write_started(struct vm_area_struct *vma)
{ int seq = vma->vm_lock_seq;
/* We reset after each check. */
vma->vm_lock_seq = UINT_MAX;
/* The vma_start_write() stub simply increments this value. */ return seq > -1;
}
/* Helper function providing a dummy vm_ops->close() method. */
static void dummy_close(struct vm_area_struct *vma)
{
	/*
	 * Intentionally empty; the parameter is named (unnamed parameters
	 * require C23) and explicitly ignored.
	 */
	(void)vma;
}
/* * The flags will not be changed, the vma_modify_flags() function * performs the merge/split only.
*/
vma = vma_modify_flags(&vmi, init_vma, init_vma,
0x1000, 0x2000, VM_READ | VM_MAYREAD);
ASSERT_NE(vma, NULL); /* We modify the provided VMA, and on split allocate new VMAs. */
ASSERT_EQ(vma, init_vma);
/* * 0123456789abc * AA B CC
*/
vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
ASSERT_NE(vma_a, NULL); /* We give each VMA a single avc so we can test anon_vma duplication. */
INIT_LIST_HEAD(&vma_a->anon_vma_chain);
list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
/* Make sure there aren't new VM_SPECIAL flags. */ for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
all_special_flags |= special_flags[i];
}
ASSERT_EQ(all_special_flags, VM_SPECIAL);
/* 1. Set up new VMA with special flag that would otherwise merge. */
/* * 01234 * AAA* * * This should merge if not for the VM_SPECIAL flag.
*/
vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags); for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
vm_flags_t special_flag = special_flags[i];
/* * When merging VMAs we are not permitted to remove any VMA that has a * vm_ops->close() hook. * * Considering the two possible adjacent VMAs to which a VMA can be * merged: * * [ prev ][ vma ][ next ] * * In no case will we need to delete prev. If the operation is * mergeable, then prev will be extended with one or both of vma and * next deleted. * * As a result, during initial mergeability checks, only * can_vma_merge_before() (which implies the VMA being merged with is * 'next' as shown above) bothers to check to see whether the next VMA * has a vm_ops->close() callback that will need to be called when * removed. * * If it does, then we cannot merge as the resources that the close() * operation potentially clears down are tied only to the existing VMA * range and we have no way of extending those to the nearly merged one. * * We must consider two scenarios: * * A. * * vm_ops->close: - - !NULL * [ prev ][ vma ][ next ] * * Where prev may or may not be present/mergeable. * * This is picked up by a specific check in can_vma_merge_before(). * * B. * * vm_ops->close: - !NULL * [ prev ][ vma ] * * Where prev and vma are present and mergeable. * * This is picked up by a specific check in the modified VMA merge. * * IMPORTANT NOTE: We make the assumption that the following case: * * - !NULL NULL * [ prev ][ vma ][ next ] * * Cannot occur, because vma->vm_ops being the same implies the same * vma->vm_file, and therefore this would mean that next->vm_ops->close * would be set too, and thus scenario A would pick this up.
*/
/* * The only case of a new VMA merge that results in a VMA being deleted * is one where both the previous and next VMAs are merged - in this * instance the next VMA is deleted, and the previous VMA is extended. * * If we are unable to do so, we reduce the operation to simply * extending the prev VMA and not merging next. * * 0123456789 * PPP**NNNN * -> * 0123456789 * PPPPPPNNN
*/
/* * When modifying an existing VMA there are further cases where we * delete VMAs. * * <> * 0123456789 * PPPVV * * In this instance, if vma has a close hook, the merge simply cannot * proceed.
*/
/* * The VMA being modified in a way that would otherwise merge should * also fail.
*/
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
/* * This case is mirrored if merging with next. * * <> * 0123456789 * VVNNNN * * In this instance, if vma has a close hook, the merge simply cannot * proceed.
*/
vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL); /* * Initially this is misapprehended as an out of memory report, as the * close() check is handled in the same way as anon_vma duplication * failures, however a subsequent patch resolves this.
*/
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
/* * Finally, we consider two variants of the case where we modify a VMA * to merge with both the previous and next VMAs. * * The first variant is where vma has a close hook. In this instance, no * merge can proceed. * * <> * 0123456789 * PPPVVNNNN
*/
/* * The second variant is where next has a close hook. In this instance, * we reduce the operation to a merge between prev and vma. * * <> * 0123456789 * PPPVVNNNN * -> * 0123456789 * PPPPPNNNN
*/
/* * We should allow the partial merge of a proposed new VMA if the * surrounding VMAs have vm_ops->close() hooks (but are otherwise * compatible), e.g.: * * New VMA * A v-------v B * |-----| |-----| * close close * * Since the rule is to not DELETE a VMA with a close operation, this * should be permitted, only rather than expanding A and deleting B, we * should simply expand A and leave B intact, e.g.: * * New VMA * A B * |------------||-----| * close close
*/
/* Have prev and next have a vm_ops->close() hook. */
vma_prev->vm_ops = &vm_ops;
vma_next->vm_ops = &vm_ops;
/* Clear down and reset. We should have deleted prev and next. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);
/* * Non-merge ranges. the modified VMA merge operation assumes that the * caller always specifies ranges within the input VMA so we need only * examine these cases. * * - * - * - * <-> * <> * <> * 0123456789a * PPPVVVVVNNN
*/
/* * In the case of modified VMA merge, merging both left and right VMAs * but where prev and next have incompatible anon_vma objects, we revert * to a merge of prev and VMA: * * <--> * 0123456789 * PPPVVVVNNN * -> * 0123456789 * PPPPPPPNNN
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
/* * Give both prev and next single anon_vma_chain fields, so they will * merge with the NULL vmg->anon_vma. * * However, when prev is compared to next, the merge should fail.
*/
vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
vmg.prev = vma_prev;
vmg.middle = vma;
vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
/* Clear down and reset. */
ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);
/* * Now consider the new VMA case. This is equivalent, only adding a new * VMA in a gap between prev and next. * * <--> * 0123456789 * PPP****NNN * -> * 0123456789 * PPPPPPPNNN
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
/* * Expanding a VMA delete the next one duplicates next's anon_vma and * assigns it to the expanded VMA. * * This covers new VMA merging, as these operations amount to a VMA * expand.
*/
vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vma_next->anon_vma = &dummy_anon_vma;
/* * We are merging vma into prev, with vma possessing an anon_vma, which * will be duplicated. We cause the vmi preallocation to fail and assert * the duplicated anon_vma is unlinked.
*/
/* This will cause the merge to fail. */
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM); /* We will already have assigned the anon_vma. */
ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma); /* And it was both cloned and unlinked. */
ASSERT_TRUE(dummy_anon_vma.was_cloned);
ASSERT_TRUE(dummy_anon_vma.was_unlinked);
/* * We repeat the same operation for expanding a VMA, which is what new * VMA merging ultimately uses too. This asserts that unlinking is * performed in this case too.
*/
/* * Place a VMA prior to the one we're expanding so we assert that we do * not erroneously try to traverse to the previous VMA even though we * have, through the use of the just_expand flag, indicated we do not * need to do so.
*/
alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
/* * We will be positioned at the prev VMA, but looking to expand to * 0x9000.
*/
vma_iter_set(&vmi, 0x3000);
vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
vmg.prev = vma_prev;
vmg.just_expand = true;
/*
 * NOTE(review): The trailing German website-disclaimer text previously here
 * ("Die Informationen auf dieser Webseite wurden nach bestem Wissen ...",
 * i.e. "The information on this website was compiled to the best of our
 * knowledge; no guarantee of completeness, correctness or quality is given.
 * Note: the syntax highlighting and measurement are still experimental.")
 * is extraneous boilerplate from the page this file was extracted from and
 * is not part of the source; left here only as a comment so the file remains
 * syntactically valid C.
 */