staticinlinebool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsignedint *seq)
{ /* * Since mmap_lock is a sleeping lock, and waiting for it to become * unlocked is more or less equivalent with taking it ourselves, don't * bother with the speculative path if mmap_lock is already write-locked * and take the slow path, which takes the lock.
*/ return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
}
staticinlinebool is_vma_writer_only(int refcnt)
{ /* * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on * a detached vma happens only in vma_mark_detached() and is a rare * case, therefore most of the time there will be no unnecessary wakeup.
*/ return refcnt & VMA_LOCK_OFFSET && refcnt <= VMA_LOCK_OFFSET + 1;
}
/*
 * Drop a reader's reference on @vma. If a writer is the only remaining
 * holder afterwards, wake it so it can proceed.
 */
static inline void vma_refcount_put(struct vm_area_struct *vma)
{
	/* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
	struct mm_struct *mm = vma->vm_mm;
	int oldcnt;

	rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
	if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
		/* Wake a writer waiting for the readers to drain, if any */
		if (is_vma_writer_only(oldcnt - 1))
			rcuwait_wake_up(&mm->vma_writer_wait);
	}
}
/* * Try to read-lock a vma. The function is allowed to occasionally yield false * locked result to avoid performance overhead, in which case we fall back to * using mmap_lock. The function should never yield false unlocked result. * False locked result is possible if mm_lock_seq overflows or if vma gets * reused and attached to a different mm before we lock it. * Returns the vma on success, NULL on failure to lock and EAGAIN if vma got * detached. * * WARNING! The vma passed to this function cannot be used if the function * fails to lock it because in certain cases RCU lock is dropped and then * reacquired. Once RCU lock is dropped the vma can be concurently freed.
*/ staticinlinestruct vm_area_struct *vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma)
{ int oldcnt;
/* * Check before locking. A race might cause false locked result. * We can use READ_ONCE() for the mm_lock_seq here, and don't need * ACQUIRE semantics, because this is just a lockless check whose result * we don't rely on for anything - the mm_lock_seq read against which we * need ordering is below.
*/ if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) return NULL;
/* * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire() * will fail because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET. * Acquire fence is required here to avoid reordering against later * vm_lock_seq check and checks inside lock_vma_under_rcu().
*/ if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
VMA_REF_LIMIT))) { /* return EAGAIN if vma got detached from under us */ return oldcnt ? NULL : ERR_PTR(-EAGAIN);
}
/* * If vma got attached to another mm from under us, that mm is not * stable and can be freed in the narrow window after vma->vm_refcnt * is dropped and before rcuwait_wake_up(mm) is called. Grab it before * releasing vma->vm_refcnt.
*/ if (unlikely(vma->vm_mm != mm)) { /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */ struct mm_struct *other_mm = vma->vm_mm;
/* * __mmdrop() is a heavy operation and we don't need RCU * protection here. Release RCU lock during these operations. * We reinstate the RCU read lock as the caller expects it to * be held when this function returns even on error.
*/
rcu_read_unlock();
mmgrab(other_mm);
vma_refcount_put(vma);
mmdrop(other_mm);
rcu_read_lock(); return NULL;
}
/* * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result. * False unlocked result is impossible because we modify and check * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq * modification invalidates all existing locks. * * We must use ACQUIRE semantics for the mm_lock_seq so that if we are * racing with vma_end_write_all(), we only start reading from the VMA * after it has been unlocked. * This pairs with RELEASE semantics in vma_end_write_all().
*/ if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
vma_refcount_put(vma); return NULL;
}
return vma;
}
/* * Use only while holding mmap read lock which guarantees that locking will not * fail (nobody can concurrently write-lock the vma). vma_start_read() should * not be used in such cases because it might fail due to mm_lock_seq overflow. * This functionality is used to obtain vma read lock and drop the mmap read lock.
*/ staticinlinebool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass)
{ int oldcnt;
mmap_assert_locked(vma->vm_mm); if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
VMA_REF_LIMIT))) returnfalse;
/* * Use only while holding mmap read lock which guarantees that locking will not * fail (nobody can concurrently write-lock the vma). vma_start_read() should * not be used in such cases because it might fail due to mm_lock_seq overflow. * This functionality is used to obtain vma read lock and drop the mmap read lock.
*/ staticinlinebool vma_start_read_locked(struct vm_area_struct *vma)
{ return vma_start_read_locked_nested(vma, 0);
}
/* WARNING! Can only be used if mmap_lock is expected to be write-locked */ staticbool __is_vma_write_locked(struct vm_area_struct *vma, unsignedint *mm_lock_seq)
{
mmap_assert_write_locked(vma->vm_mm);
/* * current task is holding mmap_write_lock, both vma->vm_lock_seq and * mm->mm_lock_seq can't be concurrently modified.
*/
*mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence; return (vma->vm_lock_seq == *mm_lock_seq);
}
/*
 * Begin writing to a VMA.
 * Exclude concurrent readers under the per-VMA lock until the currently
 * write-locked mmap_lock is dropped or downgraded.
 */
static inline void vma_start_write(struct vm_area_struct *vma)
{
	unsigned int mm_lock_seq;

	/* Already write-locked in this mmap_write_lock critical section */
	if (__is_vma_write_locked(vma, &mm_lock_seq))
		return;

	/*
	 * NOTE(review): the tail of this function was truncated in the
	 * reviewed copy; reconstructed per upstream, which performs the
	 * actual write-lock via __vma_start_write().
	 */
	__vma_start_write(vma, mm_lock_seq);
}
/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	/* An attached vma always holds at least one vm_refcnt reference */
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}
/*
 * Locks next vma pointed by the iterator. Confirms the locked vma has not
 * been modified and will retry under mmap_lock protection if modification
 * was detected. Should be called from read RCU section.
 * Returns either a valid locked VMA, NULL if no more VMAs or -EINTR if the
 * process was interrupted.
 */
struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
				     struct vma_iterator *iter,
				     unsigned long address);
/*
 * Write-lock mmap_lock, allowing the sleep to be interrupted by a fatal
 * signal. Returns 0 on success or a negative error from down_write_killable().
 * The per-mm lock sequence is advanced only when the lock was acquired.
 */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, true);
	ret = down_write_killable(&mm->mmap_lock);
	if (!ret)
		mm_lock_seqcount_begin(mm);
	__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
	return ret;
}
/*
 * Drop all currently-held per-VMA locks.
 * This is called from the mmap_lock implementation directly before releasing
 * a write-locked mmap_lock (or downgrading it to read-locked).
 * This should normally NOT be called manually from other places.
 * If you want to call this manually anyway, keep in mind that this will release
 * *all* VMA write locks, including ones from further up the stack.
 */
static inline void vma_end_write_all(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	/* Bumping mm_lock_seq invalidates all existing per-VMA write locks */
	mm_lock_seqcount_end(mm);
}
/*
 * NOTE(review): the following text is residue from the web page this file was
 * scraped from (a German site disclaimer) and is not part of the source.
 * Translated: "The information on this website has been carefully compiled to
 * the best of our knowledge. However, neither completeness, correctness, nor
 * quality of the provided information is guaranteed. Note: the colored syntax
 * display is still experimental."
 */