#ifdef CONFIG_TRACING /* * Trace calls must be in a separate file, as otherwise there's a circular * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
*/
/* Additional refcnt if the vma is attached. */ if (!detaching)
tgt_refcnt++;
/* * If vma is detached then only vma_mark_attached() can raise the * vm_refcnt. mmap_write_lock prevents racing with vma_mark_attached().
*/ if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt)) returnfalse;
/* * __vma_enter_locked() returns false immediately if the vma is not * attached, otherwise it waits until refcnt is indicating that vma * is attached with no readers.
*/
locked = __vma_enter_locked(vma, false);
/* * We should use WRITE_ONCE() here because we can have concurrent reads * from the early lockless pessimistic check in vma_start_read(). * We don't really care about the correctness of that early check, but * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
*/
WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
/* * We are the only writer, so no need to use vma_refcount_put(). * The condition below is unlikely because the vma has been already * write-locked and readers can increment vm_refcnt only temporarily * before they check vm_lock_seq, realize the vma is locked and drop * back the vm_refcnt. That is a narrow window for observing a raised * vm_refcnt.
*/ if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { /* Wait until vma is detached with no readers. */ if (__vma_enter_locked(vma, true)) { bool detached;
/* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be * stable and not isolated. If the VMA is not found or is being modified the * function returns NULL.
*/ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsignedlong address)
{
MA_STATE(mas, &mm->mm_mt, address, address); struct vm_area_struct *vma;
rcu_read_lock();
retry:
vma = mas_walk(&mas); if (!vma) goto inval;
vma = vma_start_read(mm, vma); if (IS_ERR_OR_NULL(vma)) { /* Check if the VMA got isolated after we found it */ if (PTR_ERR(vma) == -EAGAIN) {
count_vm_vma_lock_event(VMA_LOCK_MISS); /* The area was replaced with another one */ goto retry;
}
/* Failed to lock the VMA */ goto inval;
} /* * At this point, we have a stable reference to a VMA: The VMA is * locked and we know it hasn't already been isolated. * From here on, we can access the VMA without worrying about which * fields are accessible for RCU readers.
*/
/* Check if the vma we locked is the right one. */ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) goto inval_end_read;
ret = mmap_read_lock_killable(mm); if (ret) return ERR_PTR(ret);
/* Lookup the vma at the last position again under mmap_read_lock */
vma_iter_set(vmi, from_addr);
vma = vma_next(vmi); if (vma) { /* Very unlikely vma->vm_refcnt overflow case */ if (unlikely(!vma_start_read_locked(vma)))
vma = ERR_PTR(-EAGAIN);
}
RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu read lock held");
retry: /* Start mmap_lock speculation in case we need to verify the vma later */
mmap_unlocked = mmap_lock_speculate_try_begin(mm, &mm_wr_seq);
vma = vma_next(vmi); if (!vma) return NULL;
vma = vma_start_read(mm, vma); if (IS_ERR_OR_NULL(vma)) { /* * Retry immediately if the vma gets detached from under us. * Infinite loop should not happen because the vma we find will * have to be constantly knocked out from under us.
*/ if (PTR_ERR(vma) == -EAGAIN) { /* reset to search from the last address */
vma_iter_set(vmi, from_addr); goto retry;
}
goto fallback;
}
/* Verify the vma is not behind the last search position. */ if (unlikely(from_addr >= vma->vm_end)) goto fallback_unlock;
/* * vma can be ahead of the last search position but we need to verify * it was not shrunk after we found it and another vma has not been * installed ahead of it. Otherwise we might observe a gap that should * not be there.
*/ if (from_addr < vma->vm_start) { /* Verify only if the address space might have changed since vma lookup. */ if (!mmap_unlocked || mmap_lock_speculate_retry(mm, mm_wr_seq)) {
vma_iter_set(vmi, from_addr); if (vma != vma_next(vmi)) goto fallback_unlock;
}
}
if (regs && !user_mode(regs)) { unsignedlong ip = exception_ip(regs); if (!search_exception_tables(ip)) returnfalse;
}
return !mmap_read_lock_killable(mm);
}
/*
 * Try to atomically upgrade an mmap read lock to a write lock without
 * dropping it first.  Not implemented yet, so always report failure and
 * let the caller fall back to drop-and-retake (see
 * upgrade_mmap_lock_carefully()).
 *
 * Returns: true if the lock was upgraded in place (never, currently),
 * false if the caller must release and re-acquire the lock itself.
 */
static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
{
	/*
	 * We don't have this operation yet.
	 *
	 * It should be easy enough to do: it's basically a
	 *    atomic_long_try_cmpxchg_acquire()
	 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
	 * it also needs the proper lockdep magic etc.
	 */
	return false;
}
/*
 * Drop the mmap read lock and carefully take it for writing instead.
 *
 * Before blocking on the write lock for a kernel-mode fault, check that
 * the faulting instruction is covered by the exception tables; if it is
 * not, this is a kernel bug and we must not risk deadlocking on mmap_lock.
 *
 * Returns: true with mmap_lock held for writing on success, false (with
 * the lock dropped) if the fault should not be handled or the killable
 * write-lock acquisition was interrupted.
 */
static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
{
	mmap_read_unlock(mm);
	/* Kernel-mode fault: only proceed if a fixup entry exists for it. */
	if (regs && !user_mode(regs)) {
		unsigned long ip = exception_ip(regs);
		if (!search_exception_tables(ip))
			return false;
	}
	return !mmap_write_lock_killable(mm);
}
/* * Helper for page fault handling. * * This is kind of equivalent to "mmap_read_lock()" followed * by "find_extend_vma()", except it's a lot more careful about * the locking (and will drop the lock on failure). * * For example, if we have a kernel bug that causes a page * fault, we don't want to just use mmap_read_lock() to get * the mm lock, because that would deadlock if the bug were * to happen while we're holding the mm lock for writing. * * So this checks the exception tables on kernel faults in * order to only do this all for instructions that are actually * expected to fault. * * We can also actually take the mm lock for writing if we * need to extend the vma, which helps the VM layer a lot.
*/ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, unsignedlong addr, struct pt_regs *regs)
{ struct vm_area_struct *vma;
if (!get_mmap_lock_carefully(mm, regs)) return NULL;
/* * Well, dang. We might still be successful, but only * if we can extend a vma to do so.
*/ if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
mmap_read_unlock(mm); return NULL;
}
/* * We can try to upgrade the mmap lock atomically, * in which case we can continue to use the vma * we already looked up. * * Otherwise we'll have to drop the mmap lock and * re-take it, and also look up the vma again, * re-checking it.
*/ if (!mmap_upgrade_trylock(mm)) { if (!upgrade_mmap_lock_carefully(mm, regs)) return NULL;
vma = find_vma(mm, addr); if (!vma) goto fail; if (vma->vm_start <= addr) goto success; if (!(vma->vm_flags & VM_GROWSDOWN)) goto fail;
}
/* * At least xtensa ends up having protection faults even with no * MMU.. No stack expansion, at least.
*/ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, unsignedlong addr, struct pt_regs *regs)
{ struct vm_area_struct *vma;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.