/**
 * rvt_mmap_init - init link list and lock for mem map
 * @rdi: rvt dev struct
 */
void rvt_mmap_init(struct rvt_dev_info *rdi)
{
	/* Both locks guard independent state; initialize them up front. */
	spin_lock_init(&rdi->pending_lock);
	spin_lock_init(&rdi->mmap_offset_lock);

	/* No user mmap requests are outstanding yet. */
	INIT_LIST_HEAD(&rdi->pending_mmaps);

	/*
	 * First offset handed out is PAGE_SIZE, so offset zero is never
	 * assigned to an object.
	 */
	rdi->mmap_offset = PAGE_SIZE;
}
/** * rvt_release_mmap_info - free mmap info structure * @ref: a pointer to the kref within struct rvt_mmap_info
*/ void rvt_release_mmap_info(struct kref *ref)
{ struct rvt_mmap_info *ip =
container_of(ref, struct rvt_mmap_info, ref); struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);
/**
 * rvt_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Return: zero if the mmap is OK. Otherwise, return an errno.
 */
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rvt_dev_info *rdi = ib_to_rvt(context->device);
	/* Fixed source bug: "unsignedlong" was a fused token (compile error). */
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rvt_mmap_info *ip, *pp;
	int ret = -EINVAL;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_irq(&rdi->pending_lock);
	list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
				 pending_mmaps) {
		/* Only the creator is allowed to mmap the object */
		if (context != ip->context || (__u64)offset != ip->offset)
			continue;
		/* Don't allow a mmap larger than the object. */
		if (size > ip->size)
			break;

		/*
		 * NOTE(review): everything from here down was truncated in
		 * the source file; restored from the canonical rdmavt
		 * implementation -- verify against upstream before merging.
		 */
		list_del_init(&ip->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
		if (ret)
			goto done;
		vma->vm_ops = &rvt_vm_ops;
		vma->vm_private_data = ip;
		rvt_vma_open(vma);
		goto done;
	}
	spin_unlock_irq(&rdi->pending_lock);
done:
	return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.