/*
 * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
#include"rds.h"
/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */
/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
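
/*
 * Worked example for the arithmetic above (added commentary, assuming
 * 4 KiB pages): addr = 0x1005 and bytes = 0x2000 end at byte 0x3004 and
 * touch page indices 1..3, so the function returns
 * ((0x1005 + 0x2000 + 0xfff) >> 12) - (0x1005 >> 12) = 4 - 1 = 3 pages.
 */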
/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;
/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
mr->r_invalidate = 0;
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
kref_put(&mr->r_kref, __rds_put_mr_final);
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (rs->rs_transport && rs->rs_transport->flush_mrs)
rs->rs_transport->flush_mrs();
}
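
/*
 * Note on the locking pattern above (added commentary): rs_rdma_lock is
 * dropped and re-taken around each kref_put(), presumably because the
 * final-release path may call into the transport and sleep, which is not
 * allowed under a spinlock; rb_first() then restarts the walk safely
 * after every drop.
 */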
/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret;
if (write)
gup_flags |= FOLL_WRITE;
	ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
	if (ret >= 0 && ret < nr_pages) {
unpin_user_pages(pages, ret);
ret = -EFAULT;
	}

	return ret;
}
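
/*
 * Usage sketch for the helper above (added commentary, not from the
 * original file): the contract is all-or-nothing, e.g.
 *
 *	struct page *pages[2];
 *	int ret = rds_pin_pages(user_addr, 2, pages, 1);
 *
 * either returns 2 with both pages pinned, or a negative errno (a short
 * pin is converted to -EFAULT after unpinning what was pinned), so
 * callers never have to clean up a partial pin themselves.
 */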
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	struct scatterlist *sg = NULL;
	unsigned int nr_pages;
	struct page **pages = NULL;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents = 0;
	int need_odp = 0;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
}
if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
}
	/* If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
(args->vec.addr + args->vec.bytes)) {
		ret = -EINVAL;
		goto out;
}
if (!can_do_mlock()) {
		ret = -EPERM;
		goto out;
}
	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
}
	/* Restrict the size of mr irrespective of underlying transport
	 * To account for unaligned mr regions, subtract one from nr_pages
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
}
/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
}
	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	RB_CLEAR_NODE(&mr->r_rb_node);
	kref_init(&mr->r_kref);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;
if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
mr->r_write = 1;
	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w.  We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret == -EOPNOTSUPP) {
need_odp = 1;
	} else if (ret <= 0) {
		goto out;
} else {
nents = ret;
		sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
		if (!sg) {
			ret = -ENOMEM;
			goto out;
}
WARN_ON(!nents);
sg_init_table(sg, nents);
		/* Stick all pages into the scatterlist */
		for (i = 0 ; i < nents; i++)
sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
rdsdebug("RDS: trans_private nents is %u\n", nents);
	}

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
trans_private = rs->rs_transport->get_mr(
sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
args->vec.addr, args->vec.bytes,
need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);
	if (IS_ERR(trans_private)) {
		/* In ODP case, we don't GUP pages, so don't need
		 * to release anything.
		 */
		if (!need_odp) {
unpin_user_pages(pages, nr_pages);
kfree(sg);
}
		ret = PTR_ERR(trans_private);
		/* Trigger connection so that it's ready for the next retry */
		if (ret == -ENODEV && cp)
			rds_conn_connect_if_down(cp->cp_conn);
		goto out;
}
	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	if (need_odp)
		cookie = rds_rdma_make_cookie(mr->r_key, 0);
	else
cookie = rds_rdma_make_cookie(mr->r_key,
					      args->vec.addr & ~PAGE_MASK);

	if (cookie_ret)
*cookie_ret = cookie;
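
	/*
	 * Illustrative layout (added commentary, assuming the rds.h cookie
	 * helpers): the cookie is a u64 packing the R_Key in the low 32 bits
	 * and the byte offset into the first page in the high 32 bits,
	 * roughly cookie = (u64)offset << 32 | r_key, so the receive side
	 * can recover both halves with rds_rdma_cookie_key() and
	 * rds_rdma_cookie_offset().
	 */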
if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		if (!need_odp) {
unpin_user_pages(pages, nr_pages);
kfree(sg);
}
		ret = -EFAULT;
		goto out;
}
/* Inserting the new MR into the rbtree bumps its
* reference count. */
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
BUG_ON(found && found != mr);
rdsdebug("RDS: get_mr key is %x\n", mr->r_key); if (mr_ret) {
kref_get(&mr->r_kref);
*mr_ret = mr;
}
ret = 0;
out:
	kfree(pages);
	if (mr)
		kref_put(&mr->r_kref, __rds_put_mr_final);
	return ret;
}
int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval,
			      sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;
	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
new_args.vec = args.vec;
new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}
/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;
	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
		return -EFAULT;
	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
}
	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
*/
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
mr->r_invalidate = 1;
}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	kref_put(&mr->r_kref, __rds_put_mr_final);
	return 0;
}
/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
}
	/* Get a reference so that the MR won't go away before calling
	 * sync_mr() below.
*/
kref_get(&mr->r_kref);
	/* If it is going to be freed, remove it from the tree now so
	 * that no other thread can find it and free it.
	 */
	if (mr->r_use_once || force) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
zot_me = 1;
}
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
/* Release the reference held above. */
kref_put(&mr->r_kref, __rds_put_mr_final);
/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me)
kref_put(&mr->r_kref, __rds_put_mr_final);
}
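
/*
 * Reference-count sketch for rds_rdma_unuse() (added commentary): the
 * rbtree holds the long-lived reference taken at map time. The function
 * takes one temporary reference for the sync_mr() call and drops it;
 * for use-once or forced teardown it also drops the tree's reference,
 * and that second kref_put() is what ends up in __rds_put_mr_final().
 */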
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	if (ro->op_odp_mr) {
kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
	} else {
		for (i = 0; i < ro->op_nents; i++) {
			struct page *page = sg_page(&ro->op_sg[i]);
			/* Mark page dirty if it was possibly modified, which
			 * is the case for a RDMA_READ which copies from remote
			 * to local memory
			 */
unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
}
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
	ro->op_odp_mr = NULL;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);
	/* Mark page dirty if it was possibly modified, which
	 * is the case for a RDMA_READ which copies from remote
	 * to local memory */
	unpin_user_pages_dirty_lock(&page, 1, true);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}
/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;
	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;
tot_pages += nr_pages;
		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}
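
/*
 * Worked bound for the overflow comment above (added commentary): with
 * 4 KiB pages each iovec contributes at most (UINT_MAX >> 12) + 1, about
 * 2^20 pages, so a signed 32-bit total needs on the order of 2^11 entries
 * before it can exceed INT_MAX, and it must pass through negative values
 * first, which the 'tot_pages < 0' check catches.
 */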
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, (struct rds_iovec __user *)(unsigned long)args->local_vec_addr,
			   args->nr_local * sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {
		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;
tot_pages += nr_pages;
		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
}
return tot_pages * sizeof(struct scatterlist);
}
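
/*
 * Note (added commentary, inferred from the callers): the value is
 * returned in bytes of scatterlist rather than pages because it is used
 * to size the extra per-message allocation that rds_cmsg_rdma_args()
 * later carves up via rds_message_alloc_sgs().
 */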
/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;
	bool odp_supported = true;
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;
args = CMSG_DATA(cmsg);
if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
}
if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
}
if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}

	/* odp-mr is not supported for multiple requests within one message */
	if (args->nr_local != 1)
odp_supported = false;
iovs = vec->iov;
	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
}
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	op->op_odp_mr = NULL;
WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (IS_ERR(op->op_sg)) {
		ret = PTR_ERR(op->op_sg);
		goto out_pages;
}
	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
}
	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;
	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
		unsigned int nr = rds_pages_in_vec(iov);
		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if ((!odp_supported && ret <= 0) ||
		    (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
			goto out_pages;
		if (ret == -EOPNOTSUPP) {
			struct rds_mr *local_odp_mr;
if (!rs->rs_transport->get_mr) {
				ret = -EOPNOTSUPP;
				goto out_pages;
}
local_odp_mr =
				kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
			if (!local_odp_mr) {
				ret = -ENOMEM;
				goto out_pages;
}
RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
kref_init(&local_odp_mr->r_kref);
local_odp_mr->r_trans = rs->rs_transport;
local_odp_mr->r_sock = rs;
local_odp_mr->r_trans_private =
rs->rs_transport->get_mr(
NULL, 0, rs, &local_odp_mr->r_key, NULL,
						iov->addr, iov->bytes, ODP_VIRTUAL);
			if (IS_ERR(local_odp_mr->r_trans_private)) {
ret = PTR_ERR(local_odp_mr->r_trans_private);
rdsdebug("get_mr ret %d %p\"", ret,
local_odp_mr->r_trans_private);
kfree(local_odp_mr);
				ret = -EOPNOTSUPP;
				goto out_pages;
}
rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
local_odp_mr, local_odp_mr->r_trans_private);
op->op_odp_mr = local_odp_mr;
op->op_odp_addr = iov->addr;
		}

		nr_bytes += iov->bytes;
	}
if (nr_bytes > args->remote_vec.bytes) {
rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
}
op->op_bytes = nr_bytes;
ret = 0;
out_pages:
kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
rds_stats_inc(s_send_rdma);
return ret;
}
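
/*
 * For orientation, a hedged sketch of the userspace side that reaches
 * the handler above (assumed, not part of this file): the application
 * attaches a struct rds_rdma_args as ancillary data on sendmsg(), e.g.
 *
 *	struct rds_rdma_args rargs = { .cookie = cookie, ... };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type  = RDS_CMSG_RDMA_ARGS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct rds_rdma_args));
 *	memcpy(CMSG_DATA(cmsg), &rargs, sizeof(rargs));
 */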
/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		kref_get(&mr->r_kref);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private,
				     DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}
/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, (struct rds_get_mr_args *)CMSG_DATA(cmsg),
			      &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr,
			      rm->m_conn_path);
}
/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	 || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);
	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}