/* * Copyright (c) 2005 Cisco Systems. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries, "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries, "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg, "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds, "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always, "Use memory registration even for contiguous memory regions");
staticint srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo, "Number of seconds between the observation of a transport" " layer error and failing all I/O. \"off\" means that this" " functionality is disabled.");
staticint srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the SRP transport should" " insulate transport layer errors. After this time has been" " exceeded the SCSI host is removed. Should be" " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT) " if fast_io_fail_tmo has not been set. \"off\" means that" " this functionality is disabled.");
staticbool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data, "Whether or not to request permission to use immediate data during SRP login.");
staticunsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count, "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
/*
 * NOTE(review): mid-function fragment -- the enclosing function's start is
 * not visible in this chunk (presumably a QP-init helper; confirm against
 * the full source). Allocates a QP attribute struct and resolves the index
 * of the target's P_Key in the local port's cached P_Key table.
 */
attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) return -ENOMEM;
/* Look up ib_cm.pkey (stored big-endian) in the cached P_Key table. */
ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
target->srp_host->port,
be16_to_cpu(target->ib_cm.pkey),
&attr->pkey_index); if (ret) goto out;
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed. May be NULL.
 *
 * Deregisters every memory region that was successfully allocated for the
 * pool and then frees the pool itself.
 *
 * Fix: "staticvoid" was missing the space between storage class and type.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	/* Only descriptors whose MR allocation succeeded have d->mr set. */
	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}
/** * srp_create_fr_pool() - allocate and initialize a pool for fast registration * @device: IB device to allocate fast registration descriptors for. * @pd: Protection domain associated with the FR descriptors. * @pool_size: Number of descriptors to allocate. * @max_page_list_len: Maximum fast registration work request page list length.
*/ staticstruct srp_fr_pool *srp_create_fr_pool(struct ib_device *device, struct ib_pd *pd, int pool_size, int max_page_list_len)
{ struct srp_fr_pool *pool; struct srp_fr_desc *d; struct ib_mr *mr; int i, ret = -EINVAL; enum ib_mr_type mr_type;
if (pool_size <= 0) goto err;
ret = -ENOMEM;
pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL); if (!pool) goto err;
pool->size = pool_size;
pool->max_page_list_len = max_page_list_len;
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->free_list);
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
mr = ib_alloc_mr(pd, mr_type, max_page_list_len); if (IS_ERR(mr)) {
ret = PTR_ERR(mr); if (ret == -ENOMEM)
pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
dev_name(&device->dev)); goto destroy_pool;
}
d->mr = mr;
list_add_tail(&d->entry, &pool->free_list);
}
out: return pool;
destroy_pool:
srp_destroy_fr_pool(pool);
err:
pool = ERR_PTR(ret); goto out;
}
/** * srp_fr_pool_get() - obtain a descriptor suitable for fast registration * @pool: Pool to obtain descriptor from.
*/ staticstruct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{ struct srp_fr_desc *d = NULL; unsignedlong flags;
spin_lock_irqsave(&pool->lock, flags); if (!list_empty(&pool->free_list)) {
d = list_first_entry(&pool->free_list, typeof(*d), entry);
list_del(&d->entry);
}
spin_unlock_irqrestore(&pool->lock, flags);
return d;
}
/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 *
 * Fix: "staticvoid" and "unsignedlong" were missing spaces.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool,
			    struct srp_fr_desc **desc, int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
/*
 * NOTE(review): this region is truncated and garbled. srp_destroy_qp()
 * below appears to be missing part of its body, and everything from the
 * "if (dev->use_fast_reg)" line onward references identifiers (dev,
 * fr_pool, init_attr, target, qp, send_cq, recv_cq, ret) that are not
 * declared in this scope -- it looks like the tail of a different function
 * (presumably srp_create_ch_ib(); confirm against the full source).
 * Also "staticvoid" is missing a space. Restore from the original file.
 */
/** * srp_destroy_qp() - destroy an RDMA queue pair * @ch: SRP RDMA channel. * * Drain the qp before destroying it. This avoids that the receive * completion handler can access the queue pair while it is * being destroyed.
*/ staticvoid srp_destroy_qp(struct srp_rdma_ch *ch)
{
/* Process pending send completions while holding the channel lock. */
spin_lock_irq(&ch->lock);
ib_process_cq_direct(ch->send_cq, -1);
spin_unlock_irq(&ch->lock);
/* NOTE(review): start of the foreign fragment (different function). */
if (dev->use_fast_reg) { if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
ch->fr_pool = fr_pool;
}
kfree(init_attr); return 0;
/* Error unwind: tear down QP and CQs in reverse order of creation. */
err_qp: if (target->using_rdma_cm)
rdma_destroy_qp(ch->rdma_cm.cm_id); else
ib_destroy_qp(qp);
err_send_cq:
ib_free_cq(send_cq);
err_recv_cq:
ib_free_cq(recv_cq);
err:
kfree(init_attr); return ret;
}
/*
 * srp_free_ch_ib() - release the IB resources owned by an RDMA channel.
 * @target: SRP target port the channel belongs to.
 * @ch:     Channel whose resources to free.
 *
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 *
 * Fix: "staticvoid" was missing the space between storage class and type.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	/* Nothing to do for a channel that was never initialized. */
	if (!ch->target)
		return;

	if (target->using_rdma_cm) {
		if (ch->rdma_cm.cm_id) {
			rdma_destroy_id(ch->rdma_cm.cm_id);
			ch->rdma_cm.cm_id = NULL;
		}
	} else {
		if (ch->ib_cm.cm_id) {
			ib_destroy_cm_id(ch->ib_cm.cm_id);
			ch->ib_cm.cm_id = NULL;
		}
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	}

	/*
	 * NOTE(review): the QP and CQs are nulled below without a visible
	 * destroy/free call in this chunk -- verify against the full source
	 * that srp_destroy_qp()/ib_free_cq() are invoked here, otherwise
	 * this leaks the QP and both CQs.
	 */

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
/*
 * NOTE(review): mid-function fragment -- the enclosing function's start is
 * not visible here (the body below builds a CM login request; presumably
 * srp_send_req(); confirm against the full source). The trailing
 * "if (changed) ... return changed;" lines belong to yet another function.
 */
/* Warn when the port's subnet timeout is low enough to break CM login. */
ret = ib_query_port(host->srp_dev->dev, host->port, &attr); if (ret == 0)
subnet_timeout = attr.subnet_timeout;
if (unlikely(subnet_timeout < 15))
pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
dev_name(&host->srp_dev->dev->dev), subnet_timeout);
/* * Pick some arbitrary defaults here; we could make these * module parameters if anyone cared about setting them.
*/
req->ib_param.responder_resources = 4;
req->ib_param.rnr_retry_count = 7;
req->ib_param.max_cm_retries = 15;
/* * In the published SRP specification (draft rev. 16a), the * port identifier format is 8 bytes of ID extension followed * by 8 bytes of GUID. Older drafts put the two halves in the * opposite order, so that the GUID comes first. * * Targets conforming to these obsolete drafts can be * recognized by the I/O Class they report.
*/ if (target->io_class == SRP_REV10_IB_IO_CLASS) {
memcpy(ipi, &target->sgid.global.interface_id, 8);
memcpy(ipi + 8, &target->initiator_ext, 8);
memcpy(tpi, &target->ioc_guid, 8);
memcpy(tpi + 8, &target->id_ext, 8);
} else {
memcpy(ipi, &target->initiator_ext, 8);
memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
memcpy(tpi, &target->id_ext, 8);
memcpy(tpi + 8, &target->ioc_guid, 8);
}
/* * Topspin/Cisco SRP targets will reject our login unless we * zero out the first 8 bytes of our initiator port ID and set * the second 8 bytes to the local node GUID.
*/ if (srp_target_is_topspin(target)) {
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Topspin/Cisco initiator port ID workaround " "activated for target GUID %016llx\n",
be64_to_cpu(target->ioc_guid));
memset(ipi, 0, 8);
memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
}
/* Issue the connection request over whichever CM is in use. */
if (target->using_rdma_cm)
status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param); else
status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
/* NOTE(review): tail of a different function (queues target removal). */
if (changed)
queue_work(srp_remove_wq, &target->remove_work);
return changed;
}
/*
 * srp_disconnect_target() - disconnect all RDMA channels of a target port.
 * @target: SRP target port to disconnect.
 *
 * Marks every channel as disconnected and sends a disconnect request over
 * whichever connection manager (RDMA CM or IB CM) the channel uses.
 *
 * Fix: "staticvoid" was missing the space between storage class and type.
 */
static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i, ret;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		ret = 0;
		if (target->using_rdma_cm) {
			if (ch->rdma_cm.cm_id)
				rdma_disconnect(ch->rdma_cm.cm_id);
		} else {
			if (ch->ib_cm.cm_id)
				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
						      NULL, 0);
		}
		if (ret < 0) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
/*
 * NOTE(review): this region contains several truncated functions:
 * srp_del_scsi_host_attr() (declarations only, body missing),
 * srp_connected_ch() (missing its "return c;" and closing brace), an
 * interior fragment of a connect retry loop (presumably srp_connect_ch();
 * confirm against the full source), and the header of srp_claim_req()
 * (body missing). Fused tokens "staticvoid"/"staticint"/"staticstruct"/
 * "conststruct"/"unsignedlong" also need spaces. Restore from the
 * original source.
 */
/** * srp_del_scsi_host_attr() - Remove attributes defined in the host template. * @shost: SCSI host whose attributes to remove from sysfs. * * Note: Any attributes defined in the host template and that did not exist * before invocation of this function will be ignored.
*/ staticvoid srp_del_scsi_host_attr(struct Scsi_Host *shost)
{ conststruct attribute_group **g; struct attribute **attr;
/** * srp_connected_ch() - number of connected channels * @target: SRP target port.
*/ staticint srp_connected_ch(struct srp_target_port *target)
{ int i, c = 0;
for (i = 0; i < target->ch_count; i++)
c += target->ch[i].connected;
/* NOTE(review): connect retry loop from a different, truncated function. */
while (1) {
init_completion(&ch->done);
ret = srp_send_req(ch, max_iu_len, multich); if (ret) goto out;
ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) goto out;
/* * The CM event handling code will set status to * SRP_PORT_REDIRECT if we get a port redirect REJ * back, or SRP_DLID_REDIRECT if we get a lid/qp * redirect REJ back.
*/
ret = ch->status; switch (ret) { case 0:
ch->connected = true; goto out;
case SRP_PORT_REDIRECT:
ret = srp_lookup_path(ch); if (ret) goto out; break;
case SRP_DLID_REDIRECT: break;
case SRP_STALE_CONN:
shost_printk(KERN_ERR, target->scsi_host, PFX "giving up on stale connection\n");
ret = -ECONNRESET; goto out;
/** * srp_claim_req - Take ownership of the scmnd associated with a request. * @ch: SRP RDMA channel. * @req: SRP request. * @sdev: If not NULL, only take ownership for this SCSI device. * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take * ownership of @req->scmnd if it equals @scmnd. * * Return value: * Either NULL or a pointer to the SCSI command the caller became owner of.
*/ staticstruct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, struct srp_request *req, struct scsi_device *sdev, struct scsi_cmnd *scmnd)
{ unsignedlong flags;
/*
 * Calculate the maximum initiator-to-target information unit length.
 * @cmd_sg_cnt:     Number of gather/scatter descriptors in the SRP_CMD IU.
 * @use_imm_data:   Whether immediate data may be sent with the command.
 * @max_it_iu_size: Upper bound negotiated with the target; 0 = no bound.
 */
static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
				  uint32_t max_it_iu_size)
{
	uint32_t len;

	/* SRP_CMD IU plus additional CDB plus an indirect descriptor table. */
	len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
	      sizeof(struct srp_indirect_buf) +
	      cmd_sg_cnt * sizeof(struct srp_direct_buf);

	/* Immediate data needs at least its offset plus the data itself. */
	if (use_imm_data && len < SRP_IMM_DATA_OFFSET + srp_max_imm_data)
		len = SRP_IMM_DATA_OFFSET + srp_max_imm_data;

	/* Respect the negotiated upper bound, if any. */
	if (max_it_iu_size && len > max_it_iu_size)
		len = max_it_iu_size;

	pr_debug("max_iu_len = %d\n", len);

	return len;
}
/*
 * NOTE(review): srp_rport_reconnect() below is truncated -- its body breaks
 * off after srp_create_ch_ib() and runs straight into the doc comment of
 * srp_map_finish_fr(), which is itself cut short after its out-of-MRs
 * check. "staticint" is also missing a space. Restore from the original
 * source.
 */
/* * It is up to the caller to ensure that srp_rport_reconnect() calls are * serialized and that no concurrent srp_queuecommand(), srp_abort(), * srp_reset_device() or srp_reset_host() calls will occur while this function * is in progress. One way to realize that is not to call this function * directly but to call srp_reconnect_rport() instead since that last function * serializes calls of this function via rport->mutex and also blocks * srp_queuecommand() calls before invoking this function.
*/ staticint srp_rport_reconnect(struct srp_rport *rport)
{ struct srp_target_port *target = rport->lld_data; struct srp_rdma_ch *ch;
uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
srp_use_imm_data,
target->max_it_iu_size); int i, j, ret = 0; bool multich = false;
srp_disconnect_target(target);
if (target->state == SRP_TARGET_SCANNING) return -ENODEV;
/* * Now get a new local CM ID so that we avoid confusing the target in * case things are really fouled up. Doing so also ensures that all CM * callbacks will have finished before a new QP is allocated.
*/ for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
ret += srp_new_cm_id(ch);
}
/* Fail every outstanding SCSI command with DID_RESET. */
{ struct srp_terminate_context context = {
.srp_target = target, .scsi_result = DID_RESET << 16};
scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
&context);
} for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i]; /* * Whether or not creating a new CM ID succeeded, create a new * QP. This guarantees that all completion callback function * invocations have finished before request resetting starts.
*/
ret += srp_create_ch_ib(ch);
/* NOTE(review): truncated start of srp_map_finish_fr() follows. */
/* * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset * where to start in the first element. If sg_offset_p != NULL then * *sg_offset_p is updated to the offset in state->sg[retval] of the first * byte that has not yet been mapped.
*/ staticint srp_map_finish_fr(struct srp_map_state *state, struct srp_request *req, struct srp_rdma_ch *ch, int sg_nents, unsignedint *sg_offset_p)
{ struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_reg_wr wr; struct srp_fr_desc *desc;
u32 rkey; int n, err;
if (state->fr.next >= state->fr.end) {
shost_printk(KERN_ERR, ch->target->scsi_host,
PFX "Out of MRs (mr_per_cmd = %d)\n",
ch->target->mr_per_cmd); return -ENOMEM;
}
/*
 * NOTE(review): this region mixes fragments of several functions:
 * srp_map_idb() (header and locals only), a consistency-check loop that
 * references undeclared i/desc_len/pfr/mr_len (presumably
 * srp_check_mapping(); confirm against the full source), a heavily cut
 * srp_map_data(), and the header of srp_put_tx_iu(). Fused tokens
 * "staticint"/"staticvoid"/"returnsizeof"/"unsignedlong" also need spaces.
 * Restore from the original source.
 */
/* * Register the indirect data buffer descriptor with the HCA. * * Note: since the indirect data buffer descriptor has been allocated with * kmalloc() it is guaranteed that this buffer is a physically contiguous * memory buffer.
*/ staticint srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, void **next_mr, void **end_mr, u32 idb_len,
__be32 *idb_rkey)
{ struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct srp_map_state state; struct srp_direct_buf idb_desc; struct scatterlist idb_sg[1]; int ret;
/* NOTE(review): foreign fragment -- mapping consistency check. */
for (i = 0; i < state->ndesc; i++)
desc_len += be32_to_cpu(req->indirect_desc[i].len); if (dev->use_fast_reg) for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
mr_len += (*pfr)->mr->length; if (desc_len != scsi_bufflen(req->scmnd) ||
mr_len > scsi_bufflen(req->scmnd))
pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
scsi_bufflen(req->scmnd), desc_len, mr_len,
state->ndesc, state->nmdesc);
}
/** * srp_map_data() - map SCSI data buffer onto an SRP request * @scmnd: SCSI command to map * @ch: SRP RDMA channel * @req: SRP request * * Returns the length in bytes of the SRP_CMD IU or a negative value if * mapping failed. The size of any immediate data is not included in the * return value.
*/ staticint srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, struct srp_request *req)
{ struct srp_target_port *target = ch->target; struct scatterlist *scat, *sg; struct srp_cmd *cmd = req->cmd->buf; int i, len, nents, count, ret; struct srp_device *dev; struct ib_device *ibdev; struct srp_map_state state; struct srp_indirect_buf *indirect_hdr;
u64 data_len;
u32 idb_len, table_len;
__be32 idb_rkey;
u8 fmt;
req->cmd->num_sge = 1;
/* No data to map: IU length is the command plus any additional CDB. */
if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) returnsizeof(struct srp_cmd) + cmd->add_cdb_len;
if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
scmnd->sc_data_direction != DMA_TO_DEVICE) {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled data direction %d\n",
scmnd->sc_data_direction); return -EINVAL;
}
/* NOTE(review): the body from here on is visibly cut between branches. */
if (count == 1 && target->global_rkey) { /* * The midlayer only generated a single gather/scatter * entry, or DMA mapping coalesced everything to a * single entry. So a direct descriptor along with * the DMA MR suffices.
*/ struct srp_direct_buf *buf;
/* * We have more than one scatter/gather entry, so build our indirect * descriptor table, trying to merge as many entries as we can.
*/
indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
/* We've mapped the request, now pull as much of the indirect * descriptor table as we can into the command buffer. If this * target is not using an external indirect table, we are * guaranteed to fit into the command, as the SCSI layer won't * give us more S/G entries than we allow.
*/ if (state.ndesc == 1) { /* * Memory registration collapsed the sg-list into one entry, * so use a direct descriptor.
*/ struct srp_direct_buf *buf;
if (unlikely(target->cmd_sg_cnt < state.ndesc &&
!target->allow_ext_sg)) {
shost_printk(KERN_ERR, target->scsi_host, "Could not fit S/G list into SRP_CMD\n");
ret = -EIO; goto unmap;
}
unmap:
srp_unmap_data(scmnd, ch, req); if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
ret = -E2BIG; return ret;
}
/* * Return an IU and possible credit to the free pool
*/ staticvoid srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, enum srp_iu_type iu_type)
{ unsignedlong flags;
/* * Must be called with ch->lock held to protect req_lim and free_tx. * If IU is not sent, it must be returned using srp_put_tx_iu(). * * Note: * An upper limit for the number of allocated information units for each * request type is: * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues * more than Scsi_Host.can_queue requests. * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than * one unanswered SRP request to an initiator.
*/ staticstruct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, enum srp_iu_type iu_type)
{ struct srp_target_port *target = ch->target;
s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; struct srp_iu *iu;
lockdep_assert_held(&ch->lock);
ib_process_cq_direct(ch->send_cq, -1);
if (list_empty(&ch->free_tx)) return NULL;
/* Initiator responses to target requests do not consume credits */ if (iu_type != SRP_IU_RSP) { if (ch->req_lim <= rsv) {
++target->zero_req_lim; return NULL;
}
--ch->req_lim;
}
iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
list_del(&iu->list); return iu;
}
/*
 * srp_send_done() - send completion handler; recycles the IU.
 *
 * Note: if this function is called from inside ib_drain_sq() then it will
 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
 * with status IB_WC_SUCCESS then that's a bug.
 *
 * Fix: "staticvoid" was missing the space between storage class and type.
 */
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	lockdep_assert_held(&ch->lock);

	/* Put the IU back on the free-tx list for reuse. */
	list_add(&iu->list, &ch->free_tx);
}
/*
 * NOTE(review): srp_post_send() below is truncated -- after the SGE-count
 * check the code jumps into a srp_post_recv() call that references an
 * undeclared "res" and clearly belongs to a different function. Also
 * "staticint" is missing a space. Restore from the original source.
 */
/** * srp_post_send() - send an SRP information unit * @ch: RDMA channel over which to send the information unit. * @iu: Information unit to send. * @len: Length of the information unit excluding immediate data.
*/ staticint srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{ struct srp_target_port *target = ch->target; struct ib_send_wr wr;
if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE)) return -EINVAL;
/* NOTE(review): foreign fragment -- re-posts a receive buffer. */
res = srp_post_recv(ch, iu); if (res != 0)
shost_printk(KERN_ERR, target->scsi_host,
PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 *
 * Fix: "staticvoid" was missing the space between storage class and type.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}
/*
 * NOTE(review): mid-function fragment -- the enclosing function's start is
 * not visible here (it maps SCSI data and completes the command;
 * presumably srp_queuecommand(); confirm against the full source). The
 * "err_iu" label targeted by the goto is not visible in this chunk.
 */
len = srp_map_data(scmnd, ch, req); if (len < 0) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "Failed to map data (%d)\n", len); /* * If we ran out of memory descriptors (-ENOMEM) because an * application is queuing many requests with more than * max_pages_per_mr sg-list elements, tell the SCSI mid-layer * to reduce queue depth temporarily.
*/
scmnd->result = len == -ENOMEM ?
DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16; goto err_iu;
}
/* * Avoid that the loops that iterate over the request ring can * encounter a dangling SCSI command pointer.
*/
req->scmnd = NULL;
/* Complete failed commands immediately; otherwise ask for a requeue. */
err: if (scmnd->result) {
scsi_done(scmnd);
ret = 0;
} else {
ret = SCSI_MLQUEUE_HOST_BUSY;
}
return ret;
}
/*
 * NOTE(review): srp_alloc_iu_bufs() is truncated -- the "err_no_ring"
 * label targeted by the gotos is never defined in this chunk and the
 * function's closing (freeing the rings and returning -ENOMEM) is missing.
 * The "err" cleanup loop also frees all queue_size entries even when only
 * some allocations succeeded -- verify against the original that
 * srp_free_iu() tolerates NULL. "staticint" is missing a space.
 */
/* * Note: the resources allocated in this function are freed in * srp_free_ch_ib().
*/ staticint srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{ struct srp_target_port *target = ch->target; int i;
ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
GFP_KERNEL); if (!ch->rx_ring) goto err_no_ring;
ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
GFP_KERNEL); if (!ch->tx_ring) goto err_no_ring;
/* Receive IUs are sized for target-to-initiator traffic. */
for (i = 0; i < target->queue_size; ++i) {
ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
ch->max_ti_iu_len,
GFP_KERNEL, DMA_FROM_DEVICE); if (!ch->rx_ring[i]) goto err;
}
/* Transmit IUs start out on the channel's free_tx list. */
for (i = 0; i < target->queue_size; ++i) {
ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
ch->max_it_iu_len,
GFP_KERNEL, DMA_TO_DEVICE); if (!ch->tx_ring[i]) goto err;
list_add(&ch->tx_ring[i]->list, &ch->free_tx);
}
return 0;
err: for (i = 0; i < target->queue_size; ++i) {
srp_free_iu(target->srp_host, ch->rx_ring[i]);
srp_free_iu(target->srp_host, ch->tx_ring[i]);
}
/*
 * NOTE(review): tail fragment of a timeout-computation helper whose header
 * is not visible in this chunk (references undeclared T_tr_ns,
 * max_compl_time_ms, rq_tmo_jiffies, qp_attr, attr_mask), followed by the
 * header of srp_cm_rep_handler() whose body is missing. "staticvoid" and
 * "conststruct" are missing spaces. Restore from the original source.
 */
/* * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, * table 91), both the QP timeout and the retry count have to be set * for RC QP's during the RTR to RTS transition.
*/
WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
(IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
/* * Set target->rq_tmo_jiffies to one second more than the largest time * it can take before an error completion is generated. See also * C9-140..142 in the IBTA spec for more information about how to * convert the QP Local ACK Timeout value to nanoseconds.
*/
T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
do_div(max_compl_time_ms, NSEC_PER_MSEC);
rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
return rq_tmo_jiffies;
}
/* NOTE(review): truncated -- body of srp_cm_rep_handler() is missing. */
staticvoid srp_cm_rep_handler(struct ib_cm_id *cm_id, conststruct srp_login_rsp *lrsp, struct srp_rdma_ch *ch)
{ struct srp_target_port *target = ch->target; struct ib_qp_attr *qp_attr = NULL; int attr_mask = 0; int ret = 0; int i;
/*
 * NOTE(review): the following German website disclaimer was accidentally
 * appended to this C source file. It is kept verbatim inside a comment so
 * the file remains well-formed; it should be removed entirely.
 * Translation: "The information on this website was carefully compiled to
 * the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed. Remark: the
 * colored syntax highlighting and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */