// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*/
d = efct->efcport->domain; if (d)
efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d); break; default:
efc_log_debug(hw->os, "unhandled link status %#x\n",
event->status); break;
}
return 0;
}
int
efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
{
u32 i, max_sgl, cpus;
if (hw->hw_setup_called) return 0;
/* * efct_hw_init() relies on NULL pointers indicating that a structure * needs allocation. If a structure is non-NULL, efct_hw_init() won't * free/realloc that memory
*/
memset(hw, 0, sizeof(struct efct_hw));
/* * Set all the queue sizes to the maximum allowed.
*/ for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i]; /* * Adjust the size of the WQs so that the CQ is twice as big as * the WQ to allow for 2 completions per IO. This allows us to * handle multi-phase as well as aborts.
*/
hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
/*
 * Reset the per-IO completion state of a HW IO that is being returned
 * to the free pool, so that a late completion for the old exchange
 * cannot trigger stale callbacks.
 */
static inline void
efct_hw_init_free_io(struct efct_hw_io *io)
{
	/*
	 * Set io->done to NULL, to avoid any callbacks, should
	 * a completion be received for one of these IOs
	 */
	io->done = NULL;
	io->abort_done = NULL;
	io->status_saved = false;
	io->abort_in_progress = false;
	/* 0xFFFF marks the IO type as invalid/unassigned */
	io->type = 0xFFFF;
	io->wq = NULL;
}
staticbool efct_hw_iotype_is_originator(u16 io_type)
{ switch (io_type) { case EFCT_HW_FC_CT: case EFCT_HW_ELS_REQ: returntrue; default: returnfalse;
}
}
/* clear xbusy flag if WCQE[XB] is clear */ if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
io->xbusy = false;
/* get extended CQE status */ switch (io->type) { case EFCT_HW_BLS_ACC: case EFCT_HW_BLS_RJT: break; case EFCT_HW_ELS_REQ:
sli_fc_els_did(&hw->sli, cqe, &ext);
len = sli_fc_response_length(&hw->sli, cqe); break; case EFCT_HW_ELS_RSP: case EFCT_HW_FC_CT_RSP: break; case EFCT_HW_FC_CT:
len = sli_fc_response_length(&hw->sli, cqe); break; case EFCT_HW_IO_TARGET_WRITE:
len = sli_fc_io_length(&hw->sli, cqe); break; case EFCT_HW_IO_TARGET_READ:
len = sli_fc_io_length(&hw->sli, cqe); break; case EFCT_HW_IO_TARGET_RSP: break; case EFCT_HW_IO_DNRX_REQUEUE: /* release the count for re-posting the buffer */ /* efct_hw_io_free(hw, io); */ break; default:
efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
io->type, io->indicator); break;
} if (status) {
ext = sli_fc_ext_status(&hw->sli, cqe); /* * If we're not an originator IO, and XB is set, then issue * abort for the IO from within the HW
*/ if (efct_hw_iotype_is_originator(io->type) &&
wcqe->flags & SLI4_WCQE_XB) { int rc;
/* * Because targets may send a response when the IO * completes using the same XRI, we must wait for the * XRI_ABORTED CQE to issue the IO callback
*/
rc = efct_hw_io_abort(hw, io, false, NULL, NULL); if (rc == 0) { /* * latch status to return after abort is * complete
*/
io->status_saved = true;
io->saved_status = status;
io->saved_ext = ext;
io->saved_len = len; goto exit_efct_hw_wq_process_io;
} elseif (rc == -EINPROGRESS) { /* * Already being aborted by someone else (ABTS * perhaps). Just return original * error.
*/
efc_log_debug(hw->os, "%s%#x tag=%#x\n", "abort in progress xri=",
io->indicator, io->reqtag);
} else { /* Failed to abort for some other reason, log * error
*/
efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n", "Failed to abort xri=",
io->indicator, io->reqtag, rc);
}
}
}
if (io->done) {
efct_hw_done_t done = io->done;
io->done = NULL;
if (io->status_saved) { /* use latched status if exists */
status = io->saved_status;
len = io->saved_len;
ext = io->saved_ext;
io->status_saved = false;
}
for (n_rem = hw->config.n_io; n_rem; n_rem -= n) { /* Copy address of SGL's into local sgls[] array, break * out if the xri is not contiguous.
*/
u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
for (n = 0; n < min; n++) { /* Check that we have contiguous xri values */ if (n > 0) { if (hw->io[idx + n]->indicator !=
hw->io[idx + n - 1]->indicator + 1) break;
}
sgls[n] = hw->io[idx + n]->sgl;
}
if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
hw->io[idx]->indicator, n, sgls, NULL, &req)) {
rc = -EIO; break;
}
rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL); if (rc) {
efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc); break;
}
/* Add to tail if successful */ for (i = 0; i < n; i++, idx++) {
io = hw->io[idx];
io->state = EFCT_HW_IO_STATE_FREE;
INIT_LIST_HEAD(&io->list_entry);
list_add_tail(&io->list_entry, &hw->io_free);
}
}
/* * Set the filter match/mask values from hw's * filter_def values
*/ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
rq_cfg[i].rq_id = cpu_to_le16(0xffff);
rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
}
/* * Update the rq_id's of the FCF configuration * (don't update more than the number of rq_cfg * elements)
*/
min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG; for (i = 0; i < min_rq_count; i++) { struct hw_rq *rq = hw->hw_rq[i];
u32 j;
/* * Since the hash is always bigger than the number of queues, then we * never have to worry about an infinite loop.
*/ while (hash[hash_index].in_use)
hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
/* not used, claim the entry */
hash[hash_index].id = id;
hash[hash_index].in_use = true;
hash[hash_index].index = index;
}
rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); if (rc)
efc_log_err(hw->os, "efct_hw_command returns %d\n", rc); else
efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
return rc;
}
int
efct_hw_init(struct efct_hw *hw)
{ int rc;
u32 i = 0; int rem_count; unsignedlong flags = 0; struct efct_hw_io *temp; struct efc_dma *dma;
/* * Make sure the command lists are empty. If this is start-of-day, * they'll be empty since they were just initialized in efct_hw_setup. * If we've just gone through a reset, the command and command pending * lists should have been cleaned up as part of the reset * (efct_hw_reset()).
*/
spin_lock_irqsave(&hw->cmd_lock, flags); if (!list_empty(&hw->cmd_head)) {
spin_unlock_irqrestore(&hw->cmd_lock, flags);
efc_log_err(hw->os, "command found on cmd list\n"); return -EIO;
} if (!list_empty(&hw->cmd_pending)) {
spin_unlock_irqrestore(&hw->cmd_lock, flags);
efc_log_err(hw->os, "command found on pending list\n"); return -EIO;
}
spin_unlock_irqrestore(&hw->cmd_lock, flags);
/* Free RQ buffers if prevously allocated */
efct_hw_rx_free(hw);
/* * The IO queues must be initialized here for the reset case. The * efct_hw_init_io() function will re-add the IOs to the free list. * The cmd_head list should be OK since we free all entries in * efct_hw_command_cancel() that is called in the efct_hw_reset().
*/
/* If we are in this function due to a reset, there may be stale items * on lists that need to be removed. Clean them up.
*/
rem_count = 0; while ((!list_empty(&hw->io_wait_free))) {
rem_count++;
temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
list_entry);
list_del_init(&temp->list_entry);
} if (rem_count > 0)
efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
rem_count);
rem_count = 0; while ((!list_empty(&hw->io_inuse))) {
rem_count++;
temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
list_entry);
list_del_init(&temp->list_entry);
} if (rem_count > 0)
efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
rem_count);
rem_count = 0; while ((!list_empty(&hw->io_free))) {
rem_count++;
temp = list_first_entry(&hw->io_free, struct efct_hw_io,
list_entry);
list_del_init(&temp->list_entry);
} if (rem_count > 0)
efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
rem_count);
/* If MRQ not required, Make sure we dont request feature. */ if (hw->config.n_rq == 1)
hw->sli.features &= (~SLI4_REQFEAT_MRQP);
if (sli_init(&hw->sli)) {
efc_log_err(hw->os, "SLI failed to initialize\n"); return -EIO;
}
if (hw->sliport_healthcheck) {
rc = efct_hw_config_sli_port_health_check(hw, 0, 1); if (rc != 0) {
efc_log_err(hw->os, "Enable port Health check fail\n"); return rc;
}
}
/* * Set FDT transfer hint, only works on Lancer
*/ if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) { /* * Non-fatal error. In particular, we can disregard failure to * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware * that do not support EFCT_HW_FDT_XFER_HINT feature.
*/
efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
}
/* zero the hashes */
memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
/* * Allocate the WQ request tag pool, if not previously allocated * (the request tag value is 16 bits, thus the pool allocation size * of 64k)
*/
hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw); if (!hw->wq_reqtag_pool) {
efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n"); return -ENOMEM;
}
/* record the fact that the queues are functional */
hw->state = EFCT_HW_STATE_ACTIVE; /* * Allocate a HW IOs for send frame.
*/
hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw); if (!hw->hw_wq[0]->send_frame_io)
efc_log_err(hw->os, "alloc for send_frame_io failed\n");
/* Initialize send frame sequence id */
atomic_set(&hw->send_frame_seq_id, 0);
/* * In RQ pair mode, we MUST post the header and payload buffer at the * same time.
*/ for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) { struct hw_rq *rq = hw->hw_rq[rq_idx];
for (i = 0; i < rq->entry_count - 1; i++) { struct efc_hw_sequence *seq;
/* * If the chip is in an error state (UE'd) then reject this mailbox * command.
*/ if (sli_fw_error_status(&hw->sli) > 0) {
efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
sli_reg_read_status(&hw->sli),
sli_reg_read_err1(&hw->sli),
sli_reg_read_err2(&hw->sli));
return -EIO;
}
/* * Send a mailbox command to the hardware, and either wait for * a completion (EFCT_CMD_POLL) or get an optional asynchronous * completion (EFCT_CMD_NOWAIT).
*/
if (opts == EFCT_CMD_POLL) {
mutex_lock(&hw->bmbx_lock);
bmbx = hw->sli.bmbx.virt;
/* * Manually clean up remaining commands. Note: since this calls * efct_hw_command_process(), we'll also process the cmd_pending * list, so no need to manually clean that out.
*/ while (!list_empty(&hw->cmd_head)) {
u8 mqe[SLI4_BMBX_SIZE] = { 0 }; struct efct_command_ctx *ctx;
/* * Allocate a callback context (which includes the mbox cmd buffer), * we need this to be persistent as the mbox cmd submission may be * queued and executed later execution.
*/
ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC); if (!ctx) return -EIO;
/*
 * Move a freed HW IO onto the correct list based on its exchange-busy
 * (XB) state: IOs still busy on the exchange go to io_wait_free until
 * the XRI_ABORTED CQE arrives; idle IOs go straight to io_free.
 * Caller is expected to hold the appropriate IO list lock.
 */
static void
efct_hw_io_free_move_correct_list(struct efct_hw *hw, struct efct_hw_io *io)
{
	/*
	 * When an IO is freed, depending on the exchange busy flag,
	 * move it to the correct list.
	 */
	if (io->xbusy) {
		/*
		 * add to wait_free list and wait for XRI_ABORTED CQEs to
		 * clean up
		 */
		INIT_LIST_HEAD(&io->list_entry);
		list_add_tail(&io->list_entry, &hw->io_wait_free);
		io->state = EFCT_HW_IO_STATE_WAIT_FREE;
	} else {
		/* IO not busy, add to free list */
		INIT_LIST_HEAD(&io->list_entry);
		list_add_tail(&io->list_entry, &hw->io_free);
		io->state = EFCT_HW_IO_STATE_FREE;
	}
}
/* * Some IO types have underlying hardware requirements on the order * of SGEs. Process all special entries here.
*/ switch (type) { case EFCT_HW_IO_TARGET_WRITE:
io->n_sge = 1; break; case EFCT_HW_IO_TARGET_READ: /* * For FCP_TSEND64, the first 2 entries are SKIP SGE's
*/
skips = EFCT_TARGET_READ_SKIPS; break; case EFCT_HW_IO_TARGET_RSP: /* * No skips, etc. for FCP_TRSP64
*/ break; default:
efc_log_err(hw->os, "unsupported IO type %#x\n", type); return -EIO;
}
/* * Always assume this is the last entry and mark as such. * If this is not the first entry unset the "last SGE" * indication for the previous entry
*/
sge_flags |= SLI4_SGE_LAST;
data->dw2_flags = cpu_to_le32(sge_flags);
/* * For IOs that were aborted internally, we may need to issue the * callback here depending on whether a XRI_ABORTED CQE is expected ot * not. If the status is Local Reject/No XRI, then * issue the callback now.
*/
ext = sli_fc_ext_status(&hw->sli, cqe); if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
efct_hw_done_t done = io->done;
io->done = NULL;
/* * Use latched status as this is always saved for an internal * abort Note: We won't have both a done and abort_done * function, so don't worry about * clobbering the len, status and ext fields.
*/
status = io->saved_status;
len = io->saved_len;
ext = io->saved_ext;
io->status_saved = false;
done(io, len, status, ext, io->arg);
}
if (io->abort_done) {
efct_hw_done_t done = io->abort_done;
/* * Call efct_hw_io_free() because this releases the WQ reservation as * well as doing the refcount put. Don't duplicate the code here.
*/
(void)efct_hw_io_free(hw, io);
}
/* take a reference on IO being aborted */ if (kref_get_unless_zero(&io_to_abort->ref) == 0) { /* command no longer active */
efc_log_debug(hw->os, "io not active xri=0x%x tag=0x%x\n",
io_to_abort->indicator, io_to_abort->reqtag); return -ENOENT;
}
/* Must have a valid WQ reference */ if (!io_to_abort->wq) {
efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
io_to_abort->indicator); /* efct_ref_get(): same function */
kref_put(&io_to_abort->ref, io_to_abort->release); return -ENOENT;
}
/* * Validation checks complete; now check to see if already being * aborted, if not set the flag.
*/ if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) { /* efct_ref_get(): same function */
kref_put(&io_to_abort->ref, io_to_abort->release);
efc_log_debug(hw->os, "io already being aborted xri=0x%x tag=0x%x\n",
io_to_abort->indicator, io_to_abort->reqtag); return -EINPROGRESS;
}
/* * If we got here, the possibilities are: * - host owned xri * - io_to_abort->wq_index != U32_MAX * - submit ABORT_WQE to same WQ * - port owned xri: * - rxri: io_to_abort->wq_index == U32_MAX * - submit ABORT_WQE to any WQ * - non-rxri * - io_to_abort->index != U32_MAX * - submit ABORT_WQE to same WQ * - io_to_abort->index == U32_MAX * - submit ABORT_WQE to any WQ
*/
io_to_abort->abort_done = cb;
io_to_abort->abort_arg = arg;
/* Allocate a request tag for the abort portion of this IO */
wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort); if (!wqcb) {
efc_log_err(hw->os, "can't allocate request tag\n"); return -ENOSPC;
}
/* * If the wqe is on the pending list, then set this wqe to be * aborted when the IO's wqe is removed from the list.
*/ if (io_to_abort->wq) {
spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags); if (io_to_abort->wqe.list_entry.next) {
io_to_abort->wqe.abort_wqe_submit_needed = true;
spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
flags); return 0;
}
spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
}
efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
/* ABORT_WQE does not actually utilize an XRI on the Port, * therefore, keep xbusy as-is to track the exchange's state, * not the ABORT_WQE's state
*/ if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
io_to_abort->abort_in_progress = false; /* efct_ref_get(): same function */
kref_put(&io_to_abort->ref, io_to_abort->release); return -EIO;
}
reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL); if (!reqtag_pool) return NULL;
INIT_LIST_HEAD(&reqtag_pool->freelist); /* initialize reqtag pool lock */
spin_lock_init(&reqtag_pool->lock); for (i = 0; i < U16_MAX; i++) {
wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL); if (!wqcb) break;
wqcb = hw->wq_reqtag_pool->tags[instance_index]; if (!wqcb)
efc_log_err(hw->os, "wqcb for instance %d is null\n",
instance_index);
return wqcb;
}
int
efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
{ int index = -1; int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
/* * Since the hash is always bigger than the maximum number of Qs, then * we never have to worry about an infinite loop. We will always find * an unused entry.
*/ do { if (hash[i].in_use && hash[i].id == id)
index = hash[i].index; else
i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
} while (index == -1 && hash[i].in_use);
/* * The caller should disable interrupts if they wish to prevent us * from processing during a shutdown. The following states are defined: * EFCT_HW_STATE_UNINITIALIZED - No queues allocated * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, * queues are cleared. * EFCT_HW_STATE_ACTIVE - Chip and queues are operational * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox * completions.
*/ if (hw->state == EFCT_HW_STATE_UNINITIALIZED) return 0;
/* Get pointer to struct hw_eq */
eq = hw->hw_eq[vector]; if (!eq) return 0;
while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
u16 cq_id = 0; int rc;
rc = sli_eq_parse(&hw->sli, eqe, &cq_id); if (unlikely(rc)) { if (rc == SLI4_EQE_STATUS_EQ_FULL) {
u32 i;
/* * Received a sentinel EQE indicating the * EQ is full. Process all CQs
*/ for (i = 0; i < hw->cq_count; i++)
efct_hw_cq_process(hw, hw->hw_cq[i]); continue;
} else { return rc;
}
} else { int index;
index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
while (!sli_cq_read(&hw->sli, cq->queue, cqe)) { int status;
status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid); /* * The sign of status is significant. If status is: * == 0 : call completed correctly and * the CQE indicated success * > 0 : call completed correctly and * the CQE indicated an error * < 0 : call failed and no information is available about the * CQE
*/ if (status < 0) { if (status == SLI4_MCQE_STATUS_NOT_COMPLETED) /* * Notification that an entry was consumed, * but not completed
*/ continue;
break;
}
switch (ctype) { case SLI4_QENTRY_ASYNC:
sli_cqe_async(&hw->sli, cqe); break; case SLI4_QENTRY_MQ: /* * Process MQ entry. Note there is no way to determine * the MQ_ID from the completion entry.
*/
efct_hw_mq_process(hw, status, hw->mq); break; case SLI4_QENTRY_WQ:
efct_hw_wq_process(hw, cq, cqe, status, rid); break; case SLI4_QENTRY_WQ_RELEASE: {
u32 wq_id = rid; int index; struct hw_wq *wq = NULL;
index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
if (likely(index >= 0)) {
wq = hw->hw_wq[index];
} else {
efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id); break;
} /* Submit any HW IOs that are on the WQ pending list */
hw_wq_submit_pending(wq, wq->wqec_set_count);
io = efct_hw_io_lookup(hw, rid); if (!io) { /* IO lookup failure should never happen */
efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid); return;
}
if (!io->xbusy)
efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid); else /* mark IO as no longer busy */
io->xbusy = false;
/* * For IOs that were aborted internally, we need to issue any pending * callback here.
*/ if (io->done) {
efct_hw_done_t done = io->done; void *arg = io->arg;
/* * Use latched status as this is always saved for an internal * abort
*/ int status = io->saved_status;
u32 len = io->saved_len;
u32 ext = io->saved_ext;
io->done = NULL;
io->status_saved = false;
done(io, len, status, ext, arg);
}
spin_lock_irqsave(&hw->io_lock, flags); if (io->state == EFCT_HW_IO_STATE_INUSE ||
io->state == EFCT_HW_IO_STATE_WAIT_FREE) { /* if on wait_free list, caller has already freed IO; * remove from wait_free list and add to free list. * if on in-use list, already marked as no longer busy; * just leave there and wait for caller to free.
*/ if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
io->state = EFCT_HW_IO_STATE_FREE;
list_del_init(&io->list_entry);
efct_hw_io_free_move_correct_list(hw, io);
}
}
spin_unlock_irqrestore(&hw->io_lock, flags);
}
/*
 * Drain all event queues: walk every EQ and process any remaining
 * completions (~0 = no per-call budget limit). Always returns 0.
 */
static int
efct_hw_flush(struct efct_hw *hw)
{
	u32 i = 0;

	/* Process any remaining completions */
	for (i = 0; i < hw->eq_count; i++)
		efct_hw_process(hw, i, ~0);

	return 0;
}
int
efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
{ int rc = 0; unsignedlong flags = 0;
/* * Add IO to active io wqe list before submitting, in case the * wcqe processing preempts this thread.
*/
hio->wq->use_count++;
rc = efct_hw_wq_write(hio->wq, &hio->wqe); if (rc >= 0) { /* non-negative return is success */
rc = 0;
} else { /* failed to write wqe, remove from active wqe list */
efc_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
hio->xbusy = false;
}
/** * efct_els_hw_srrs_send() - Send a single request and response cmd. * @efc: efc library structure * @io: Discovery IO used to hold els and ct cmd context. * * This routine supports communication sequences consisting of a single * request and single response between two endpoints. Examples include: * - Sending an ELS request. * - Sending an ELS response - To send an ELS response, the caller must provide * the OX_ID from the received request. * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request, * the caller must provide the R_CTL, TYPE, and DF_CTL * values to place in the FC frame header. * * Return: Status of the request.
*/ int
efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
{ struct efct *efct = efc->base;
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.29 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.