/* * This is the Fusion MPT base driver providing common API layer interface * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c * Copyright (C) 2012-2014 LSI Corporation * Copyright (C) 2013-2014 Avago Technologies * (mailto: MPT-FusionLinux.pdl@avagotech.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations.
* DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
* You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA.
*/
/* Module parameters.  Fix: the extraction fused "static int" into the
 * invalid token "staticint" on each declaration; restored the keywords.
 * Note: mpt3sas_fwfault_debug deliberately has no module_param() here —
 * it is registered via module_param_call() next to its setter below.
 */
static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
	"irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
	"0 - balanced: high iops mode is enabled &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
	"1 - iops: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues,\n\t\t"
	"2 - latency: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
	"\t\tdefault - default perf_mode is 'balanced'"
	);

static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues,
	"Number of queues to be use for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1. &\n\t\t"
	"when poll_queues are enabled then &\n\t\t"
	"perf_mode is set to latency mode. &\n\t\t"
	);
/**
 * mpt3sas_base_check_cmd_timeout - Function to check timeout and
 * command termination due to Host reset.
 *
 * @ioc: per adapter object.
 * @status: Status of issued command.
 * @mpi_request: mf request pointer.
 * @sz: size of buffer.
 *
 * Return: 1/0 Reset to be done or Not
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
	u8 status, void *mpi_request, int sz)
{
	/* No RESET flag set means the command genuinely timed out. */
	u8 issue_reset = (status & MPT3_CMD_RESET) ? 0 : 1;

	ioc_err(ioc, "Command %s\n",
	    issue_reset ? "Timeout" : "terminated due to Host Reset");
	_debug_dump_mf(mpi_request, sz);

	return issue_reset;
}
/** * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. * @val: ? * @kp: ? * * Return: ?
*/ staticint
_scsih_set_fwfault_debug(constchar *val, conststruct kernel_param *kp)
{ int ret = param_set_int(val, kp); struct MPT3SAS_ADAPTER *ioc;
if (ret) return ret;
/* global ioc spinlock to protect controller list on list operations */
pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
ioc->fwfault_debug = mpt3sas_fwfault_debug;
spin_unlock(&gioc_lock); return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
param_get_int, &mpt3sas_fwfault_debug, 0644);
/** * _base_readl_aero - retry readl for max three times. * @addr: MPT Fusion system interface register address * * Retry the readl() for max three times if it gets zero value * while reading the system interface register.
*/ staticinline u32
_base_readl_aero(constvoid __iomem *addr)
{
u32 i = 0, ret_val;
do {
ret_val = readl(addr);
i++;
} while (ret_val == 0 && i < 3);
return ret_val;
}
static u32
_base_readl_ext_retry(constvoid __iomem *addr)
{
u32 i, ret_val;
for (i = 0 ; i < 30 ; i++) {
ret_val = readl(addr); if (ret_val != 0) break;
}
/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 * in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 *
 * Fix: the extraction fused "static void" into the invalid token
 * "staticvoid"; restored the keywords.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
	u32 index)
{
	/*
	 * 256 is offset within sys register.
	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
	 */
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
		MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) + (index * sizeof(u32));

	writel(reply, reply_free_iomem);
}
/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 * to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 *
 * Fix: the extraction fused "static void" into the invalid token
 * "staticvoid"; restored the keywords.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)src;

	/* Copy one 32-bit word at a time via writel(). */
	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
		    (void __iomem *)dst_iomem + (i * 4));
}
/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 *
 * Fix: the extraction fused "static void" into the invalid token
 * "staticvoid"; restored the keywords.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)(src);

	/* Copy one 32-bit word at a time via writel(). */
	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
		    (void __iomem *)dst_iomem + (i * 4));
}
/** * _base_get_chain - Calculates and Returns virtual chain address * for the provided smid in BAR0 space. * * @ioc: per adapter object * @smid: system request message index * @sge_chain_count: Scatter gather chain count. * * Return: the chain address.
*/ staticinlinevoid __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u8 sge_chain_count)
{ void __iomem *base_chain, *chain_virt;
u16 cmd_credit = ioc->facts.RequestCredit + 1;
/** * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host * buffer address for the provided smid. * (Each smid can have 64K starts from 17024) * * @ioc: per adapter object * @smid: system request message index * * Return: Pointer to buffer location in BAR0.
*/
/* From smid we can get scsi_cmd, once we have sg_scmd, * we just need to get sg_virt and sg_next to get virtual * address associated with sgel->Address.
*/
if (is_scsiio_req) { /* Get scsi_cmd using smid */
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); if (scmd == NULL) {
ioc_err(ioc, "scmd is NULL\n"); return;
}
/* Get sg_scmd from scmd provided */
sg_scmd = scsi_sglist(scmd);
}
/* * 0 - 255 System register * 256 - 4352 MPI Frame. (This is based on maxCredit 32) * 4352 - 4864 Reply_free pool (512 byte is reserved * considering maxCredit 32. Reply need extra * room, for mCPU case kept four times of * maxCredit). * 4864 - 17152 SGE chain element. (32cmd * 3 chain of * 128 byte size = 12288) * 17152 - x Host buffer mapped with smid. * (Each smid can have 64K Max IO.) * BAR0+Last 1K MSIX Addr and Data * Total size in use 2113664 bytes of 4MB BAR0
*/
switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) { case MPI2_SGE_FLAGS_CHAIN_ELEMENT: /* * Helper function which on passing * chain_buffer_dma returns chain_buffer. Get * the virtual address for sgel->Address
*/
sgel_next =
_base_get_chain_buffer_dma_to_chain_buffer(ioc,
le32_to_cpu(sgel->Address)); if (sgel_next == NULL) return; /* * This is coping 128 byte chain * frame (not a host buffer)
*/
dst_chain_addr[sge_chain_count] =
_base_get_chain(ioc,
smid, sge_chain_count);
src_chain_addr[sge_chain_count] =
(void *) sgel_next;
dst_addr_phys = _base_get_chain_phys(ioc,
smid, sge_chain_count);
WARN_ON(dst_addr_phys > U32_MAX);
sgel->Address =
cpu_to_le32(lower_32_bits(dst_addr_phys));
sgel = sgel_next;
sge_chain_count++; break; case MPI2_SGE_FLAGS_SIMPLE_ELEMENT: if (is_write) { if (is_scsiio_req) {
_base_clone_to_sys_mem(buff_ptr,
sg_virt(sg_scmd),
(le32_to_cpu(sgel->FlagsLength) &
0x00ffffff)); /* * FIXME: this relies on a a zero * PCI mem_offset.
*/
sgel->Address =
cpu_to_le32((u32)buff_ptr_phys);
} else {
_base_clone_to_sys_mem(buff_ptr,
ioc->config_vaddr,
(le32_to_cpu(sgel->FlagsLength) &
0x00ffffff));
sgel->Address =
cpu_to_le32((u32)buff_ptr_phys);
}
}
buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
0x00ffffff);
buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
0x00ffffff); if ((le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_END_OF_BUFFER
<< MPI2_SGE_FLAGS_SHIFT))) goto eob_clone_chain; else { /* * Every single element in MPT will have * associated sg_next. Better to sanity that * sg_next is not NULL, but it will be a bug * if it is null.
*/ if (is_scsiio_req) {
sg_scmd = sg_next(sg_scmd); if (sg_scmd)
sgel++; else goto eob_clone_chain;
}
} break;
}
}
eob_clone_chain: for (i = 0; i < sge_chain_count; i++) { if (is_scsiio_req)
_base_clone_to_sys_mem(dst_chain_addr[i],
src_chain_addr[i], ioc->request_sz);
}
}
/** * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc * @arg: input argument, used to derive ioc * * Return: * 0 if controller is removed from pci subsystem. * -1 for other case.
*/ staticint mpt3sas_remove_dead_ioc_func(void *arg)
{ struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg; struct pci_dev *pdev;
doorbell = mpt3sas_base_get_iocstate(ioc, 0); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
ioc_err(ioc, "SAS host is non-operational !!!!\n");
/* It may be possible that EEH recovery can resolve some of * pci bus failure issues rather removing the dead ioc function * by considering controller is in a non-operational state. So * here priority is given to the EEH recovery. If it doesn't * not resolve this issue, mpt3sas driver will consider this * controller to non-operational state and remove the dead ioc * function.
*/ if (ioc->non_operational_loop++ < 5) {
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
flags); goto rearm_timer;
}
/* * Call _scsih_flush_pending_cmds callback so that we flush all * pending commands back to OS. This call is required to avoid * deadlock at block layer. Dead IOC will fail to do diag reset, * and this call is safe since dead ioc will never return any * command back from HW.
*/
mpt3sas_base_pause_mq_polling(ioc);
ioc->schedule_dead_ioc_flush_running_cmds(ioc); /* * Set remove_host flag early since kernel thread will * take some time to execute.
*/
ioc->remove_host = 1; /*Remove the Dead Host */
p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc, "%s_dead_ioc_%d", ioc->driver_name, ioc->id); if (IS_ERR(p))
ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
__func__); else
ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
__func__); return; /* don't rearm timer */
}
/* Wait until CoreDump completes or times out */ if (ioc->ioc_coredump_loop++ < timeout) {
spin_lock_irqsave(
&ioc->ioc_reset_in_progress_lock, flags); goto rearm_timer;
}
}
/** * mpt3sas_base_wait_for_coredump_completion - Wait until coredump * completes or times out * @ioc: per adapter object * @caller: caller function name * * Return: 0 for success, non-zero for failure.
*/ int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, constchar *caller)
{
u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
ioc->manu_pg11.CoreDumpTOSec :
MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
timeout);
/**
 * mpt3sas_halt_firmware - halt's mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xCOFFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the enduser can
 * obtain a ring buffer from controller UART.
 *
 * Fix: the extraction fused "else if" into the invalid token "elseif";
 * restored the keywords.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, doorbell &
		    MPI2_DOORBELL_DATA_MASK);
	} else {
		/* Force a firmware halt so a UART ring buffer can be taken. */
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		ioc_err(ioc, "Firmware is halted due to command timeout\n");
	}

	/* fwfault_debug == 2: spin forever so the state can be inspected;
	 * otherwise panic to stop the driver as well.
	 */
	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}
/** * _base_sas_ioc_info - verbose translation of the ioc status * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @request_hdr: request mf
*/ staticvoid
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
MPI2RequestHeader_t *request_hdr)
{
u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
MPI2_IOCSTATUS_MASK; char *desc = NULL;
u16 frame_sz; char *func_str = NULL;
/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */ if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION) return;
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) return; /* * Older Firmware version doesn't support driver trigger pages. * So, skip displaying 'config invalid type' type * of error message.
*/ if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;
/**************************************************************************** * Common IOCStatus values for all replies
****************************************************************************/
case MPI2_IOCSTATUS_INVALID_FUNCTION:
desc = "invalid function"; break; case MPI2_IOCSTATUS_BUSY:
desc = "busy"; break; case MPI2_IOCSTATUS_INVALID_SGL:
desc = "invalid sgl"; break; case MPI2_IOCSTATUS_INTERNAL_ERROR:
desc = "internal error"; break; case MPI2_IOCSTATUS_INVALID_VPID:
desc = "invalid vpid"; break; case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
desc = "insufficient resources"; break; case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
desc = "insufficient power"; break; case MPI2_IOCSTATUS_INVALID_FIELD:
desc = "invalid field"; break; case MPI2_IOCSTATUS_INVALID_STATE:
desc = "invalid state"; break; case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
desc = "op state not supported"; break;
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: break;
/**************************************************************************** * For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/
case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
desc = "eedp guard error"; break; case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
desc = "eedp ref tag error"; break; case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
desc = "eedp app tag error"; break;
/** * mpt3sas_base_done - base internal command completion routine * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * * Return: * 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function.
*/
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply)
{
MPI2DefaultReply_t *mpi_reply;
/** * _base_async_event - main callback handler for firmware asyn events * @ioc: per adapter object * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * * Return: * 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function.
*/ static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
Mpi2EventNotificationReply_t *mpi_reply;
Mpi2EventAckRequest_t *ack_request;
u16 smid; struct _event_ack_list *delayed_event_ack;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); if (!mpi_reply) return 1; if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) return 1;
/**
 * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
 * when driver is flushing out the IOs.
 * @ioc: per adapter object
 *
 * Pause polling on the mq poll (io uring) queues when driver is flushing
 * out the IOs. Otherwise we may see the race condition of completing the same
 * IO from two paths.
 *
 * Returns nothing.
 */
void
mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
{
	int iopoll_q_count =
	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
	int q;

	/* Request every io_uring poll queue to pause. */
	for (q = 0; q < iopoll_q_count; q++)
		atomic_set(&ioc->io_uring_poll_queues[q].pause, 1);

	/* Wait for any poll currently in flight on each queue to drain. */
	for (q = 0; q < iopoll_q_count; q++) {
		while (atomic_read(&ioc->io_uring_poll_queues[q].busy)) {
			cpu_relax();
			udelay(500);
		}
	}
}
/** * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues. * @ioc: per adapter object * * Returns nothing.
*/ void
mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
{ int iopoll_q_count =
ioc->reply_queue_count - ioc->iopoll_q_start_index; int qid;
rpf->Words = cpu_to_le64(ULLONG_MAX);
reply_q->reply_post_host_index =
(reply_q->reply_post_host_index ==
(ioc->reply_post_queue_depth - 1)) ? 0 :
reply_q->reply_post_host_index + 1;
request_descript_type =
reply_q->reply_post_free[reply_q->reply_post_host_index]. Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
completed_cmds++; /* Update the reply post host index after continuously * processing the threshold number of Reply Descriptors. * So that FW can find enough entries to post the Reply * Descriptors in the reply descriptor post queue.
*/ if (completed_cmds >= ioc->thresh_hold) { if (ioc->combined_reply_queue) {
writel(reply_q->reply_post_host_index |
((msix_index & 7) <<
MPI2_RPHI_MSIX_INDEX_SHIFT),
ioc->replyPostRegisterIndex[msix_index/8]);
} else {
writel(reply_q->reply_post_host_index |
(msix_index <<
MPI2_RPHI_MSIX_INDEX_SHIFT),
&ioc->chip->ReplyPostHostIndex);
} if (!reply_q->is_iouring_poll_q &&
!reply_q->irq_poll_scheduled) {
reply_q->irq_poll_scheduled = true;
irq_poll_sched(&reply_q->irqpoll);
}
atomic_dec(&reply_q->busy); return completed_cmds;
} if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) goto out; if (!reply_q->reply_post_host_index)
rpf = reply_q->reply_post_free; else
rpf++;
} while (1);
out:
if (!completed_cmds) {
atomic_dec(&reply_q->busy); return completed_cmds;
}
if (ioc->is_warpdrive) {
writel(reply_q->reply_post_host_index,
ioc->reply_post_host_index[msix_index]);
atomic_dec(&reply_q->busy); return completed_cmds;
}
/* Update Reply Post Host Index. * For those HBA's which support combined reply queue feature * 1. Get the correct Supplemental Reply Post Host Index Register. * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host * Index Register address bank i.e replyPostRegisterIndex[], * 2. Then update this register with new reply host index value * in ReplyPostIndex field and the MSIxIndex field with * msix_index value reduced to a value between 0 and 7, * using a modulo 8 operation. Since each Supplemental Reply Post * Host Index Register supports 8 MSI-X vectors. * * For other HBA's just update the Reply Post Host Index register with * new reply host index value in ReplyPostIndex Field and msix_index * value in MSIxIndex field.
*/ if (ioc->combined_reply_queue)
writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
MPI2_RPHI_MSIX_INDEX_SHIFT),
ioc->replyPostRegisterIndex[msix_index/8]); else
writel(reply_q->reply_post_host_index | (msix_index <<
MPI2_RPHI_MSIX_INDEX_SHIFT),
&ioc->chip->ReplyPostHostIndex);
atomic_dec(&reply_q->busy); return completed_cmds;
}
/** * mpt3sas_blk_mq_poll - poll the blk mq poll queue * @shost: Scsi_Host object * @queue_num: hw ctx queue number * * Return number of entries that has been processed from poll queue.
*/ int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsignedint queue_num)
{ struct MPT3SAS_ADAPTER *ioc =
(struct MPT3SAS_ADAPTER *)shost->hostdata; struct adapter_reply_queue *reply_q; int num_entries = 0; int qid = queue_num - ioc->iopoll_q_start_index;
if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
!atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1)) return 0;
if (ioc->mask_interrupts) return IRQ_NONE; if (reply_q->irq_poll_scheduled) return IRQ_HANDLED; return ((_base_process_reply_queue(reply_q) > 0) ?
IRQ_HANDLED : IRQ_NONE);
}
/**
 * _base_irqpoll - IRQ poll callback handler
 * @irqpoll: irq_poll object
 * @budget: irq poll weight
 *
 * Fix: the function body was truncated — the "return num_entries;" and
 * closing brace were missing (undefined behavior for a non-void
 * function); restored them.
 *
 * Return: number of reply descriptors processed
 */
static int
_base_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct adapter_reply_queue *reply_q;
	int num_entries = 0;

	reply_q = container_of(irqpoll, struct adapter_reply_queue,
	    irqpoll);
	if (reply_q->irq_line_enable) {
		disable_irq_nosync(reply_q->os_irq);
		reply_q->irq_line_enable = false;
	}
	num_entries = _base_process_reply_queue(reply_q);
	if (num_entries < budget) {
		irq_poll_complete(irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		enable_irq(reply_q->os_irq);
		/*
		 * Go for one more round of processing the
		 * reply descriptor post queue in case the HBA
		 * Firmware has posted some reply descriptors
		 * while reenabling the IRQ.
		 */
		_base_process_reply_queue(reply_q);
	}

	return num_entries;
}
/**
 * _base_is_controller_msix_enabled - is controller support muli-reply queues
 * @ioc: per adapter object
 *
 * Fix: the extraction fused "static inline int" into the invalid token
 * "staticinlineint"; restored the keywords.
 *
 * Return: Whether or not MSI/X is enabled.
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}
/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * @poll: poll over reply descriptor pools incase interrupt for
 *		timed-out SCSI command got delayed
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 *
 * Fix: the function body was truncated — the closing braces of the
 * list_for_each_entry loop and of the function were missing, and the
 * @poll parameter was never used; restored the tail from the upstream
 * driver.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
{
	struct adapter_reply_queue *reply_q;

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
		    ioc->pci_error_recovery)
			return;
		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;

		if (reply_q->is_iouring_poll_q) {
			_base_process_reply_queue(reply_q);
			continue;
		}

		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
		if (reply_q->irq_poll_scheduled) {
			/* Calling irq_poll_disable will wait for any pending
			 * callbacks to have completed.
			 */
			irq_poll_disable(&reply_q->irqpoll);
			irq_poll_enable(&reply_q->irqpoll);
			/* check how the scheduled poll has ended,
			 * clean up only if necessary
			 */
			if (reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = false;
				reply_q->irq_line_enable = true;
				enable_irq(reply_q->os_irq);
			}
		}

		if (poll)
			_base_process_reply_queue(reply_q);
	}
}
/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt
 * callback handler
 * @cb_func: callback function
 *
 * Walks mpt_callbacks[] from the top down for a free slot and installs
 * @cb_func there.
 *
 * Fix: the extracted body had been fused with lines belonging to a
 * different function (a loop releasing every callback index) and was
 * missing both the slot assignment and the "return cb_idx;" that a
 * non-void function requires; restored the body from the upstream
 * driver.
 *
 * Return: Index of @cb_func.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}
/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to insure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Fix: the extraction fused "static void" into the invalid token
 * "staticvoid"; restored the keywords.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);

	ioc->base_add_sg_single(paddr, flags_length, -1);
}
/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Fix: the extracted body was truncated — it declared @sgel but never
 * wrote through it and lacked a closing brace; restored the body from
 * the upstream driver.
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}
/** * _base_build_nvme_prp - This function is called for NVMe end devices to build * a native SGL (NVMe PRP). * @ioc: per adapter object * @smid: system request message index for getting asscociated SGL * @nvme_encap_request: the NVMe request msg frame pointer * @data_out_dma: physical address for WRITES * @data_out_sz: data xfer size for WRITES * @data_in_dma: physical address for READS * @data_in_sz: data xfer size for READS * * The native SGL is built starting in the first PRP * entry of the NVMe message (PRP1). If the data buffer is small enough to be * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is * used to describe a larger data buffer. If the data buffer is too large to * describe using the two PRP entriess inside the NVMe message, then PRP1 * describes the first data memory segment, and PRP2 contains a pointer to a PRP * list located elsewhere in memory to describe the remaining data memory * segments. The PRP list will be contiguous. * * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP * consists of a list of PRP entries to describe a number of noncontigous * physical memory segments as a single memory buffer, just as a SGL does. Note * however, that this function is only used by the IOCTL call, so the memory * given will be guaranteed to be contiguous. There is no need to translate * non-contiguous SGL into a PRP in this case. All PRPs will describe * contiguous space that is one page size each. * * Each NVMe message contains two PRP entries. The first (PRP1) either contains * a PRP list pointer or a PRP element, depending upon the command. PRP2 * contains the second PRP element if the memory being described fits within 2 * PRP entries, or a PRP list pointer if the PRP spans more than two entries. * * A PRP list pointer contains the address of a PRP list, structured as a linear * array of PRP entries. Each PRP entry in this list describes a segment of * physical memory. *
 *
 * NOTE(review): the remainder of this kernel-doc (for _base_build_nvme_prp)
 * and the rest of the file were lost to extraction truncation; unrelated
 * non-source residue that had been appended here was removed, and this
 * comment terminator restores file syntax.
 */