/**
 * megasas_adp_reset_wait_for_ready - reset the chip (optionally) and wait
 * for the controller to reach the ready state
 * @instance: adapter's soft state
 * @do_adp_reset: if true, perform the chip reset first
 * @ocr_context: set to 1 when called from OCR context, else 0
 *
 * Userspace access to PCI config space is blocked for the whole
 * reset-and-wait sequence.
 *
 * Return: SUCCESS when the controller reaches ready state, else FAILED.
 */
int
megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
				 bool do_adp_reset, int ocr_context)
{
	int rval = FAILED;

	/*
	 * Keep userspace out of PCI config space while the diag reset
	 * initiated by the driver is in progress.
	 */
	if (megasas_dbg_lvl & OCR_DEBUG)
		dev_info(&instance->pdev->dev,
			 "Block access to PCI config space %s %d\n",
			 __func__, __LINE__);

	pci_cfg_access_lock(instance->pdev);

	if (do_adp_reset) {
		if (instance->instancet->adp_reset(instance,
						   instance->reg_set))
			goto out;
	}

	/* Wait for FW to become ready */
	if (megasas_transition_to_ready(instance, ocr_context)) {
		dev_warn(&instance->pdev->dev,
			 "Failed to transition controller to ready for scsi%d.\n",
			 instance->host->host_no);
		goto out;
	}

	rval = SUCCESS;
out:
	if (megasas_dbg_lvl & OCR_DEBUG)
		dev_info(&instance->pdev->dev,
			 "Unlock access to PCI config space %s %d\n",
			 __func__, __LINE__);
	pci_cfg_access_unlock(instance->pdev);

	return rval;
}
/** * megasas_check_same_4gb_region - check if allocation * crosses same 4GB boundary or not * @instance: adapter's soft instance * @start_addr: start address of DMA allocation * @size: size of allocation in bytes * @return: true : allocation does not cross same * 4GB boundary * false: allocation crosses same * 4GB boundary
*/ staticinlinebool megasas_check_same_4gb_region
(struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
{
dma_addr_t end_addr;
end_addr = start_addr + size;
if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
dev_err(&instance->pdev->dev, "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
(unsignedlonglong)start_addr,
(unsignedlonglong)end_addr); returnfalse;
}
instance->mask_interrupts = 0; /* For Thunderbolt/Invader also clear intr on enable */
writel(~0, ®s->outbound_intr_status);
readl(®s->outbound_intr_status);
/* Dummy readl to force pci flush */
dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
__func__, readl(®s->outbound_intr_mask));
}
writel(mask, ®s->outbound_intr_mask); /* Dummy readl to force pci flush */
dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
__func__, readl(®s->outbound_intr_mask));
}
/**
 * megasas_clear_intr_fusion - check for and acknowledge our interrupt
 * @instance: adapter's soft state
 *
 * Reads the outbound interrupt status register; if bit 0 is set the
 * interrupt is acknowledged by writing the status back (with a dummy
 * read to flush the PCI write).
 *
 * Return: 1 if the interrupt belongs to this adapter, 0 otherwise.
 */
int
megasas_clear_intr_fusion(struct megasas_instance *instance)
{
	u32 status;
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = megasas_readl(instance, &regs->outbound_intr_status);

	if (status & 1) {
		writel(status, &regs->outbound_intr_status);
		/* Dummy readl to force PCI write flush */
		readl(&regs->outbound_intr_status);
		return 1;
	}

	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 0;

	/*
	 * Missing in the garbled original: the fusion-interrupt success
	 * path must return a value.
	 */
	return 1;
}
/** * megasas_get_cmd_fusion - Get a command from the free pool * @instance: Adapter soft state * @blk_tag: Command tag * * Returns a blk_tag indexed mpt frame
*/ inlinestruct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
*instance, u32 blk_tag)
{ struct fusion_context *fusion;
/**
 * megasas_fire_cmd_fusion - Sends command to the FW
 * @instance: Adapter soft state
 * @req_desc: 32bit or 64bit Request descriptor
 *
 * Perform PCI Write. AERO SERIES supports 32 bit Descriptor.
 * Prior to AERO_SERIES support 64 bit Descriptor.
 */
static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
			union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	/*
	 * Atomic descriptors need only the low 32 bits posted to the
	 * single queue port; older controllers take the full 64-bit
	 * descriptor via the dedicated helper.
	 */
	if (instance->atomic_desc_support)
		writel(le32_to_cpu(req_desc->u.low),
		       &instance->reg_set->inbound_single_queue_port);
	else
		megasas_write_64bit_req_desc(instance, req_desc);
}
/**
 * megasas_fusion_update_can_queue - Do all Adapter Queue depth related
 * calculations here
 * @instance: Adapter soft state
 * @fw_boot_context: Whether this function called during probe or after OCR
 *
 * This function is only for fusion controllers.
 * Update host can queue, if firmware downgrade max supported firmware commands.
 * Firmware upgrade case will be skipped because underlying firmware has
 * more resource than exposed to the OS.
 *
 * NOTE(review): this block appears truncated by extraction — the branch
 * reading the queue depth for VENTURA_SERIES and any use of ldio_threshold
 * or fw_boot_context are missing, and there is one unbalanced closing brace
 * at the end. Do not treat the visible logic as the complete function.
 */
staticvoid
megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
{
u16 cur_max_fw_cmds = 0;
/* declared but never used in the visible portion — presumably consumed by the missing code */
u16 ldio_threshold = 0;
/* ventura FW does not fill outbound_scratch_pad_2 with queue depth */ if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
megasas_readl(instance,
&instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;
/* kdump kernels run with a reduced queue depth to save memory */
if (reset_devices)
instance->max_fw_cmds = min(instance->max_fw_cmds,
(u16)MEGASAS_KDUMP_QUEUE_DEPTH); /* * Reduce the max supported cmds by 1. This is to ensure that the * reply_q_sz (1 more than the max cmd that driver may send) * does not exceed max cmds that the FW can support
*/
instance->max_fw_cmds = instance->max_fw_cmds-1;
}
}
/**
 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
 * @instance: Adapter soft state
 */
void
megasas_free_cmds_fusion(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct megasas_cmd_fusion *mpt_cmd;
	int idx;

	if (fusion->sense)
		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
			      fusion->sense_phys_addr);

	/* SG frames hang off each command; release them before the list. */
	if (fusion->cmd_list) {
		for (idx = 0; idx < instance->max_mpt_cmds; idx++) {
			mpt_cmd = fusion->cmd_list[idx];
			if (mpt_cmd && mpt_cmd->sg_frame)
				dma_pool_free(fusion->sg_dma_pool,
					      mpt_cmd->sg_frame,
					      mpt_cmd->sg_frame_phys_addr);
			kfree(mpt_cmd);
		}
		kfree(fusion->cmd_list);
	}

	if (fusion->sg_dma_pool) {
		dma_pool_destroy(fusion->sg_dma_pool);
		fusion->sg_dma_pool = NULL;
	}
	if (fusion->sense_dma_pool) {
		dma_pool_destroy(fusion->sense_dma_pool);
		fusion->sense_dma_pool = NULL;
	}

	/* Reply Frame, Desc */
	if (instance->is_rdpq)
		megasas_free_rdpq_fusion(instance);
	else
		megasas_free_reply_fusion(instance);

	/* Request Frame, Desc */
	if (fusion->req_frames_desc)
		dma_free_coherent(&instance->pdev->dev,
				  fusion->request_alloc_sz,
				  fusion->req_frames_desc,
				  fusion->req_frames_desc_phys);
	if (fusion->io_request_frames)
		dma_pool_free(fusion->io_request_frames_pool,
			      fusion->io_request_frames,
			      fusion->io_request_frames_phys);
	if (fusion->io_request_frames_pool) {
		dma_pool_destroy(fusion->io_request_frames_pool);
		fusion->io_request_frames_pool = NULL;
	}
}
/** * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames * @instance: Adapter soft state *
*/ staticint megasas_create_sg_sense_fusion(struct megasas_instance *instance)
{ int i;
u16 max_cmd; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd; int sense_sz;
u32 offset;
if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
}
fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
GFP_KERNEL, &fusion->sense_phys_addr); if (!fusion->sense) {
dev_err(&instance->pdev->dev, "failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
}
/* sense buffer, request frame and reply desc pool requires to be in * same 4 gb region. Below function will check this. * In case of failure, new pci pool will be created with updated * alignment. * Older allocation and pool will be destroyed. * Alignment will be used such a way that next allocation if success, * will always meet same 4gb region requirement. * Actual requirement is not alignment, but we need start and end of * DMA address must have same upper 32 bit address.
*/
fusion->sense_dma_pool =
dma_pool_create("mr_sense_align", &instance->pdev->dev,
sense_sz, roundup_pow_of_two(sense_sz),
0); if (!fusion->sense_dma_pool) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
}
fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
GFP_KERNEL,
&fusion->sense_phys_addr); if (!fusion->sense) {
dev_err(&instance->pdev->dev, "failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
}
}
/* * Allocate and attach a frame to each of the commands in cmd_list
*/ for (i = 0; i < max_cmd; i++) {
cmd = fusion->cmd_list[i];
cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
GFP_KERNEL, &cmd->sg_frame_phys_addr);
/* * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. * Allocate the dynamic array first and then allocate individual * commands.
*/
fusion->cmd_list =
kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *),
GFP_KERNEL); if (!fusion->cmd_list) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
}
for (i = 0; i < max_mpt_cmd; i++) {
fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
GFP_KERNEL); if (!fusion->cmd_list[i]) { for (j = 0; j < i; j++)
kfree(fusion->cmd_list[j]);
kfree(fusion->cmd_list);
dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
}
}
if (!fusion->reply_frames_desc[0]) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
}
}
reply_desc = fusion->reply_frames_desc[0]; for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = cpu_to_le64(ULLONG_MAX);
/* This is not a rdpq mode, but driver still populate * reply_frame_desc array to use same msix index in ISR path.
*/ for (i = 0; i < (count - 1); i++)
fusion->reply_frames_desc[i + 1] =
fusion->reply_frames_desc[i] +
(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
return 0;
}
staticint
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{ int i, j, k, msix_count; struct fusion_context *fusion; union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
u8 dma_alloc_count, abs_index;
u32 chunk_size, array_size, offset;
if (!fusion->reply_frames_desc_pool ||
!fusion->reply_frames_desc_pool_align) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
}
/* * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should be * within 4GB boundary and also reply queues in a set must have same * upper 32-bits in their memory address. so here driver is allocating the * DMA'able memory for reply queues according. Driver uses limitation of * VENTURA_SERIES to manage INVADER_SERIES as well.
*/
dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
for (i = 0; i < dma_alloc_count; i++) {
rdpq_chunk_virt[i] =
dma_pool_alloc(fusion->reply_frames_desc_pool,
GFP_KERNEL, &rdpq_chunk_phys[i]); if (!rdpq_chunk_virt[i]) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM;
} /* reply desc pool requires to be in same 4 gb region. * Below function will check this. * In case of failure, new pci pool will be created with updated * alignment. * For RDPQ buffers, driver always allocate two separate pci pool. * Alignment will be used such a way that next allocation if * success, will always meet same 4gb region requirement. * rdpq_tracker keep track of each buffer's physical, * virtual address and pci pool descriptor. It will help driver * while freeing the resources. *
*/ if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
chunk_size)) {
dma_pool_free(fusion->reply_frames_desc_pool,
rdpq_chunk_virt[i],
rdpq_chunk_phys[i]);
for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) { if (fusion->rdpq_tracker[i].pool_entry_virt)
dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
fusion->rdpq_tracker[i].pool_entry_virt,
fusion->rdpq_tracker[i].pool_entry_phys);
if (fusion->reply_frames_desc[0])
dma_pool_free(fusion->reply_frames_desc_pool,
fusion->reply_frames_desc[0],
fusion->reply_frames_desc_phys[0]);
dma_pool_destroy(fusion->reply_frames_desc_pool);
}
/** * megasas_alloc_cmds_fusion - Allocates the command packets * @instance: Adapter soft state * * * Each frame has a 32-bit field called context. This context is used to get * back the megasas_cmd_fusion from the frame when a frame gets completed * In this driver, the 32 bit values are the indices into an array cmd_list. * This array is used only to look up the megasas_cmd_fusion given the context. * The free commands themselves are maintained in a linked list called cmd_pool. * * cmds are formed in the io_request and sg_frame members of the * megasas_cmd_fusion. The context field is used to get a request descriptor * and is used as SMID of the cmd. * SMID value range is from 1 to max_fw_cmds.
*/ staticint
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{ int i; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd;
u32 offset;
dma_addr_t io_req_base_phys;
u8 *io_req_base;
fusion = instance->ctrl_context;
if (megasas_alloc_request_fusion(instance)) goto fail_exit;
if (instance->is_rdpq) { if (megasas_alloc_rdpq_fusion(instance)) goto fail_exit;
} else if (megasas_alloc_reply_fusion(instance)) goto fail_exit;
if (megasas_alloc_cmdlist_fusion(instance)) goto fail_exit;
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
/* * Add all the commands to command pool (fusion->cmd_pool)
*/
/* SMID 0 is reserved. Set SMID/index from 1 */ for (i = 0; i < instance->max_mpt_cmds; i++) {
cmd = fusion->cmd_list[i];
offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
cmd->index = i + 1;
cmd->scmd = NULL;
cmd->sync_cmd_idx =
(i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
(i - instance->max_scsi_cmds) :
(u32)ULONG_MAX; /* Set to Invalid */
cmd->instance = instance;
cmd->io_request =
(struct MPI2_RAID_SCSI_IO_REQUEST *)
(io_req_base + offset);
memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
cmd->io_request_phys_addr = io_req_base_phys + offset;
cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
}
if (megasas_create_sg_sense_fusion(instance)) goto fail_exit;
/**
 * wait_and_poll - Issues a polling command
 * @instance: Adapter soft state
 * @cmd: Command packet to be issued
 * @seconds: Maximum poll time
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 *
 * NOTE(review): this block is truncated by extraction — the code after the
 * polling loop (final cmd_status evaluation and the function's return value)
 * and the closing brace are missing from this view.
 */
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, int seconds)
{ int i; struct megasas_header *frame_hdr = &cmd->frame->hdr;
u32 status_reg;
/* poll budget in milliseconds; the loop below steps 20 ms at a time */
u32 msecs = seconds * 1000;
/*
 * Wait for cmd_status to change
 */
for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
/* read barrier so each iteration re-reads the FW-updated cmd_status */
rmb();
/* roughly every 5 s, bail out early if the controller has faulted */
msleep(20); if (!(i % 5000)) {
status_reg = instance->instancet->read_fw_status_reg(instance)
& MFI_STATE_MASK; if (status_reg == MFI_STATE_FAULT) break;
}
}
if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
dev_err(&instance->pdev->dev, "Driver was operating on 64bit " "DMA mask, but upcoming FW does not support 64bit DMA mask\n");
megaraid_sas_kill_hba(instance);
ret = 1; goto fail_fw_init;
}
}
if (instance->is_rdpq && !cur_rdpq_mode) {
dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*" " from RDPQ mode to non RDPQ mode\n");
ret = 1; goto fail_fw_init;
}
/* * Each bit in replyqueue_mask represents one group of MSI-x vectors * (each group has 8 vectors)
*/ switch (instance->perf_mode) { case MR_BALANCED_PERF_MODE:
init_frame->replyqueue_mask =
cpu_to_le16(~(~0 << instance->low_latency_index_start/8)); break; case MR_IOPS_PERF_MODE:
init_frame->replyqueue_mask =
cpu_to_le16(~(~0 << instance->msix_vectors/8)); break;
}
fail_fw_init:
dev_err(&instance->pdev->dev, "Init cmd return status FAILED for SCSI host %d\n",
instance->host->host_no);
return ret;
}
/** * megasas_sync_pd_seq_num - JBOD SEQ MAP * @instance: Adapter soft state * @pend: set to 1, if it is pended jbod map. * * Issue Jbod map to the firmware. If it is pended command, * issue command and return. If it is first instance of jbod map * issue and receive command.
*/ int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) { int ret = 0;
size_t pd_seq_map_sz; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct fusion_context *fusion = instance->ctrl_context; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
dma_addr_t pd_seq_h;
if (pend) {
instance->instancet->issue_dcmd(instance, cmd); return 0;
}
/* Below code is only for non pended DCMD */ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS); else
ret = megasas_issue_polled(instance, cmd);
if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
dev_warn(&instance->pdev->dev, "driver supports max %d JBOD, but FW reports %d\n",
MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
ret = -EINVAL;
}
if (ret == DCMD_TIMEOUT)
dev_warn(&instance->pdev->dev, "%s DCMD timed out, continue without JBOD sequence map\n",
__func__);
if (ret == DCMD_SUCCESS)
instance->pd_seq_map_id++;
megasas_return_cmd(instance, cmd); return ret;
}
/* * megasas_get_ld_map_info - Returns FW's ld_map structure * @instance: Adapter soft state * @pend: Pend the command or not * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO * dcmd.mbox.b[0] - number of LDs being sync'd * dcmd.mbox.b[1] - 0 - complete command immediately. * - 1 - pend till config change * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and * uses extended struct MR_FW_RAID_MAP_EXT
*/ staticint
megasas_get_ld_map_info(struct megasas_instance *instance)
{ int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; void *ci;
dma_addr_t ci_h = 0;
u32 size_map_info; struct fusion_context *fusion;
cmd = megasas_get_cmd(instance);
if (!cmd) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n"); return -ENOMEM;
}
fusion = instance->ctrl_context;
if (!fusion) {
megasas_return_cmd(instance, cmd); return -ENXIO;
}
fusion->fast_path_io = 0; if (!megasas_get_ld_map_info(instance)) { if (MR_ValidateMapInfo(instance, instance->map_id)) {
fusion->fast_path_io = 1; return 0;
}
} return 1;
}
/* * megasas_sync_map_info - Returns FW's ld_map structure * @instance: Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW.
*/ int
megasas_sync_map_info(struct megasas_instance *instance)
{ int i; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd;
u16 num_lds; struct fusion_context *fusion; struct MR_LD_TARGET_SYNC *ci = NULL; struct MR_DRV_RAID_MAP_ALL *map; struct MR_LD_RAID *raid; struct MR_LD_TARGET_SYNC *ld_sync;
dma_addr_t ci_h = 0;
u32 size_map_info;
cmd = megasas_get_cmd(instance);
if (!cmd) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n"); return -ENOMEM;
}
fusion = instance->ctrl_context;
if (!fusion) {
megasas_return_cmd(instance, cmd); return 1;
}
if (!fusion->ld_drv_map[i]) {
fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);
if (!fusion->ld_drv_map[i]) {
dev_err(&instance->pdev->dev, "Could not allocate memory for local map" " size requested: %d\n",
fusion->drv_map_sz); goto ld_drv_map_alloc_fail;
}
}
}
for (i = 0; i < 2; i++) {
fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
fusion->max_map_sz,
&fusion->ld_map_phys[i],
GFP_KERNEL); if (!fusion->ld_map[i]) {
dev_err(&instance->pdev->dev, "Could not allocate memory for map info %s:%d\n",
__func__, __LINE__); goto ld_map_alloc_fail;
}
}
return 0;
ld_map_alloc_fail: for (i = 0; i < 2; i++) { if (fusion->ld_map[i])
dma_free_coherent(&instance->pdev->dev,
fusion->max_map_sz,
fusion->ld_map[i],
fusion->ld_map_phys[i]);
}
ld_drv_map_alloc_fail: for (i = 0; i < 2; i++) { if (fusion->ld_drv_map[i]) { if (is_vmalloc_addr(fusion->ld_drv_map[i]))
vfree(fusion->ld_drv_map[i]); else
free_pages((ulong)fusion->ld_drv_map[i],
fusion->drv_map_pages);
}
}
if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
dma_free_coherent(&instance->pdev->dev,
IOC_INIT_FRAME_SIZE,
fusion->ioc_init_cmd->frame,
fusion->ioc_init_cmd->frame_phys_addr);
kfree(fusion->ioc_init_cmd);
}
/** * megasas_init_adapter_fusion - Initializes the FW * @instance: Adapter soft state * * This is the main function for initializing firmware.
*/ static u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{ struct fusion_context *fusion;
u32 scratch_pad_1; int i = 0, count;
u32 status_reg;
/* * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
*/
instance->max_mfi_cmds =
MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
megasas_configure_queue_sizes(instance);
scratch_pad_1 = megasas_readl(instance,
&instance->reg_set->outbound_scratch_pad_1); /* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, * Firmware support extended IO chain frame which is 4 times more than * legacy Firmware. * Legacy Firmware - Frame size is (8 * 128) = 1K * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
*/ if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
instance->max_chain_frame_sz =
((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO; else
instance->max_chain_frame_sz =
((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
instance->max_chain_frame_sz,
MEGASAS_CHAIN_FRAME_SZ_MIN);
instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
}
for (i = 0 ; i < count; i++)
fusion->last_reply_idx[i] = 0;
/* * For fusion adapters, 3 commands for IOCTL and 8 commands * for driver's internal DCMDs.
*/
instance->max_scsi_cmds = instance->max_fw_cmds -
(MEGASAS_FUSION_INTERNAL_CMDS +
MEGASAS_FUSION_IOCTL_CMDS);
sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++)
atomic_set(&fusion->busy_mq_poll[i], 0);
if (megasas_alloc_ioc_init_frame(instance)) return 1;
/* * Allocate memory for descriptors * Create a pool of commands
*/ if (megasas_alloc_cmds(instance)) goto fail_alloc_mfi_cmds; if (megasas_alloc_cmds_fusion(instance)) goto fail_alloc_cmds;
if (megasas_ioc_init_fusion(instance)) {
status_reg = instance->instancet->read_fw_status_reg(instance); if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) &&
(status_reg & MFI_RESET_ADAPTER)) { /* Do a chip reset and then retry IOC INIT once */ if (megasas_adp_reset_wait_for_ready
(instance, true, 0) == FAILED) goto fail_ioc_init;
megasas_display_intel_branding(instance); if (megasas_get_ctrl_info(instance)) {
dev_err(&instance->pdev->dev, "Could not get controller info. Fail from %s %d\n",
__func__, __LINE__); goto fail_ioc_init;
}
/* Check the fw state */
fw_state = instance->instancet->read_fw_status_reg(instance) &
MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
dma_state = instance->instancet->read_fw_status_reg(instance) &
MFI_STATE_DMADONE; /* Start collecting crash, if DMA bit is done */ if (instance->crash_dump_drv_support &&
instance->crash_dump_app_support && dma_state) {
megasas_fusion_crash_dump(instance);
} else { if (instance->unload == 0) {
status = megasas_reset_fusion(instance->host, 0); if (status != SUCCESS) {
dev_err(&instance->pdev->dev, "Failed from %s %d, do not re-arm timer\n",
__func__, __LINE__); return;
}
}
}
}
if (instance->fw_fault_work_q)
queue_delayed_work(instance->fw_fault_work_q,
&instance->fw_fault_work,
msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
}
int
megasas_fusion_start_watchdog(struct megasas_instance *instance)
{ /* Check if the Fault WQ is already started */ if (instance->fw_fault_work_q) return SUCCESS;
if (instance->fw_fault_work_q) {
wq = instance->fw_fault_work_q;
instance->fw_fault_work_q = NULL; if (!cancel_delayed_work_sync(&instance->fw_fault_work))
flush_workqueue(wq);
destroy_workqueue(wq);
}
}
/** * map_cmd_status - Maps FW cmd status to OS cmd status * @fusion: fusion context * @scmd: Pointer to cmd * @status: status of cmd returned by FW * @ext_status: ext status of cmd returned by FW * @data_length: command data length * @sense: command sense data
*/ staticvoid
map_cmd_status(struct fusion_context *fusion, struct scsi_cmnd *scmd, u8 status, u8 ext_status,
u32 data_length, u8 *sense)
{
u8 cmd_type; int resid;
/* * If the IO request is partially completed, then MR FW will * update "io_request->DataLength" field with actual number of * bytes transferred.Driver will set residual bytes count in * SCSI command structure.
*/
resid = (scsi_bufflen(scmd) - data_length);
scsi_set_resid(scmd, resid);
if (resid &&
((cmd_type == READ_WRITE_LDIO) ||
(cmd_type == READ_WRITE_SYSPDIO)))
scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len" " requested/completed 0x%x/0x%x\n",
status, scsi_bufflen(scmd), data_length); break;
case MFI_STAT_LD_OFFLINE: case MFI_STAT_DEVICE_NOT_FOUND:
scmd->result = DID_BAD_TARGET << 16; break; case MFI_STAT_CONFIG_SEQ_MISMATCH:
scmd->result = DID_IMM_RETRY << 16; break; default:
scmd->result = DID_ERROR << 16; break;
}
}
/** * megasas_is_prp_possible - * Checks if native NVMe PRPs can be built for the IO * * @instance: Adapter soft state * @scmd: SCSI command from the mid-layer * @sge_count: scatter gather element count. * * Returns: true: PRPs can be built * false: IEEE SGLs needs to be built
*/ staticbool
megasas_is_prp_possible(struct megasas_instance *instance, struct scsi_cmnd *scmd, int sge_count)
{
u32 data_length = 0; struct scatterlist *sg_scmd; bool build_prp = false;
u32 mr_nvme_pg_size;
/* * NVMe uses one PRP for each page (or part of a page) * look at the data length - if 4 pages or less then IEEE is OK * if > 5 pages then we need to build a native SGL * if > 4 and <= 5 pages, then check physical address of 1st SG entry * if this first size in the page is >= the residual beyond 4 pages * then use IEEE, otherwise use native SGL
*/
/* * Nvme has a very convoluted prp format. One prp is required * for each page or partial page. Driver need to split up OS sg_list * entries if it is longer than one page or cross a page * boundary. Driver also have to insert a PRP list pointer entry as * the last entry in each physical page of the PRP list. * * NOTE: The first PRP "entry" is actually placed in the first * SGL entry in the main message as IEEE 64 format. The 2nd * entry in the main message is the chain element, and the rest * of the PRP entries are built in the contiguous pcie buffer.
*/
page_mask = mr_nvme_pg_size - 1;
ptr_sgl = (u64 *)cmd->sg_frame;
ptr_sgl_phys = cmd->sg_frame_phys_addr;
memset(ptr_sgl, 0, instance->max_chain_frame_sz);
/* Build chain frame element which holds all prps except first*/
main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *)
((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64));
/* Build first prp, sge need not to be page aligned*/
ptr_first_sgl = sgl_ptr;
sg_scmd = scsi_sglist(scmd);
sge_addr = sg_dma_address(sg_scmd);
sge_len = sg_dma_len(sg_scmd);
/** * megasas_make_sgl_fusion - Prepares 32-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @sgl_ptr: SGL to be filled in * @cmd: cmd we are working on * @sge_count: sge count *
*/ staticvoid
megasas_make_sgl_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, struct megasas_cmd_fusion *cmd, int sge_count)
{ int i, sg_processed; struct scatterlist *os_sgl; struct fusion_context *fusion;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.