/* Force-release a stuck flash/rom lock during reset. */
void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	if (qla4_83xx_flash_lock(ha))
		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);

	/*
	 * Either we took the lock, or some other function is holding it.
	 * Since we are resetting, unlock unconditionally.
	 */
	qla4_83xx_flash_unlock(ha);
}
/*
 * qla4_83xx_drv_lock - Acquire the inter-driver (IDC) lock.
 * @ha: pointer to adapter structure
 *
 * Spins on the DRV_LOCK semaphore register.  On success, bumps the lock
 * counter (bits 8-31) and records our function number (bits 0-7) in
 * DRV_LOCK_ID.  If the same owner holds the lock for more than the
 * configured timeout, force lock recovery.
 */
int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
{
	int ret_val = QLA_SUCCESS;
	int timeout = 0;
	uint32_t first_owner = 0;
	uint32_t lock_id, tmo_owner, func_num, lock_cnt;

	for (;;) {
		/* Non-zero read means the hardware granted us the lock. */
		if (qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK)) {
			/* Increment the counter (bits 8-31) and stamp our
			 * function number (bits 0-7) into DRV_LOCK_ID. */
			lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
			qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
			break;
		}

		/* Remember counter + owner seen on the first failed try. */
		if (timeout == 0)
			first_owner = ha->isp_ops->rd_reg_direct(ha,
							QLA83XX_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
			tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
				   __func__, ha->func_num, func_num, lock_cnt,
				   (first_owner & 0xFF));

			if (first_owner != tmo_owner) {
				/* Ownership changed while we waited: another
				 * function took the lock, or the same driver
				 * re-took it (counter changed).  Retry for
				 * another 2 sec. */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
					   __func__, ha->func_num);
				timeout = 0;
			} else {
				/* Same owner holding the lock > 2 sec:
				 * force recovery. */
				ret_val = qla4_83xx_lock_recovery(ha);
				if (ret_val == QLA_SUCCESS) {
					/* Recovered and got the lock. */
					ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
						   __func__, ha->func_num);
					break;
				}
				/* Recovery failed: some other function holds
				 * the lock; wait 2 secs and retry. */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
					   __func__, ha->func_num);
				timeout = 0;
			}
		}
		msleep(QLA83XX_DRV_LOCK_MSLEEP);
	}

	return ret_val;
}
/* Release the inter-driver (IDC) lock; only the owner may unlock. */
void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
{
	int id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);

	/* Refuse to unlock if a different function owns the lock. */
	if ((id & 0xFF) != ha->func_num) {
		ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
			   __func__, ha->func_num, (id & 0xFF));
		return;
	}

	/* Preserve the lock counter (bits 8-31); set the owner field
	 * (bits 0-7) to 0xFF. */
	qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
	qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
}
/* Per-PCI-function entry decoded from the DEV_PART_INFO registers. */
struct device_info {
	int func_num;		/* PCI function number */
	int device_type;	/* lower 2 bits of the partition nibble */
	int port_num;		/* upper 2 bits of the partition nibble */
};
/**
 * qla4_83xx_can_perform_reset - Decide whether this function owns the reset.
 * @ha: pointer to adapter structure
 *
 * Reset ownership order is NIC, then iSCSI, then FCOE, based on which
 * drivers are actually loaded (drv_active).  Returns 1 when this function
 * may perform the reset, 0 otherwise.
 *
 * Fix: the original had "} elseif (" which is not valid C; it must be
 * "} else if (".
 */
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
{
	uint32_t drv_active;
	uint32_t dev_part, dev_part1, dev_part2;
	int i;
	struct device_info device_map[16];
	int func_nibble;
	int nibble;
	int nic_present = 0;
	int iscsi_present = 0;
	int iscsi_func_low = 0;

	/* Use the dev_partition register to determine the PCI function number
	 * and then check drv_active register to see which driver is loaded */
	dev_part1 = qla4_83xx_rd_reg(ha,
				     ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
	dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
	drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);

	/* Each function has 4 bits in dev_partition Info register,
	 * Lower 2 bits - device type, Upper 2 bits - physical port number */
	dev_part = dev_part1;
	for (i = nibble = 0; i <= 15; i++, nibble++) {
		func_nibble = dev_part & (0xF << (nibble * 4));
		func_nibble >>= (nibble * 4);
		device_map[i].func_num = i;
		device_map[i].device_type = func_nibble & 0x3;
		device_map[i].port_num = func_nibble & 0xC;

		if (device_map[i].device_type == NIC_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				/* An active NIC driver owns the reset. */
				nic_present++;
				break;
			}
		} else if (device_map[i].device_type == ISCSI_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				/* Track the lowest active iSCSI function. */
				if (!iscsi_present ||
				    iscsi_func_low > device_map[i].func_num)
					iscsi_func_low = device_map[i].func_num;

				iscsi_present++;
			}
		}

		/* For function_num[8..15] get info from dev_part2 register.
		 * NOTE(review): resetting nibble to 0 here means the loop
		 * increment makes function 8 read nibble 1 of dev_part2 --
		 * confirm against the hardware layout / original source. */
		if (nibble == 7) {
			nibble = 0;
			dev_part = dev_part2;
		}
	}

	/* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets
	 * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers
	 * present. */
	if (!nic_present && (ha->func_num == iscsi_func_low)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: can reset - NIC not present and lower iSCSI function is %d\n",
				  __func__, ha->func_num));
		return 1;
	}

	return 0;
}
/*
 * NOTE(review): this region appears damaged by extraction.  It begins
 * qla4_83xx_need_reset_handler() but runs straight into the tail of a
 * different (command-peg polling) routine: 'val', 'ret_val' and 'retries'
 * are used without declaration, and a value is returned from a function
 * declared void.  'unsignedlong' is also a fused token.  Left byte-identical;
 * this must be restored from the original source rather than patched here.
 */
/** * qla4_83xx_need_reset_handler - Code to start reset sequence * @ha: pointer to adapter structure * * Note: IDC lock must be held upon entry
**/ void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
{
uint32_t dev_state, drv_state, drv_active; unsignedlong reset_timeout, dev_init_timeout;
ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
__func__);
if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
__func__));
qla4_8xxx_set_rst_ready(ha);
/* Non-reset owners ACK Reset and wait for device INIT state
* as part of Reset Recovery by Reset Owner */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
do { if (time_after_eq(jiffies, dev_init_timeout)) {
ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
__func__); break;
}
/* NOTE(review): from here on the code belongs to a command-peg
 * readiness poll, not to the need-reset handler above. */
do {
val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE); if (val == PHAN_INITIALIZE_COMPLETE) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Command Peg initialization complete. State=0x%x\n",
__func__, val));
ret_val = QLA_SUCCESS; break;
}
msleep(CRB_CMDPEG_CHECK_DELAY);
} while (--retries);
return ret_val;
}
/*
 * NOTE(review): extraction damage.  qla4_83xx_poll_reg() below is truncated
 * (only its locals survive), then qla4_83xx_rmw_crb_reg() follows, and after
 * it there are three orphaned list-processing loop fragments that use
 * undeclared 'i', 'p_hdr' and 'p_entry'.  Left byte-identical; restore from
 * the original source.
 */
/** * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till * value read ANDed with test_mask is equal to test_result. * * @ha : Pointer to adapter structure * @addr : CRB register address * @duration : Poll for total of "duration" msecs * @test_mask : Mask value read with "test_mask" * @test_result : Compare (value&test_mask) with test_result.
**/ staticint qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr, int duration, uint32_t test_mask,
uint32_t test_result)
{
uint32_t value;
uint8_t retries; int ret_val = QLA_SUCCESS;
/** * qla4_83xx_rmw_crb_reg - Read Modify Write crb register * * This function read value from raddr, AND with test_mask, * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. * * @ha : Pointer to adapter structure * @raddr : CRB address to read from * @waddr : CRB address to write to * @p_rmw_hdr : header with shift/or/xor values.
**/ staticvoid qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
uint32_t waddr, struct qla4_83xx_rmw *p_rmw_hdr)
{
uint32_t value;
if (p_rmw_hdr->index_a)
value = ha->reset_tmplt.array[p_rmw_hdr->index_a]; else
qla4_83xx_rd_reg_indirect(ha, raddr, &value);
value &= p_rmw_hdr->test_mask;
value <<= p_rmw_hdr->shl;
value >>= p_rmw_hdr->shr;
value |= p_rmw_hdr->or_value;
value ^= p_rmw_hdr->xor_value;
/* NOTE(review): the loops below belong to separate write/read-write/rmw
 * list-entry processors, not to qla4_83xx_rmw_crb_reg(). */
for (i = 0; i < p_hdr->count; i++, p_entry++) {
qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2); if (p_hdr->delay)
udelay((uint32_t)(p_hdr->delay));
}
}
for (i = 0; i < p_hdr->count; i++, p_entry++) {
qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2); if (p_hdr->delay)
udelay((uint32_t)(p_hdr->delay));
}
}
for (i = 0; i < p_hdr->count; i++, p_entry++) {
qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
p_rmw_hdr); if (p_hdr->delay)
udelay((uint32_t)(p_hdr->delay));
}
}
/*
 * NOTE(review): extraction damage.  The seq_error check below is the tail of
 * some reset-template completion routine; qla4_83xx_process_reset_template()
 * that follows is truncated and fused with a host-interrupt write fragment
 * from an unrelated mailbox routine.  Left byte-identical; restore from the
 * original source.
 */
if (ha->reset_tmplt.seq_error == 0) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Reset sequence completed SUCCESSFULLY.\n",
__func__));
} else {
ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
__func__);
}
}
/** * qla4_83xx_process_reset_template - Process reset template. * * Process all entries in reset template till entry with SEQ_END opcode, * which indicates end of the reset template processing. Each entry has a * Reset Entry header, entry opcode/command, with size of the entry, number * of entries in sub-sequence and delay in microsecs or timeout in millisecs. * * @ha : Pointer to adapter structure * @p_buff : Common reset entry header.
**/ staticvoid qla4_83xx_process_reset_template(struct scsi_qla_host *ha, char *p_buff)
{ int index, entries; struct qla4_83xx_reset_entry_hdr *p_hdr; char *p_entry = p_buff;
/* Set Host Interrupt register to 1, to tell the firmware that * a mailbox command is pending. Firmware after reading the
* mailbox command, clears the host interrupt register */
writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
}
/*
 * NOTE(review): extraction damage.  qla4_83xx_process_mbox_intr() below is
 * truncated after its locals; the remainder belongs to an ISP-reset routine
 * ('dev_state' and 'rval' are undeclared here, and the 'exit_isp_reset'
 * label is missing).  Left byte-identical; restore from the original source.
 */
void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
{ int intr_status;
if (ql4xdontresethba)
qla4_83xx_set_idc_dontreset(ha);
if (dev_state == QLA8XXX_DEV_READY) { /* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset
* recovery */ if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
__func__);
rval = QLA_ERROR; goto exit_isp_reset;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
__func__));
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
QLA8XXX_DEV_NEED_RESET);
} else { /* If device_state is NEED_RESET, go ahead with * Reset,irrespective of ql4xdontresethba. This is to allow a * non-reset-owner to force a reset. Non-reset-owner sets * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset * and then forces a Reset by setting device_state to
* NEED_RESET. */
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW state already set to NEED_RESET\n",
__func__));
}
/* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on * priority and which drivers are present. Unlike ISP8022, the function
* setting NEED_RESET, may not be the Reset owner. */ if (qla4_83xx_can_perform_reset(ha))
set_bit(AF_8XXX_RST_OWNER, &ha->flags);
if (rval == QLA_SUCCESS)
clear_bit(AF_FW_RECOVERY, &ha->flags);
return rval;
}
/*
 * qla4_83xx_dump_pause_control_regs - Dump SRE-shim and RxB pause registers.
 * @ha: Pointer to host adapter structure.
 *
 * Debug-only (DEBUG2) dump of pause-control state for both ports.
 * Fix: "staticvoid" was a fused token and not valid C.
 */
static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
{
	u32 val = 0, val1 = 0;
	int i;

	qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
	DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));

	/* Port 0 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 1 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Rx Traffic Class Stats: select the TC in bits 31:29,
	 * then read back the counter for that class. */
	DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
		val &= ~(0x7 << 29);	/* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
					  (val | (i << 29)));
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Rx Traffic Class Stats. */
	DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
		val &= ~(0x7 << 29);	/* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
					  (val | (i << 29)));
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	/* NOTE(review): val1 is printed below but never assigned in this
	 * function -- the reads of the port 2/3 IFB pause thresholds appear
	 * to be missing here; confirm against the original source. */
	DEBUG2(ql4_printk(KERN_INFO, ha, "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
			  val, val1));
}
/*
 * __qla4_83xx_disable_pause - Program pause-disable values into the hardware.
 * @ha: Pointer to host adapter structure.
 *
 * Writes the pause/threshold values into the SRE-shim control and the per-TC
 * RxB registers of both ports.  Caller holds the IDC lock (see
 * qla4_83xx_disable_pause()).
 * Fix: "staticvoid" was a fused token and not valid C.
 */
static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	int i;

	/* set SRE-Shim Control Register */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
				  QLA83XX_SET_PAUSE_VAL);

	for (i = 0; i < 8; i++) {
		/* Port 0 Rx Buffer Pause Threshold Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
				QLA83XX_SET_PAUSE_VAL);
		/* Port 1 Rx Buffer Pause Threshold Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
				QLA83XX_SET_PAUSE_VAL);
	}

	for (i = 0; i < 4; i++) {
		/* Port 0 RxB Traffic Class Max Cell Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
				QLA83XX_SET_TC_MAX_CELL_VAL);
		/* Port 1 RxB Traffic Class Max Cell Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
				QLA83XX_SET_TC_MAX_CELL_VAL);
	}

	ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
}
/**
 * qla4_83xx_eport_init - Initialize EPort.
 * @ha: Pointer to host adapter structure.
 *
 * If EPort hardware is in reset state before disabling pause, there would be
 * serious hardware wedging issues. To prevent this perform eport init every
 * time before disabling pause frames.
 *
 * Fix: "staticvoid" was a fused token and not valid C.
 **/
static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
{
	/* Clear the 8 registers */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);

	/* Write any value to Reset Control register */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);

	ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
}
/* Disable pause frames under the IDC lock. */
void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	ha->isp_ops->idc_lock(ha);

	/* Bring the EPort out of reset first: disabling pause while EPort
	 * is in reset can wedge the hardware. */
	qla4_83xx_eport_init(ha);
	qla4_83xx_dump_pause_control_regs(ha);
	__qla4_83xx_disable_pause(ha);

	ha->isp_ops->idc_unlock(ha);
}
/*
 * NOTE(review): qla4_83xx_is_detached() below is truncated -- only its local
 * declaration survives and the function body/closing brace are missing (the
 * file content ends in non-code residue).  Restore from the original source.
 */
/** * qla4_83xx_is_detached - Check if we are marked invisible. * @ha: Pointer to host adapter structure.
**/ int qla4_83xx_is_detached(struct scsi_qla_host *ha)
{
uint32_t drv_active;
/*
 * NOTE(review): the following text is non-code residue from a German web
 * page that leaked into this file during extraction.  Translated and fenced
 * into a comment so the file remains parseable:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */