/* bnx2i_hwi.c: QLogic NetXtreme II iSCSI driver. * * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * Copyright (c) 2014, QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com) * Maintained by: QLogic-Storage-Upstream@qlogic.com
*/
/** * bnx2i_get_cid_num - get cid from ep * @ep: endpoint pointer * * Only applicable to 57710 family of devices
*/ static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
{
u32 cid;
/** * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type * @hba: Adapter for which adjustments is to be made * * Only applicable to 57710 family of devices
*/ staticvoid bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
{
u32 num_elements_per_pg;
if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { if (!is_power_of_2(hba->max_sqes))
hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
if (!is_power_of_2(hba->max_rqes))
hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
}
/* Adjust each queue size if the user selection does not * yield integral num of page buffers
*/ /* adjust SQ */
num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; if (hba->max_sqes < num_elements_per_pg)
hba->max_sqes = num_elements_per_pg; elseif (hba->max_sqes % num_elements_per_pg)
hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
~(num_elements_per_pg - 1);
/**
 * bnx2i_get_link_state - mirror net device carrier state into adapter flags
 * @hba: adapter instance pointer
 *
 * Sets ADAPTER_STATE_LINK_DOWN in @hba->adapter_state when the underlying
 * net device reports no carrier, and clears the flag otherwise.
 */
static void bnx2i_get_link_state(struct bnx2i_hba *hba)
{
	int no_carrier = test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state);

	if (no_carrier)
		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
	else
		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
}
/**
 * bnx2i_iscsi_license_error - logs iscsi license related error message
 * @hba: adapter instance pointer
 * @error_code: error classification reported by the firmware
 *
 * Puts out an error log when the driver is unable to offload an iscsi
 * connection due to license restrictions, then marks the adapter init
 * as failed in @hba->adapter_state.
 */
static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
{
	switch (error_code) {
	case ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED:
		/* iSCSI offload not supported on this device */
		printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
		       hba->netdev->name);
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED:
		/* iSCSI offload not supported on this LOM device */
		printk(KERN_ERR "bnx2i: LOM is not enable to "
		       "offload iSCSI connections, dev=%s\n",
		       hba->netdev->name);
		break;
	}
	set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
}
/** * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification * @ep: endpoint (transport identifier) structure * @action: action, ARM or DISARM. For now only ARM_CQE is used * * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt * the driver. EQ event is generated CQ index is hit or at least 1 CQ is * outstanding and on chip timer expires
*/ int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
{ struct bnx2i_5771x_cq_db *cq_db;
u16 cq_index;
u16 next_index = 0;
u32 num_active_cmds;
/* Coalesce CQ entries only on 10G devices */ if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) return 0;
/* Do not update CQ DB multiple times before firmware writes * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious * interrupts and other unwanted results
*/
cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
if (action != CNIC_ARM_CQE_FP) if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF) return 0;
/** * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer * @bnx2i_conn: iscsi connection on which RQ event occurred * @ptr: driver buffer to which RQ buffer contents is to * be copied * @len: length of valid data inside RQ buf * * Copies RQ buffer contents from shared (DMA'able) memory region to * driver buffer. RQ is used to DMA unsolicitated iscsi pdu's and * scsi sense info
*/ void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
{ if (!bnx2i_conn->ep->qp.rqe_left) return;
/**
 * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
 * @bnx2i_conn: iscsi connection on which event to post
 * @count: number of RQ buffers being posted to chip
 *
 * The RQ producer index is a 15-bit index with bit 15 used as a wrap
 * indicator: the bit is preserved on a normal advance and inverted when
 * the index wraps past hba->max_rqes.  For 57710 family devices the
 * producer index is written into a host-memory doorbell structure and no
 * hardware doorbell is needed; older parts require an explicit register
 * write to CNIC_RECV_DOORBELL.
 *
 * NOTE(review): @count is never referenced in this body and the producer
 * index is not advanced here - verify against the upstream driver; the
 * index-advance statements may have been lost in this copy of the file.
 */ void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
{ struct bnx2i_5771x_sq_rq_db *rq_db;
u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); struct bnx2i_endpoint *ep = bnx2i_conn->ep;
/* wrap the 15-bit index; invert the saved wrap bit on rollover */
if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; if (!hi_bit)
ep->qp.rq_prod_idx |= 0x8000;
} else
ep->qp.rq_prod_idx |= hi_bit;
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
rq_db->prod_idx = ep->qp.rq_prod_idx; /* no need to ring hardware doorbell for 57710 */
} else {
writew(ep->qp.rq_prod_idx,
ep->qp.ctx_base + CNIC_RECV_DOORBELL);
}
}
/**
 * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
 * @bnx2i_conn: iscsi connection to which new SQ entries belong
 * @count: number of SQ WQEs to post
 *
 * For the 57710 family the SQ producer index is written into a
 * host-memory doorbell structure and bnx2i_ring_577xx_doorbell() is
 * invoked; for 5706/5708/5709 the new SQ WQE count is written directly
 * into the send doorbell register.
 */
static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
	struct bnx2i_5771x_sq_rq_db *sq_db;

	atomic_inc(&ep->num_active_cmds);
	wmb();	/* flush SQ WQE memory before the doorbell is rung */

	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
		return;
	}

	sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
	sq_db->prod_idx = ep->qp.sq_prod_idx;
	bnx2i_ring_577xx_doorbell(bnx2i_conn);
}
/** * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters * @bnx2i_conn: iscsi connection to which new SQ entries belong * @count: number of SQ WQEs to post * * this routine will update SQ driver parameters and ring the doorbell
*/ staticvoid bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn, int count)
{ int tmp_cnt;
/** * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware * @bnx2i_conn: iscsi connection * @task: transport layer's command structure pointer which is requesting * a WQE to sent to chip for further processing * * prepare and post an iSCSI Login request WQE to CNIC firmware
*/ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *task)
{ struct bnx2i_login_request *login_wqe; struct iscsi_login_req *login_hdr;
u32 dword;
switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) { case ISCSI_TM_FUNC_ABORT_TASK: case ISCSI_TM_FUNC_TASK_REASSIGN:
ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); if (!ctask || !ctask->sc) /* * the iscsi layer must have completed the cmd while * was starting up. * * Note: In the case of a SCSI cmd timeout, the task's * sc is still active; hence ctask->sc != 0 * In this case, the task must be aborted
*/ return 0;
/** * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware * @bnx2i_conn: iscsi connection * @mtask: driver command structure which is requesting * a WQE to sent to chip for further processing * * prepare and post an iSCSI Text request WQE to CNIC firmware
*/ int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *mtask)
{ struct bnx2i_text_request *text_wqe; struct iscsi_text *text_hdr;
u32 dword;
/** * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware * @bnx2i_conn: iscsi connection * @cmd: driver command structure which is requesting * a WQE to sent to chip for further processing * * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
*/ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, struct bnx2i_cmd *cmd)
{ struct bnx2i_cmd_request *scsi_cmd_wqe;
scsi_cmd_wqe = (struct bnx2i_cmd_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
/** * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware * @bnx2i_conn: iscsi connection * @task: transport layer's command structure pointer which is * requesting a WQE to sent to chip for further processing * @datap: payload buffer pointer * @data_len: payload data length * @unsol: indicated whether nopout pdu is unsolicited pdu or * in response to target's NOPIN w/ TTT != FFFFFFFF * * prepare and post a nopout request WQE to CNIC firmware
*/ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *task, char *datap, int data_len, int unsol)
{ struct bnx2i_endpoint *ep = bnx2i_conn->ep; struct bnx2i_nop_out_request *nopout_wqe; struct iscsi_nopout *nopout_hdr;
/** * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware * @bnx2i_conn: iscsi connection * @task: transport layer's command structure pointer which is * requesting a WQE to sent to chip for further processing * * prepare and post logout request WQE to CNIC firmware
*/ int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn, struct iscsi_task *task)
{ struct bnx2i_logout_request *logout_wqe; struct iscsi_logout *logout_hdr;
/* 5771x requires conn context id to be passed as is */ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
update_wqe->context_id = bnx2i_conn->ep->ep_cid; else
update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
update_wqe->conn_flags = 0; if (conn->hdrdgst_en)
update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST; if (conn->datadgst_en)
update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST; if (conn->session->initial_r2t_en)
update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T; if (conn->session->imm_data_en)
update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
/** * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w * @hba: adapter structure pointer * * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w * This results in iSCSi support validation and on-chip context manager * initialization. Firmware completes this handshake with a CQE carrying * the result of iscsi support validation. Parameter carried by * iscsi init request determines the number of offloaded connection and * tolerance level for iscsi protocol violation this hba/chip can support
*/ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
{ struct kwqe *kwqe_arr[3]; struct iscsi_kwqe_init1 iscsi_init; struct iscsi_kwqe_init2 iscsi_init2; int rc = 0;
u64 mask64;
/**
 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
 * @bnx2i_conn: iscsi connection
 *
 * Firmware advances the RQ producer index for every unsolicited PDU even
 * when the payload data length is '0'.  Consume and immediately replenish
 * a single RQ slot so the driver-side indices stay in step with the f/w.
 */
static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
{
	char scratch[2];

	bnx2i_get_rq_buf(bnx2i_conn, scratch, 1);
	bnx2i_put_rq_buf(bnx2i_conn, 1);
}
cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); if (!task)
printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
spin_unlock(&session->back_lock);
complete(&bnx2i_conn->cmd_cleanup_cmpl);
}
/**
 * bnx2i_percpu_io_thread - per-cpu kthread completing queued SCSI cmd responses
 * @arg: pointer to this CPU's bnx2i_percpu_s structure
 *
 * Drains @p->work_list under p_work_lock, splicing pending entries onto a
 * private list so the lock is dropped while each completion is processed
 * via bnx2i_process_scsi_cmd_resp().  Work items were allocated in
 * bottom-half context and are freed here.
 *
 * The task state is set to TASK_INTERRUPTIBLE before p_work_lock is
 * released so a wakeup from the producer (which queues work under the
 * same lock) is not lost between the emptiness check and schedule().
 */ int bnx2i_percpu_io_thread(void *arg)
{ struct bnx2i_percpu_s *p = arg; struct bnx2i_work *work, *tmp;
LIST_HEAD(work_list);
set_user_nice(current, MIN_NICE);
while (!kthread_should_stop()) {
spin_lock_bh(&p->p_work_lock); while (!list_empty(&p->work_list)) {
/* take everything queued so far; drop the lock while processing */
list_splice_init(&p->work_list, &work_list);
spin_unlock_bh(&p->p_work_lock);
list_for_each_entry_safe(work, tmp, &work_list, list) {
list_del_init(&work->list); /* work allocated in the bh, freed here */
bnx2i_process_scsi_cmd_resp(work->session,
work->bnx2i_conn,
&work->cqe);
atomic_dec(&work->bnx2i_conn->work_cnt);
kfree(work);
}
/* reacquire before re-checking the shared list */
spin_lock_bh(&p->p_work_lock);
}
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&p->p_work_lock);
schedule();
}
__set_current_state(TASK_RUNNING);
return 0;
}
/** * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread * @session: iscsi session * @bnx2i_conn: bnx2i connection * @cqe: pointer to newly DMA'ed CQE entry for processing * * this function is called by generic KCQ handler to queue all pending cmd * completion CQEs * * The implementation is to queue the cmd response based on the * last recorded command for the given connection. The * cpu_id gets recorded upon task_xmit. No out-of-order completion!
*/ staticint bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, struct bnx2i_conn *bnx2i_conn, struct bnx2i_nop_in_msg *cqe)
{ struct bnx2i_work *bnx2i_work = NULL; struct bnx2i_percpu_s *p = NULL; struct iscsi_task *task; struct scsi_cmnd *sc; int rc = 0;
switch (nopin->op_code) { case ISCSI_OP_SCSI_CMD_RSP: case ISCSI_OP_SCSI_DATA_IN: /* Run the kthread engine only for data cmds
All other cmds will be completed in this bh! */
bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin); goto done; case ISCSI_OP_LOGIN_RSP:
bnx2i_process_login_resp(session, bnx2i_conn,
qp->cq_cons_qe); break; case ISCSI_OP_SCSI_TMFUNC_RSP:
bnx2i_process_tmf_resp(session, bnx2i_conn,
qp->cq_cons_qe); break; case ISCSI_OP_TEXT_RSP:
bnx2i_process_text_resp(session, bnx2i_conn,
qp->cq_cons_qe); break; case ISCSI_OP_LOGOUT_RSP:
bnx2i_process_logout_resp(session, bnx2i_conn,
qp->cq_cons_qe); break; case ISCSI_OP_NOOP_IN: if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
qp->cq_cons_qe))
tgt_async_msg = 1; break; case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
qp->cq_cons_qe); break; case ISCSI_OP_ASYNC_EVENT:
bnx2i_process_async_mesg(session, bnx2i_conn,
qp->cq_cons_qe);
tgt_async_msg = 1; break; case ISCSI_OP_REJECT:
bnx2i_process_reject_mesg(session, bnx2i_conn,
qp->cq_cons_qe); break; case ISCSI_OPCODE_CLEANUP_RESPONSE:
bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
qp->cq_cons_qe); break; default:
printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
nopin->op_code);
}
ADD_STATS_64(hba, rx_pdus, 1);
ADD_STATS_64(hba, rx_bytes, nopin->data_length);
done: if (!tgt_async_msg) { if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
printk(KERN_ALERT "bnx2i (%s): no active cmd! " "op 0x%x\n",
hba->netdev->name,
nopin->op_code); else
atomic_dec(&bnx2i_conn->ep->num_active_cmds);
}
cqe_out: /* clear out in production version only, till beta keep opcode * field intact, will be helpful in debugging (context dump) * nopin->op_code = 0;
*/
cqe_cnt++;
qp->cqe_exp_seq_sn++; if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
if (!bnx2i_conn) {
printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); return;
} if (!bnx2i_conn->ep) {
printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); return;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.