/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
*/
staticconststruct ahd_phase_table_entry ahd_phase_table[] =
{
{ P_DATAOUT, NOP, "in Data-out phase" },
{ P_DATAIN, INITIATOR_ERROR, "in Data-in phase" },
{ P_DATAOUT_DT, NOP, "in DT Data-out phase" },
{ P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" },
{ P_COMMAND, NOP, "in Command phase" },
{ P_MESGOUT, NOP, "in Message-out phase" },
{ P_STATUS, INITIATOR_ERROR, "in Status phase" },
{ P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
{ P_BUSFREE, NOP, "while idle" },
{ 0, NOP, "in unknown phase" }
};
/* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count.
*/ staticconst u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1;
/* Our Sequencer Program */ #include"aic79xx_seq.h"
/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
int
ahd_is_paused(struct ahd_softc *ahd)
{
	return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
}
/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
void
ahd_pause(struct ahd_softc *ahd)
{
	ahd_outb(ahd, HCNTRL, ahd->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahd_is_paused(ahd) == 0)
		;
}
/* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition.
*/ void
ahd_unpause(struct ahd_softc *ahd)
{ /* * Automatically restore our modes to those saved * prior to the first change of the mode.
*/ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
&& ahd->saved_dst_mode != AHD_MODE_UNKNOWN) { if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
ahd_reset_cmds_pending(ahd);
ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
}
if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
ahd_outb(ahd, HCNTRL, ahd->unpause);
high_addr = ahd_le32toh(sg->len) & 0x7F000000;
scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
}
scb->hscb->datacnt = sg->len;
} /* * Note where to find the SG entries in bus space. * We also set the full residual flag which the * sequencer will clear as soon as a data transfer * occurs.
*/
scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
}
/*********************** Miscellaneous Support Functions ***********************/
/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 *
 * NOTE(review): no bounds or NULL checks here — assumes our_id indexes
 * a populated enabled_targets[] slot; callers must guarantee this.
 */
struct ahd_initiator_tinfo *
ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
		    u_int remote_id, struct ahd_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahd->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}
/*
 * Read a 16-bit register as two 8-bit accesses.
 */
uint16_t
ahd_inw(struct ahd_softc *ahd, u_int port)
{
	/*
	 * Read high byte first as some registers increment
	 * or have other side effects when the low byte is
	 * read.
	 */
	uint16_t r = ahd_inb(ahd, port+1) << 8;

	return r | ahd_inb(ahd, port);
}
/*
 * Write a 16-bit register as two 8-bit accesses.
 */
void
ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
{
	/*
	 * Write low byte first to accommodate registers
	 * such as PRGMCNT where the order matters.
	 */
	ahd_outb(ahd, port, value & 0xFF);
	ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
}
/* * Workaround PCI-X Rev A. hardware bug. * After a host read of SCB memory, the chip * may become confused into thinking prefetch * was required. This starts the discard timer * running and can cause an unexpected discard * timer interrupt. The work around is to read * a normal register prior to the exhaustion of * the discard timer. The mode pointer register * has no side effects and so serves well for * this purpose. * * Razor #528
*/
value = ahd_inb(ahd, offset); if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
ahd_inb(ahd, MODE_PTR); return (value);
}
/* * Our queuing method is a bit tricky. The card * knows in advance which HSCB (by address) to download, * and we can't disappoint it. To achieve this, the next * HSCB to download is saved off in ahd->next_queued_hscb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG.
*/
q_hscb = ahd->next_queued_hscb;
q_hscb_map = ahd->next_queued_hscb_map;
saved_hscb_busaddr = q_hscb->hscb_busaddr;
memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
q_hscb->hscb_busaddr = saved_hscb_busaddr;
q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
/* Now define the mapping from tag to SCB in the scbindex */
ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
}
/* * Tell the sequencer about a new transaction to execute.
*/ void
ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
{
ahd_swap_with_next_hscb(ahd, scb);
if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
panic("Attempt to queue invalid SCB tag %x\n",
SCB_GET_TAG(scb));
/* * Keep a history of SCBs we've downloaded in the qinfifo.
*/
ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
ahd->qinfifonext++;
/* * Catch an interrupt from the adapter
*/ int
ahd_intr(struct ahd_softc *ahd)
{
u_int intstat;
if ((ahd->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt.
*/ return (0);
}
/* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases.
*/ if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
&& (ahd_check_cmdcmpltqueues(ahd) != 0))
intstat = CMDCMPLT; else
intstat = ahd_inb(ahd, INTSTAT);
if ((intstat & INT_PEND) == 0) return (0);
if (intstat & CMDCMPLT) {
ahd_outb(ahd, CLRINT, CLRCMDINT);
/* * Ensure that the chip sees that we've cleared * this interrupt before we walk the output fifo. * Otherwise, we may, due to posted bus writes, * clear the interrupt after we finish the scan, * and after the sequencer has added new entries * and asserted the interrupt again.
*/ if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { if (ahd_is_paused(ahd)) { /* * Potentially lost SEQINT. * If SEQINTCODE is non-zero, * simulate the SEQINT.
*/ if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
intstat |= SEQINT;
}
} else {
ahd_flush_device_writes(ahd);
}
ahd_run_qoutfifo(ahd);
ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
ahd->cmdcmplt_total++; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0)
ahd_run_tqinfifo(ahd, /*paused*/FALSE); #endif
}
/* * Handle statuses that may invalidate our cached * copy of INTSTAT separately.
*/ if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) { /* Hot eject. Do nothing */
} elseif (intstat & HWERRINT) {
ahd_handle_hwerrint(ahd);
} elseif ((intstat & (PCIINT|SPLTINT)) != 0) {
ahd->bus_intr(ahd);
} else {
if ((intstat & SEQINT) != 0)
ahd_handle_seqint(ahd, intstat);
/*
 * Determine if the current connection has a packetized
 * agreement.  This does not necessarily mean that we
 * are currently in a packetized transfer.  We could
 * just as easily be sending or receiving a message.
 */
static int
ahd_currently_packetized(struct ahd_softc *ahd)
{
	ahd_mode_state	 saved_modes;
	int		 packetized;

	saved_modes = ahd_save_modes(ahd);
	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
		/*
		 * The packetized bit refers to the last
		 * connection, not the current one.  Check
		 * for non-zero LQISTATE instead.
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		packetized = ahd_inb(ahd, LQISTATE) != 0;
	} else {
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
	}
	ahd_restore_modes(ahd, saved_modes);
	return (packetized);
}
/************************* Sequencer Execution Control ************************/ /* * Restart the sequencer program from address zero
*/ staticvoid
ahd_restart(struct ahd_softc *ahd)
{
/* * Ensure that the sequencer's idea of TQINPOS * matches our own. The sequencer increments TQINPOS * only after it sees a DMA complete and a reset could * occur before the increment leaving the kernel to believe * the command arrived but the sequencer to not.
*/
ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
/* * Clear any pending sequencer interrupt. It is no * longer relevant since we're resetting the Program * Counter.
*/
ahd_outb(ahd, CLRINT, CLRSEQINT);
/************************* Input/Output Queues ********************************/ /* * Flush and completed commands that are sitting in the command * complete queues down on the chip but have yet to be dma'ed back up.
*/ staticvoid
ahd_flush_qoutfifo(struct ahd_softc *ahd)
{ struct scb *scb;
ahd_mode_state saved_modes;
u_int saved_scbptr;
u_int ccscbctl;
u_int scbid;
u_int next_scbid;
saved_modes = ahd_save_modes(ahd);
/* * Flush the good status FIFO for completed packetized commands.
*/
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
saved_scbptr = ahd_get_scbptr(ahd); while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
u_int fifo_mode;
u_int i;
scbid = ahd_inw(ahd, GSFIFO);
scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) {
printk("%s: Warning - GSFIFO SCB %d invalid\n",
ahd_name(ahd), scbid); continue;
} /* * Determine if this transaction is still active in * any FIFO. If it is, we must flush that FIFO to * the host before completing the command.
*/
fifo_mode = 0;
rescan_fifos: for (i = 0; i < 2; i++) { /* Toggle to the other mode. */
fifo_mode ^= 1;
ahd_set_modes(ahd, fifo_mode, fifo_mode);
if (ahd_scb_active_in_fifo(ahd, scb) == 0) continue;
ahd_run_data_fifo(ahd, scb);
/* * Running this FIFO may cause a CFG4DATA for * this same transaction to assert in the other * FIFO or a new snapshot SAVEPTRS interrupt * in this FIFO. Even running a FIFO may not * clear the transaction if we are still waiting * for data to drain to the host. We must loop * until the transaction is not active in either * FIFO just to be sure. Reset our loop counter * so we will visit both FIFOs again before * declaring this transaction finished. We * also delay a bit so that status has a chance * to change before we look at this FIFO again.
*/
ahd_delay(200); goto rescan_fifos;
}
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
ahd_set_scbptr(ahd, scbid); if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
&& ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
|| (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
& SG_LIST_NULL) != 0)) {
u_int comp_head;
/* * The transfer completed with a residual. * Place this SCB on the complete DMA list * so that we update our in-core copy of the * SCB before completing the command.
*/
ahd_outb(ahd, SCB_SCSI_STATUS, 0);
ahd_outb(ahd, SCB_SGPTR,
ahd_inb_scbram(ahd, SCB_SGPTR)
| SG_STATUS_VALID);
ahd_outw(ahd, SCB_TAG, scbid);
ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); if (SCBID_IS_NULL(comp_head)) {
ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
} else {
u_int tail;
/* * Setup for command channel portion of flush.
*/
ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
/* * Wait for any inprogress DMA to complete and clear DMA state * if this is for an SCB in the qinfifo.
*/ while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) { if ((ccscbctl & ARRDONE) != 0) break;
} elseif ((ccscbctl & CCSCBDONE) != 0) break;
ahd_delay(200);
} /* * We leave the sequencer to cleanup in the case of DMA's to * update the qoutfifo. In all other cases (DMA's to the * chip or a push of an SCB from the COMPLETE_DMA_SCB list), * we disable the DMA engine so that the sequencer will not * attempt to handle the DMA completion.
*/ if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));
/* * Complete any SCBs that just finished * being DMA'ed into the qoutfifo.
*/
ahd_run_qoutfifo(ahd);
saved_scbptr = ahd_get_scbptr(ahd); /* * Manually update/complete any completed SCBs that are waiting to be * DMA'ed back up to the host.
*/
scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) {
uint8_t *hscb_ptr;
u_int i;
ahd_set_scbptr(ahd, scbid);
next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) {
printk("%s: Warning - DMA-up and complete " "SCB %d invalid\n", ahd_name(ahd), scbid); continue;
}
hscb_ptr = (uint8_t *)scb->hscb; for (i = 0; i < sizeof(struct hardware_scb); i++)
*hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);
/*
 * Determine if an SCB for a packetized transaction
 * is active in a FIFO.  Returns non-zero when active.
 */
static int
ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
{

	/*
	 * The FIFO is only active for our transaction if
	 * the SCBPTR matches the SCB's ID and the firmware
	 * has installed a handler for the FIFO or we have
	 * a pending SAVEPTRS or CFG4DATA interrupt.
	 */
	if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
	 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
	  && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
		return (0);

	return (1);
}
/* * Run a data fifo to completion for a transaction we know * has completed across the SCSI bus (good status has been * received). We are already set to the correct FIFO mode * on entry to this routine. * * This function attempts to operate exactly as the firmware * would when running this FIFO. Care must be taken to update * this routine any time the firmware's FIFO algorithm is * changed.
*/ staticvoid
ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
{
u_int seqintsrc;
/* * Mark the SCB as having a FIFO in use.
*/
ahd_outb(ahd, SCB_FIFO_USE_COUNT,
ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);
/* * Install a "fake" handler for this FIFO.
*/
ahd_outw(ahd, LONGJMP_ADDR, 0);
/* * Notify the hardware that we have satisfied * this sequencer interrupt.
*/
ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
} elseif ((seqintsrc & SAVEPTRS) != 0) {
uint32_t sgptr;
uint32_t resid;
if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) { /* * Snapshot Save Pointers. All that * is necessary to clear the snapshot * is a CLRCHN.
*/ goto clrchn;
}
/* * Disable S/G fetch so the DMA engine * is available to future users.
*/ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
ahd_outb(ahd, CCSGCTL, 0);
ahd_outb(ahd, SG_STATE, 0);
/* * Flush the data FIFO. Strickly only * necessary for Rev A parts.
*/
ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);
/* * Calculate residual.
*/
sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
resid = ahd_inl(ahd, SHCNT);
resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid); if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) { /* * Must back up to the correct S/G element. * Typically this just means resetting our * low byte to the offset in the SG_CACHE, * but if we wrapped, we have to correct * the other bytes of the sgptr too.
*/ if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
&& (sgptr & 0x80) == 0)
sgptr -= 0x100;
sgptr &= ~0xFF;
sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
& SG_ADDR_MASK;
ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
} elseif ((resid & AHD_SG_LEN_MASK) == 0) {
ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
sgptr | SG_LIST_NULL);
} /* * Save Pointers.
*/
ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
ahd_outl(ahd, SCB_DATACNT, resid);
ahd_outl(ahd, SCB_SGPTR, sgptr);
ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
ahd_outb(ahd, SEQIMODE,
ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS); /* * If the data is to the SCSI bus, we are * done, otherwise wait for FIFOEMP.
*/ if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0) goto clrchn;
} elseif ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
uint32_t sgptr;
uint64_t data_addr;
uint32_t data_len;
u_int dfcntrl;
/* * Disable S/G fetch so the DMA engine * is available to future users. We won't * be using the DMA engine to load segments.
*/ if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
ahd_outb(ahd, CCSGCTL, 0);
ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
}
/* * Wait for the DMA engine to notice that the * host transfer is enabled and that there is * space in the S/G FIFO for new segments before * loading more segments.
*/ if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
&& (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {
/* * Determine the offset of the next S/G * element to load.
*/
sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
sgptr &= SG_PTR_MASK; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg;
/* * Advertise the segment to the hardware.
*/
dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN; if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) { /* * Use SCSIENWRDIS so that SCSIEN * is never modified by this * operation.
*/
dfcntrl |= SCSIENWRDIS;
}
ahd_outb(ahd, DFCNTRL, dfcntrl);
}
} elseif ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {
/* * Transfer completed to the end of SG list * and has flushed to the host.
*/
ahd_outb(ahd, SCB_SGPTR,
ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL); goto clrchn;
} elseif ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
clrchn: /* * Clear any handler for this FIFO, decrement * the FIFO use count for the SCB, and release * the FIFO.
*/
ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
ahd_outb(ahd, SCB_FIFO_USE_COUNT,
ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
}
}
/* * Look for entries in the QoutFIFO that have completed. * The valid_tag completion field indicates the validity * of the entry - the valid value toggles each time through * the queue. We use the sg_status field in the completion * entry to avoid referencing the hscb if the completion * occurred with no errors and no residual. sg_status is * a copy of the first byte (little endian) of the sgptr * hscb field.
*/ staticvoid
ahd_run_qoutfifo(struct ahd_softc *ahd)
{ struct ahd_completion *completion; struct scb *scb;
u_int scb_index;
if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
panic("ahd_run_qoutfifo recursion");
ahd->flags |= AHD_RUNNING_QOUTFIFO;
ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD); for (;;) {
completion = &ahd->qoutfifo[ahd->qoutfifonext];
if (completion->valid_tag != ahd->qoutfifonext_valid_tag) break;
/************************* Interrupt Handling *********************************/ staticvoid
ahd_handle_hwerrint(struct ahd_softc *ahd)
{ /* * Some catastrophic hardware error has occurred. * Print it for the user and disable the controller.
*/ int i; int error;
error = ahd_inb(ahd, ERROR); for (i = 0; i < num_errors; i++) { if ((error & ahd_hard_errors[i].errno) != 0)
printk("%s: hwerrint, %s\n",
ahd_name(ahd), ahd_hard_errors[i].errmesg);
}
ahd_dump_card_state(ahd);
panic("BRKADRINT");
/* Tell everyone that this HBA is no longer available */
ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
CAM_NO_HBA);
/* Tell the system that this controller has gone away. */
ahd_free(ahd);
}
#ifdef AHD_DEBUG staticvoid
ahd_dump_sglist(struct scb *scb)
{ int i;
if (scb->sg_count > 0) { if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg_list;
sg_list = (struct ahd_dma64_seg*)scb->sg_list; for (i = 0; i < scb->sg_count; i++) {
uint64_t addr;
/* * Save the sequencer interrupt code and clear the SEQINT * bit. We will unpause the sequencer, if appropriate, * after servicing the request.
*/
seqintcode = ahd_inb(ahd, SEQINTCODE);
ahd_outb(ahd, CLRINT, CLRSEQINT); if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) { /* * Unpause the sequencer and let it clear * SEQINT by writing NO_SEQINT to it. This * will cause the sequencer to be paused again, * which is the expected state of this routine.
*/
ahd_unpause(ahd); while (!ahd_is_paused(ahd))
;
ahd_outb(ahd, CLRINT, CLRSEQINT);
}
ahd_update_modes(ahd); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0)
printk("%s: Handle Seqint Called for code %d\n",
ahd_name(ahd), seqintcode); #endif switch (seqintcode) { case ENTERING_NONPACK:
{ struct scb *scb;
u_int scbid;
AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
scbid = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { /* * Somehow need to know if this * is from a selection or reselection. * From that, we can determine target * ID so we at least have an I_T nexus.
*/
} else {
ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
ahd_outb(ahd, SEQ_FLAGS, 0x0);
} if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
&& (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) { /* * Phase change after read stream with * CRC error with P0 asserted on last * packet.
*/ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
printk("%s: Assuming LQIPHASE_NLQ with " "P0 assertion\n", ahd_name(ahd)); #endif
} #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
printk("%s: Entering NONPACK\n", ahd_name(ahd)); #endif break;
} case INVALID_SEQINT:
printk("%s: Invalid Sequencer interrupt occurred, " "resetting channel.\n",
ahd_name(ahd)); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
ahd_dump_card_state(ahd); #endif
ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break; case STATUS_OVERRUN:
{ struct scb *scb;
u_int scbid;
switch (bus_phase) { case P_DATAOUT: case P_DATAIN: case P_DATAOUT_DT: case P_DATAIN_DT: case P_MESGOUT: case P_STATUS: case P_MESGIN:
ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
printk("%s: Issued Bus Reset.\n", ahd_name(ahd)); break; case P_COMMAND:
{ struct ahd_devinfo devinfo; struct scb *scb;
u_int scbid;
/* * If a target takes us into the command phase * assume that it has been externally reset and * has thus lost our previous packetized negotiation * agreement. Since we have not sent an identify * message and may not have fully qualified the * connection, we change our command to TUR, assert * ATN and ABORT the task when we go to message in * phase. The OSM will see the REQUEUE_REQUEST * status and retry the command.
*/
scbid = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) {
printk("Invalid phase with no valid SCB. " "Resetting bus.\n");
ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); break;
}
ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
SCB_GET_TARGET(ahd, scb),
SCB_GET_LUN(scb),
SCB_GET_CHANNEL(ahd, scb),
ROLE_INITIATOR);
ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
AHD_TRANS_ACTIVE, /*paused*/TRUE);
ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0,
AHD_TRANS_ACTIVE, /*paused*/TRUE); /* Hand-craft TUR command */
ahd_outb(ahd, SCB_CDB_STORE, 0);
ahd_outb(ahd, SCB_CDB_STORE+1, 0);
ahd_outb(ahd, SCB_CDB_STORE+2, 0);
ahd_outb(ahd, SCB_CDB_STORE+3, 0);
ahd_outb(ahd, SCB_CDB_STORE+4, 0);
ahd_outb(ahd, SCB_CDB_STORE+5, 0);
ahd_outb(ahd, SCB_CDB_LEN, 6);
scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
scb->hscb->control |= MK_MESSAGE;
ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
ahd_outb(ahd, MSG_OUT, HOST_MSG);
ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid); /* * The lun is 0, regardless of the SCB's lun * as we have not sent an identify message.
*/
ahd_outb(ahd, SAVED_LUN, 0);
ahd_outb(ahd, SEQ_FLAGS, 0);
ahd_assert_atn(ahd);
scb->flags &= ~SCB_PACKETIZED;
scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET;
ahd_freeze_devq(ahd, scb);
ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
ahd_freeze_scb(scb);
/* * Allow the sequencer to continue with * non-pack processing.
*/
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
ahd_outb(ahd, CLRLQOINT1, 0);
} #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
ahd_print_path(ahd, scb);
printk("Unexpected command phase from " "packetized target\n");
} #endif break;
}
} break;
} case CFG4OVERRUN:
{ struct scb *scb;
u_int scb_index;
#ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
ahd_inb(ahd, MODE_PTR));
} #endif
scb_index = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scb_index); if (scb == NULL) { /* * Attempt to transfer to an SCB that is * not outstanding.
*/
ahd_assert_atn(ahd);
ahd_outb(ahd, MSG_OUT, HOST_MSG);
ahd->msgout_buf[0] = ABORT_TASK;
ahd->msgout_len = 1;
ahd->msgout_index = 0;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; /* * Clear status received flag to prevent any * attempt to complete this bogus SCB.
*/
ahd_outb(ahd, SCB_CONTROL,
ahd_inb_scbram(ahd, SCB_CONTROL)
& ~STATUS_RCVD);
} break;
} case DUMP_CARD_STATE:
{
ahd_dump_card_state(ahd); break;
} case PDATA_REINIT:
{ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
printk("%s: PDATA_REINIT - DFCNTRL = 0x%x " "SG_CACHE_SHADOW = 0x%x\n",
ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
ahd_inb(ahd, SG_CACHE_SHADOW));
} #endif
ahd_reinitialize_dataptrs(ahd); break;
} case HOST_MSG_LOOP:
{ struct ahd_devinfo devinfo;
/* * The sequencer has encountered a message phase * that requires host assistance for completion. * While handling the message phase(s), we will be * notified by the sequencer after each byte is * transferred so we can track bus phase changes. * * If this is the first time we've seen a HOST_MSG_LOOP * interrupt, initialize the state of the host message * loop.
*/
ahd_fetch_devinfo(ahd, &devinfo); if (ahd->msg_type == MSG_TYPE_NONE) { struct scb *scb;
u_int scb_index;
u_int bus_phase;
bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; if (bus_phase != P_MESGIN
&& bus_phase != P_MESGOUT) {
printk("ahd_intr: HOST_MSG_LOOP bad " "phase 0x%x\n", bus_phase); /* * Probably transitioned to bus free before * we got here. Just punt the message.
*/
ahd_dump_card_state(ahd);
ahd_clear_intstat(ahd);
ahd_restart(ahd); return;
}
lastphase = ahd_inb(ahd, LASTPHASE);
printk("%s:%c:%d: Missed busfree. " "Lastphase = 0x%x, Curphase = 0x%x\n",
ahd_name(ahd), 'A',
SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
lastphase, ahd_inb(ahd, SCSISIGI));
ahd_restart(ahd); return;
} case DATA_OVERRUN:
{ /* * When the sequencer detects an overrun, it * places the controller in "BITBUCKET" mode * and allows the target to complete its transfer. * Unfortunately, none of the counters get updated * when the controller is in this mode, so we have * no way of knowing how large the overrun was.
*/ struct scb *scb;
u_int scbindex; #ifdef AHD_DEBUG
u_int lastphase; #endif
/* * Set this and it will take effect when the * target does a command complete.
*/
ahd_freeze_devq(ahd, scb);
ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
ahd_freeze_scb(scb); break;
} case MKMSG_FAILED:
{ struct ahd_devinfo devinfo; struct scb *scb;
u_int scbid;
ahd_fetch_devinfo(ahd, &devinfo);
printk("%s:%c:%d:%d: Attempt to issue message failed\n",
ahd_name(ahd), devinfo.channel, devinfo.target,
devinfo.lun);
scbid = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL
&& (scb->flags & SCB_RECOVERY_SCB) != 0) /* * Ensure that we didn't put a second instance of this * SCB into the QINFIFO.
*/
ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
SCB_GET_CHANNEL(ahd, scb),
SCB_GET_LUN(scb), SCB_GET_TAG(scb),
ROLE_INITIATOR, /*status*/0,
SEARCH_REMOVE);
ahd_outb(ahd, SCB_CONTROL,
ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE); break;
} case TASKMGMT_FUNC_COMPLETE:
{
u_int scbid; struct scb *scb;
switch (scb->hscb->task_management) { case SIU_TASKMGMT_ABORT_TASK:
tag = SCB_GET_TAG(scb);
fallthrough; case SIU_TASKMGMT_ABORT_TASK_SET: case SIU_TASKMGMT_CLEAR_TASK_SET:
lun = scb->hscb->lun;
error = CAM_REQ_ABORTED;
ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', lun, tag, ROLE_INITIATOR,
error); break; case SIU_TASKMGMT_LUN_RESET:
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.20Bemerkung:
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.