// SPDX-License-Identifier: GPL-2.0-or-later /* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr> * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <wolf@cologne.de> * Stefan Esser <se@mi.Uni-Koeln.de> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> * *-----------------------------------------------------------------------------
*/
#include <linux/slab.h> #include <asm/param.h> /* for timeouts in units of HZ */
/*
 * Dump a buffer as space-separated hex bytes, terminated by ".\n".
 */
static void sym_printl_hex(u_char *p, int n)
{
	int i;

	for (i = 0; i < n; i++)
		printf (" %x", p[i]);
	printf (".\n");
}
/*
 * Report the extended error conditions recorded in x_status
 * for a command.  Each set XE_* bit produces one message.
 */
void sym_print_xerr(struct scsi_cmnd *cmd, int x_status)
{
	if (x_status & XE_PARITY_ERR)
		sym_print_addr(cmd, "unrecovered SCSI parity error.\n");
	if (x_status & XE_EXTRA_DATA)
		sym_print_addr(cmd, "extraneous data discarded.\n");
	if (x_status & XE_BAD_PHASE)
		sym_print_addr(cmd, "illegal scsi phase (4/5).\n");
	if (x_status & XE_SODL_UNRUN)
		sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n");
	if (x_status & XE_SWIDE_OVRUN)
		sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n");
}
/*
 * Return a printable name for a SCSI BUS mode,
 * "??" for any value that is not a known SMODE_*.
 */
static char *sym_scsi_bus_mode(int mode)
{
	if (mode == SMODE_HVD)
		return "HVD";
	if (mode == SMODE_SE)
		return "SE";
	if (mode == SMODE_LVD)
		return "LVD";
	return "??";
}
/*
 * Soft reset the chip.
 *
 * Raising SRST when the chip is running may cause
 * problems on dual function chips (see sym_soft_reset).
 * On the other hand, LVD devices need some delay
 * to settle and report actual BUS mode in STEST4.
 */
static void sym_chip_reset (struct sym_hcb *np)
{
	OUTB(np, nc_istat, SRST);
	INB(np, nc_mbox1);	/* dummy read, result discarded */
	udelay(10);
	OUTB(np, nc_istat, 0);
	INB(np, nc_mbox1);	/* dummy read, result discarded */
	udelay(2000);		/* For BUS MODE to settle */
}
/* * Really soft reset the chip.:) * * Some 896 and 876 chip revisions may hang-up if we set * the SRST (soft reset) bit at the wrong time when SCRIPTS * are running. * So, we need to abort the current operation prior to * soft resetting the chip.
*/ staticvoid sym_soft_reset (struct sym_hcb *np)
{
u_char istat = 0; int i;
OUTB(np, nc_istat, CABRT); for (i = 100000 ; i ; --i) {
istat = INB(np, nc_istat); if (istat & SIP) {
INW(np, nc_sist);
} elseif (istat & DIP) { if (INB(np, nc_dstat) & ABRT) break;
}
udelay(5);
}
OUTB(np, nc_istat, 0); if (!i)
printf("%s: unable to abort current chip operation, " "ISTAT=0x%02x.\n", sym_name(np), istat);
do_chip_reset:
sym_chip_reset(np);
}
/*
 * Start the SCSI bus reset process.
 *
 * The interrupt handler will reinitialize the chip.
 */
static void sym_start_reset(struct sym_hcb *np)
{
	sym_reset_scsi_bus(np, 1);
}
/*
 * Reset the SCSI BUS.
 *
 * When enab_int is set, the RST interrupt is unmasked so the
 * interrupt handler can take over afterwards.  Returns non-zero
 * only when SYM_SETUP_SCSI_BUS_CHECK == 1 and the bus lines look
 * suspicious while RST is asserted.
 */
int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int)
{
	u32 term;
	int retv = 0;

	sym_soft_reset(np);	/* Soft reset the chip */
	if (enab_int)
		OUTW(np, nc_sien, RST);
	/*
	 * Enable Tolerant, reset IRQD if present and
	 * properly set IRQ mode, prior to resetting the bus.
	 */
	OUTB(np, nc_stest3, TE);
	OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM));

	OUTB(np, nc_scntl1, CRST);
	INB(np, nc_mbox1);	/* dummy read, result discarded */
	udelay(200);

	if (!SYM_SETUP_SCSI_BUS_CHECK)
		goto out;
	/*
	 * Check for no terminators or SCSI bus shorts to ground.
	 * Read SCSI data bus, data parity bits and control signals.
	 * We are expecting RESET to be TRUE and other signals to be
	 * FALSE.
	 */
	term = INB(np, nc_sstat0);
	term = ((term & 2) << 7) + ((term & 1) << 17);	/* rst sdp0 */
	term |= ((INB(np, nc_sstat2) & 0x01) << 26) |	/* sdp1     */
		((INW(np, nc_sbdl) & 0xff)   << 9)  |	/* d7-0     */
		((INW(np, nc_sbdl) & 0xff00) << 10) |	/* d15-8    */
		INB(np, nc_sbcl);	/* req ack bsy sel atn msg cd io */

	if (!np->maxwide)
		term &= 0x3ffff;	/* ignore the wide-bus lines */

	if (term != (2<<7)) {
		printf("%s: suspicious SCSI data while resetting the BUS.\n",
			sym_name(np));
		printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
			"0x%lx, expecting 0x%lx\n",
			sym_name(np),
			(np->features & FE_WIDE) ? "dp1,d15-8," : "",
			(u_long)term, (u_long)(2<<7));
		if (SYM_SETUP_SCSI_BUS_CHECK == 1)
			retv = 1;
	}
out:
	OUTB(np, nc_scntl1, 0);
	return retv;
}
/*
 * Select SCSI clock frequency.
 */
static void sym_selectclock(struct sym_hcb *np, u_char scntl3)
{
	/*
	 * If multiplier not present or not selected, leave here.
	 */
	if (np->multiplier <= 1) {
		OUTB(np, nc_scntl3, scntl3);
		return;
	}

	if (sym_verbose >= 2)
		printf ("%s: enabling clock multiplier\n", sym_name(np));

	OUTB(np, nc_stest1, DBLEN);	/* Enable clock multiplier */
	/*
	 * Wait for the LCKFRQ bit to be set if supported by the chip.
	 * Otherwise wait 50 micro-seconds (at least).
	 */
	if (np->features & FE_LCKFRQ) {
		int i = 20;
		while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0)
			udelay(20);
		if (!i)
			printf("%s: the chip cannot lock the frequency\n",
				sym_name(np));
	} else {
		INB(np, nc_mbox1);	/* dummy read, result discarded */
		udelay(50+10);
	}
	OUTB(np, nc_stest3, HSC);		/* Halt the scsi clock	*/
	OUTB(np, nc_scntl3, scntl3);
	OUTB(np, nc_stest1, (DBLEN|DBLSEL));	/* Select clock multiplier */
	OUTB(np, nc_stest3, 0x00);		/* Restart scsi clock	*/
}
/* * Determine the chip's clock frequency. * * This is essential for the negotiation of the synchronous * transfer rate. * * Note: we have to return the correct value. * THERE IS NO SAFE DEFAULT VALUE. * * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. * 53C860 and 53C875 rev. 1 support fast20 transfers but * do not have a clock doubler and so are provided with a * 80 MHz clock. All other fast20 boards incorporate a doubler * and so should be delivered with a 40 MHz clock. * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base * clock and provide a clock quadrupler (160 Mhz).
*/
/* * calculate SCSI clock frequency (in KHz)
*/ staticunsigned getfreq (struct sym_hcb *np, int gen)
{ unsignedint ms = 0; unsignedint f;
/* * Measure GEN timer delay in order * to calculate SCSI clock frequency * * This code will never execute too * many loop iterations (if DELAY is * reasonably correct). It could get * too low a delay (too high a freq.) * if the CPU is slow executing the * loop for some reason (an NMI, for * example). For this reason we will * if multiple measurements are to be * performed trust the higher delay * (lower frequency returned).
*/
OUTW(np, nc_sien, 0); /* mask all scsi interrupts */
INW(np, nc_sist); /* clear pending scsi interrupt */
OUTB(np, nc_dien, 0); /* mask all dma interrupts */
INW(np, nc_sist); /* another one, just to be sure :) */ /* * The C1010-33 core does not report GEN in SIST, * if this interrupt is masked in SIEN. * I don't know yet if the C1010-66 behaves the same way.
*/ if (np->features & FE_C10) {
OUTW(np, nc_sien, GEN);
OUTB(np, nc_istat1, SIRQD);
}
OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */
OUTB(np, nc_stime1, 0); /* disable general purpose timer */
OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */ while (!(INW(np, nc_sist) & GEN) && ms++ < 100000)
udelay(1000/4); /* count in 1/4 of ms */
OUTB(np, nc_stime1, 0); /* disable general purpose timer */ /* * Undo C1010-33 specific settings.
*/ if (np->features & FE_C10) {
OUTW(np, nc_sien, 0);
OUTB(np, nc_istat1, 0);
} /* * set prescaler to divide by whatever 0 means * 0 ought to choose divide by 2, but appears * to set divide by 3.5 mode in my 53c810 ...
*/
OUTB(np, nc_scntl3, 0);
/* * adjust for prescaler, and convert into KHz
*/
f = ms ? ((1 << gen) * (4340*4)) / ms : 0;
/* * The C1010-33 result is biased by a factor * of 2/3 compared to earlier chips.
*/ if (np->features & FE_C10)
f = (f * 2) / 3;
np->multiplier = 1;
f1 = 40000; /* * True with 875/895/896/895A with clock multiplier selected
*/ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) { if (sym_verbose >= 2)
printf ("%s: clock multiplier found\n", sym_name(np));
np->multiplier = mult;
}
/* * If multiplier not found or scntl3 not 7,5,3, * reset chip and get frequency from general purpose timer. * Otherwise trust scntl3 BIOS setting.
*/ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */
f1 = sym_getfreq (np);
if (sym_verbose)
printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);
if (f1 < 45000) f1 = 40000; elseif (f1 < 55000) f1 = 50000; else f1 = 80000;
if (f1 < 80000 && mult > 1) { if (sym_verbose >= 2)
printf ("%s: clock multiplier assumed\n",
sym_name(np));
np->multiplier = mult;
}
} else { if ((scntl3 & 7) == 3) f1 = 40000; elseif ((scntl3 & 7) == 5) f1 = 80000; else f1 = 160000;
/* * Get/probe PCI clock frequency
*/ staticint sym_getpciclock (struct sym_hcb *np)
{ int f = 0;
/* * For now, we only need to know about the actual * PCI BUS clock frequency for C1010-66 chips.
*/ #if 1 if (np->features & FE_66MHZ) { #else if (1) { #endif
OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
f = sym_getfreq(np);
OUTB(np, nc_stest1, 0);
}
np->pciclk_khz = f;
return f;
}
/* * SYMBIOS chip clock divisor table. * * Divisors are multiplied by 10,000,000 in order to make * calculations more simple.
*/ #define _5M 5000000 staticconst u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
/* * Get clock factor and sync divisor for a given * synchronous factor period.
*/ staticint
sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
{
u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ int div = np->clock_divn; /* Number of divisors supported */
u32 fak; /* Sync factor in sxfer */
u32 per; /* Period in tenths of ns */
u32 kpc; /* (per * clk) */ int ret;
/* * Compute the synchronous period in tenths of nano-seconds
*/ if (dt && sfac <= 9) per = 125; elseif (sfac <= 10) per = 250; elseif (sfac == 11) per = 303; elseif (sfac == 12) per = 500; else per = 40 * sfac;
ret = per;
kpc = per * clk; if (dt)
kpc <<= 1;
/* * For earliest C10 revision 0, we cannot use extra * clocks for the setting of the SCSI clocking. * Note that this limits the lowest sync data transfer * to 5 Mega-transfers per second and may result in * using higher clock divisors.
*/ #if 1 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { /* * Look for the lowest clock divisor that allows an * output speed not faster than the period.
*/ while (div > 0) {
--div; if (kpc > (div_10M[div] << 2)) {
++div; break;
}
}
fak = 0; /* No extra clocks */ if (div == np->clock_divn) { /* Are we too fast ? */
ret = -1;
}
*divp = div;
*fakp = fak; return ret;
} #endif
/* * Look for the greatest clock divisor that allows an * input speed faster than the period.
*/ while (--div > 0) if (kpc >= (div_10M[div] << 2)) break;
/* * Calculate the lowest clock factor that allows an output * speed not faster than the period, and the max output speed. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
*/ if (dt) {
fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
} else {
fak = (kpc - 1) / div_10M[div] + 1 - 4; /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
}
/* * Check against our hardware limits, or bugs :).
*/ if (fak > 2) {
fak = 2;
ret = -1;
}
/* * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, * 128 transfers. All chips support at least 16 transfers * bursts. The 825A, 875 and 895 chips support bursts of up * to 128 transfers and the 895A and 896 support bursts of up * to 64 transfers. All other chips support up to 16 * transfers bursts. * * For PCI 32 bit data transfers each transfer is a DWORD. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. * * We use log base 2 (burst length) as internal code, with * value 0 meaning "burst disabled".
*/
/*
 * Save initial settings of some IO registers.
 * Assumed to have been set by BIOS.
 * We cannot reset the chip prior to reading the
 * IO registers, since informations will be lost.
 * Since the SCRIPTS processor may be running, this
 * is not safe on paper, but it seems to work quite
 * well. :)
 */
static void sym_save_initial_setting (struct sym_hcb *np)
{
	np->sv_scntl0	= INB(np, nc_scntl0) & 0x0a;
	np->sv_scntl3	= INB(np, nc_scntl3) & 0x07;
	np->sv_dmode	= INB(np, nc_dmode)  & 0xce;
	np->sv_dcntl	= INB(np, nc_dcntl)  & 0xa8;
	np->sv_ctest3	= INB(np, nc_ctest3) & 0x01;
	np->sv_ctest4	= INB(np, nc_ctest4) & 0x80;
	np->sv_gpcntl	= INB(np, nc_gpcntl);
	np->sv_stest1	= INB(np, nc_stest1);
	np->sv_stest2	= INB(np, nc_stest2) & 0x20;
	np->sv_stest4	= INB(np, nc_stest4);
	if (np->features & FE_C10) {	/* Always large DMA fifo + ultra3 */
		np->sv_scntl4	= INB(np, nc_scntl4);
		np->sv_ctest5	= INB(np, nc_ctest5) & 0x04;
	} else
		np->sv_ctest5	= INB(np, nc_ctest5) & 0x24;
}
/* * Set SCSI BUS mode. * - LVD capable chips (895/895A/896/1010) report the current BUS mode * through the STEST4 IO register. * - For previous generation chips (825/825A/875), the user has to tell us * how to check against HVD, since a 100% safe algorithm is not possible.
*/ staticvoid sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram)
{ if (np->scsi_mode) return;
/* * Prepare io register values used by sym_start_up() * according to selected and supported features.
*/ staticint sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
{ struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev;
u_char burst_max;
u32 period; int i;
np->maxwide = (np->features & FE_WIDE) ? 1 : 0;
/* * Guess the frequency of the chip's clock.
*/ if (np->features & (FE_ULTRA3 | FE_ULTRA2))
np->clock_khz = 160000; elseif (np->features & FE_ULTRA)
np->clock_khz = 80000; else
np->clock_khz = 40000;
/* * Get the clock multiplier factor.
*/ if (np->features & FE_QUAD)
np->multiplier = 4; elseif (np->features & FE_DBLR)
np->multiplier = 2; else
np->multiplier = 1;
/* * Measure SCSI clock frequency for chips * it may vary from assumed one.
*/ if (np->features & FE_VARCLK)
sym_getclock(np, np->multiplier);
/* * Divisor to be used for async (timer pre-scaler).
*/
i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
++i; break;
}
}
np->rv_scntl3 = i+1;
/* * The C1010 uses hardwired divisors for async. * So, we just throw away, the async. divisor.:-)
*/ if (np->features & FE_C10)
np->rv_scntl3 = 0;
/* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds.
*/
period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
/* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
*/ if (np->minsync < 25 &&
!(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
np->minsync = 25; elseif (np->minsync < 12 &&
!(np->features & (FE_ULTRA2|FE_ULTRA3)))
np->minsync = 12;
/* * Maximum synchronous period factor supported by the chip.
*/
period = div64_ul(11 * div_10M[np->clock_divn - 1], 4 * np->clock_khz);
np->maxsync = period > 2540 ? 254 : period / 10;
/* * If chip is a C1010, guess the sync limits in DT mode.
*/ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { if (np->clock_khz == 160000) {
np->minsync_dt = 9;
np->maxsync_dt = 50;
np->maxoffs_dt = nvram->type ? 62 : 31;
}
}
/* * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. * This chip and the 860 Rev 1 may wrongly use PCI cache line * based transactions on LOAD/STORE instructions. So we have * to prevent these chips from using such PCI transactions in * this driver. The generic ncr driver that does not use * LOAD/STORE instructions does not need this work-around.
*/ if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 &&
pdev->revision >= 0x10 && pdev->revision <= 0x11) ||
(pdev->device == PCI_DEVICE_ID_NCR_53C860 &&
pdev->revision <= 0x1))
np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
/* * Select all supported special features. * If we are using on-board RAM for scripts, prefetch (PFEN) * does not help, but burst op fetch (BOF) does. * Disabling PFEN makes sure BOF will be used.
*/ if (np->features & FE_ERL)
np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF)
np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP)
np->rv_dmode |= ERMP; /* Enable Read Multiple */ #if 1 if ((np->features & FE_PFEN) && !np->ram_ba) #else if (np->features & FE_PFEN) #endif
np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE)
np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE)
np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS)
np->rv_ctest5 |= DFS; /* Dma Fifo Size */
/* * Select some other
*/
np->rv_ctest4 |= MPEE; /* Master parity checking */
np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
/* * Get parity checking, host ID and verbose mode from NVRAM
*/
np->myaddr = 255;
np->scsi_mode = 0;
sym_nvram_setup_host(shost, np, nvram);
/* * Get SCSI addr of host adapter (set by bios?).
*/ if (np->myaddr == 255) {
np->myaddr = INB(np, nc_scid) & 0x07; if (!np->myaddr)
np->myaddr = SYM_SETUP_HOST_ID;
}
/* * Set LED support from SCRIPTS. * Ignore this feature for boards known to use a * specific GPIO wiring and for the 895A, 896 * and 1010 that drive the LED directly.
*/ if ((SYM_SETUP_SCSI_LED ||
(nvram->type == SYM_SYMBIOS_NVRAM ||
(nvram->type == SYM_TEKRAM_NVRAM &&
pdev->device == PCI_DEVICE_ID_NCR_53C895))) &&
!(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
np->features |= FE_LED0;
/* * Set irq mode.
*/ switch(SYM_SETUP_IRQ_MODE & 3) { case 2:
np->rv_dcntl |= IRQM; break; case 1:
np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break;
}
/* * Configure targets according to driver setup. * If NVRAM present get targets setup from NVRAM.
*/ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { struct sym_tcb *tp = &np->target[i];
/*
 * Test the pci bus snoop logic :-(
 *
 * Has to be called with interrupts disabled.
 */
#ifdef CONFIG_SCSI_SYM53C8XX_MMIO
static int sym_regtest(struct sym_hcb *np)
{
	register volatile u32 data;
	/*
	 * chip registers may NOT be cached.
	 * write 0xffffffff to a read only register area,
	 * and try to read it back.
	 */
	data = 0xffffffff;
	OUTL(np, nc_dstat, data);
	data = INL(np, nc_dstat);
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return 0x10;
	}
	return 0;
}
#else
static inline int sym_regtest(struct sym_hcb *np)
{
	return 0;
}
#endif
staticint sym_snooptest(struct sym_hcb *np)
{
u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat; int i, err;
err = sym_regtest(np); if (err) return err;
restart_test: /* * Enable Master Parity Checking as we intend * to enable it for normal operations.
*/
OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE)); /* * init
*/
pc = SCRIPTZ_BA(np, snooptest);
host_wr = 1;
sym_wr = 2; /* * Set memory and register.
*/
np->scratch = cpu_to_scr(host_wr);
OUTL(np, nc_temp, sym_wr); /* * Start script (exchange values)
*/
OUTL(np, nc_dsa, np->hcb_ba);
OUTL_DSP(np, pc); /* * Wait 'til done (with timeout)
*/ for (i=0; i<SYM_SNOOP_TIMEOUT; i++) if (INB(np, nc_istat) & (INTF|SIP|DIP)) break; if (i>=SYM_SNOOP_TIMEOUT) {
printf ("CACHE TEST FAILED: timeout.\n"); return (0x20);
} /* * Check for fatal DMA errors.
*/
dstat = INB(np, nc_dstat); #if 1 /* Band aiding for broken hardwares that fail PCI parity */ if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
printf ("%s: PCI DATA PARITY ERROR DETECTED - " "DISABLING MASTER DATA PARITY CHECKING.\n",
sym_name(np));
np->rv_ctest4 &= ~MPEE; goto restart_test;
} #endif if (dstat & (MDPE|BF|IID)) {
printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat); return (0x80);
} /* * Save termination position.
*/
pc = INL(np, nc_dsp); /* * Read memory and register.
*/
host_rd = scr_to_cpu(np->scratch);
sym_rd = INL(np, nc_scratcha);
sym_bk = INL(np, nc_temp); /* * Check termination position.
*/ if (pc != SCRIPTZ_BA(np, snoopend)+8) {
printf ("CACHE TEST FAILED: script execution failed.\n");
printf ("start=%08lx, pc=%08lx, end=%08lx\n",
(u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc,
(u_long) SCRIPTZ_BA(np, snoopend) +8); return (0x40);
} /* * Show results.
*/ if (host_wr != sym_rd) {
printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
(int) host_wr, (int) sym_rd);
err |= 1;
} if (host_rd != sym_wr) {
printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
(int) sym_wr, (int) host_rd);
err |= 2;
} if (sym_bk != sym_wr) {
printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
(int) sym_wr, (int) sym_bk);
err |= 4;
}
return err;
}
/* * log message for real hard errors * * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc). * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. * * exception register: * ds: dstat * si: sist * * SCSI bus lines: * so: control lines as driven by chip. * si: control lines as seen by chip. * sd: scsi data lines as seen by chip. * * wide/fastmode: * sx: sxfer (see the manual) * s3: scntl3 (see the manual) * s4: scntl4 (see the manual) * * current script command: * dsp: script address (relative to start of script). * dbc: first word of script command. * * First 24 register of the chip: * r0..rf
*/ staticvoid sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat)
{ struct sym_hcb *np = sym_get_hcb(shost);
u32 dsp; int script_ofs; int script_size; char *script_name;
u_char *script_base; int i;
/*
 * Look up the chip table.
 *
 * Return a pointer to the chip entry if found,
 * zero otherwise.
 */
struct sym_chip *
sym_lookup_chip_table (u_short device_id, u_char revision)
{
	struct sym_chip *chip;
	int i;

	for (i = 0; i < sym_num_devs; i++) {
		chip = &sym_dev_table[i];
		/* match on device id, and entry must cover this revision */
		if (device_id == chip->device_id &&
		    revision <= chip->revision_id)
			return chip;
	}
	return NULL;
}
#if SYM_CONF_DMA_ADDRESSING_MODE == 2
/*
 * Lookup the 64 bit DMA segments map.
 * This is only used if the direct mapping
 * has been unsuccessful.
 */
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s)
{
	int i;

	if (!use_dac(np))
		goto weird;

	/* Look up existing mappings */
	for (i = SYM_DMAP_SIZE-1; i > 0; i--) {
		if (h == np->dmap_bah[i])
			return i;
	}
	/* If direct mapping is free, get it */
	if (!np->dmap_bah[s])
		goto new;
	/* Collision -> lookup free mappings */
	for (s = SYM_DMAP_SIZE-1; s > 0; s--) {
		if (!np->dmap_bah[s])
			goto new;
	}
weird:
	panic("sym: ran out of 64 bit DMA segment registers");
	return -1;
new:
	np->dmap_bah[s] = h;
	np->dmap_dirty = 1;
	return s;
}
/* * Update IO registers scratch C..R so they will be * in sync. with queued CCB expectations.
*/ staticvoid sym_update_dmap_regs(struct sym_hcb *np)
{ int o, i;
if (!np->dmap_dirty) return;
o = offsetof(struct sym_reg, nc_scrx[0]); for (i = 0; i < SYM_DMAP_SIZE; i++) {
OUTL_OFF(np, o, np->dmap_bah[i]);
o += 4;
}
np->dmap_dirty = 0;
} #endif
/* Enforce all the fiddly SPI rules and the chip limitations */
static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
		struct sym_trans *goal)
{
	if (!spi_support_wide(starget))
		goal->width = 0;

	/* Some targets fail to properly negotiate DT in SE mode */
	if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN))
		goal->dt = 0;

	if (goal->dt) {
		/* all DT transfers must be wide */
		goal->width = 1;
		/* clamp offset and period to the chip's DT limits */
		if (goal->offset > np->maxoffs_dt)
			goal->offset = np->maxoffs_dt;
		if (goal->period < np->minsync_dt)
			goal->period = np->minsync_dt;
		if (goal->period > np->maxsync_dt)
			goal->period = np->maxsync_dt;
	} else {
		/* IU and QAS only make sense with DT */
		goal->iu = goal->qas = 0;
		/* clamp offset and period to the chip's ST limits */
		if (goal->offset > np->maxoffs)
			goal->offset = np->maxoffs;
		if (goal->period < np->minsync)
			goal->period = np->minsync;
		if (goal->period > np->maxsync)
			goal->period = np->maxsync;
	}
}
/* * Prepare the next negotiation message if needed. * * Fill in the part of message buffer that contains the * negotiation and the nego_status field of the CCB. * Returns the size of the message in bytes.
*/ staticint sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
{ struct sym_tcb *tp = &np->target[cp->target]; struct scsi_target *starget = tp->starget; struct sym_trans *goal = &tp->tgoal; int msglen = 0; int nego;
sym_check_goals(np, starget, goal);
/* * Many devices implement PPR in a buggy way, so only use it if we * really want to.
*/ if (goal->renego == NS_PPR || (goal->offset &&
(goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) {
nego = NS_PPR;
} elseif (goal->renego == NS_WIDE || goal->width) {
nego = NS_WIDE;
} elseif (goal->renego == NS_SYNC || goal->offset) {
nego = NS_SYNC;
} else {
/* nothing to negotiate: clear the pending-nego flag */
goal->check_nego = 0;
nego = 0;
}
if (nego) {
tp->nego_cp = cp; /* Keep track a nego will be performed */ if (DEBUG_FLAGS & DEBUG_NEGO) {
sym_print_nego_msg(np, cp->target,
nego == NS_SYNC ? "sync msgout" :
nego == NS_WIDE ? "wide msgout" : "ppr msgout", msgptr);
}
}
/*
 * NOTE(review): msglen is initialized to 0 and never updated in the
 * code visible here, so this function always returns 0 and msgptr is
 * never written even though the header comment promises a filled
 * message buffer and says "Returns the size of the message in bytes".
 * The switch that builds the SYNC/WIDE/PPR message (and sets
 * cp->nego_status) appears to be missing -- verify against upstream
 * before relying on this function.
 */
return msglen;
}
/* * Insert a job into the start queue.
*/ void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
{
u_short qidx;
#ifdef SYM_CONF_IARB_SUPPORT /* * If the previously queued CCB is not yet done, * set the IARB hint. The SCRIPTS will go with IARB * for this job when starting the previous one. * We leave devices a chance to win arbitration by * not using more than 'iarb_max' consecutive * immediate arbitrations.
*/ if (np->last_cp && np->iarb_count < np->iarb_max) {
np->last_cp->host_flags |= HF_HINT_IARB;
++np->iarb_count;
} else
np->iarb_count = 0;
np->last_cp = cp; #endif
#if SYM_CONF_DMA_ADDRESSING_MODE == 2 /* * Make SCRIPTS aware of the 64 bit DMA * segment registers not being up-to-date.
*/ if (np->dmap_dirty)
cp->host_xflags |= HX_DMAP_DIRTY; #endif
/* * Insert first the idle task and then our job. * The MBs should ensure proper ordering.
*/
qidx = np->squeueput + 2; if (qidx >= MAX_QUEUE*2) qidx = 0;
/* * Paranoia, as usual. :-)
*/
assert(!lp->started_tags || !lp->started_no_tag);
/* * Try to start as many commands as asked by caller. * Prevent from having both tagged and untagged * commands queued to the device at the same time.
*/ while (maxn--) {
qp = sym_remque_head(&lp->waiting_ccbq); if (!qp) break;
cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); if (cp->tag != NO_TAG) { if (lp->started_no_tag ||
lp->started_tags >= lp->started_max) {
sym_insque_head(qp, &lp->waiting_ccbq); break;
}
lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
lp->head.resel_sa =
cpu_to_scr(SCRIPTA_BA(np, resel_tag));
++lp->started_tags;
} else { if (lp->started_no_tag || lp->started_tags) {
sym_insque_head(qp, &lp->waiting_ccbq); break;
}
lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
lp->head.resel_sa =
cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
++lp->started_no_tag;
}
cp->started = 1;
sym_insque_tail(qp, &lp->started_ccbq);
sym_put_start_queue(np, cp);
}
} #endif/* SYM_OPT_HANDLE_DEVICE_QUEUEING */
/*
 * The chip may have completed jobs. Look at the DONE QUEUE.
 *
 * On paper, memory read barriers may be needed here to
 * prevent out of order LOADs by the CPU from having
 * prefetched stale data prior to DMA having occurred.
 * Returns the number of completed CCBs.
 */
static int sym_wakeup_done (struct sym_hcb *np)
{
	struct sym_ccb *cp;
	int i, n;
	u32 dsa;

	n = 0;
	i = np->dqueueget;

	/* MEMORY_READ_BARRIER(); */
	for (;;) {
		dsa = scr_to_cpu(np->dqueue[i]);
		if (!dsa)
			break;		/* empty slot: queue exhausted */
		np->dqueue[i] = 0;
		/* advance, wrapping the circular done queue */
		if ((i = i+2) >= MAX_QUEUE*2)
			i = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp) {
			MEMORY_READ_BARRIER();
			sym_complete_ok (np, cp);
			++n;
		} else
			printf ("%s: bad DSA (%x) in done queue.\n",
				sym_name(np), (u_int) dsa);
	}
	np->dqueueget = i;

	return n;
}
/* * Complete all CCBs queued to the COMP queue. * * These CCBs are assumed: * - Not to be referenced either by devices or * SCRIPTS-related queues and datas. * - To have to be completed with an error condition * or requeued. * * The device queue freeze count is incremented * for each CCB that does not prevent this. * This function is called when all CCBs involved * in error handling/recovery have been reaped.
*/ staticvoid sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
{
SYM_QUEHEAD *qp; struct sym_ccb *cp;
/*
 * Complete all active CCBs with error.
 * Used on CHIP/SCSI RESET.
 */
static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
{
	/*
	 * Move all active CCBs to the COMP queue
	 * and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}
/* * Reset chip if asked, otherwise just clear fifos.
*/ if (reason == 1)
sym_soft_reset(np); else {
OUTB(np, nc_stest3, TE|CSF);
OUTONB(np, nc_ctest3, CLF);
}
/* * Clear Start Queue
*/
phys = np->squeue_ba; for (i = 0; i < MAX_QUEUE*2; i += 2) {
np->squeue[i] = cpu_to_scr(np->idletask_ba);
np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
}
np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
/* * Start at first entry.
*/
np->squeueput = 0;
/* * Clear Done Queue
*/
phys = np->dqueue_ba; for (i = 0; i < MAX_QUEUE*2; i += 2) {
np->dqueue[i] = 0;
np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
}
np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
/* * Start at first entry.
*/
np->dqueueget = 0;
/* * Install patches in scripts. * This also let point to first position the start * and done queue pointers used from SCRIPTS.
*/
np->fw_patch(shost);
/* * Wakeup all pending jobs.
*/
sym_flush_busy_queue(np, DID_RESET);
/* * Init chip.
*/
OUTB(np, nc_istat, 0x00); /* Remove Reset, abort */
INB(np, nc_mbox1);
udelay(2000); /* The 895 needs time for the bus mode to settle */
OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0); /* full arb., ena parity, par->ATN */
OUTB(np, nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
/* * For now, disable AIP generation on C1010-66.
*/ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66)
OUTB(np, nc_aipcntl1, DISAIP);
/* * C10101 rev. 0 errata. * Errant SGE's when in narrow. Write bits 4 & 5 of * STEST1 register to disable SGE. We probably should do * that from SCRIPTS for each selection/reselection, but * I just don't want. :)
*/ if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
pdev->revision < 1)
OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);
/* * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. * Disable overlapped arbitration for some dual function devices, * regardless revision id (kind of post-chip-design feature. ;-))
*/ if (pdev->device == PCI_DEVICE_ID_NCR_53C875)
OUTB(np, nc_ctest0, (1<<5)); elseif (pdev->device == PCI_DEVICE_ID_NCR_53C896)
np->rv_ccntl0 |= DPR;
/* * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing * and/or hardware phase mismatch, since only such chips * seem to support those IO registers.
*/ if (np->features & (FE_DAC|FE_NOPM)) {
OUTB(np, nc_ccntl0, np->rv_ccntl0);
OUTB(np, nc_ccntl1, np->rv_ccntl1);
}
#if SYM_CONF_DMA_ADDRESSING_MODE == 2 /* * Set up scratch C and DRS IO registers to map the 32 bit * DMA address range our data structures are located in.
*/ if (use_dac(np)) {
np->dmap_bah[0] = 0; /* ??? */
OUTL(np, nc_scrx[0], np->dmap_bah[0]);
OUTL(np, nc_drs, np->dmap_bah[0]);
} #endif
/* * If phase mismatch handled by scripts (895A/896/1010), * set PM jump addresses.
*/ if (np->features & FE_NOPM) {
OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
}
/* * Enable GPIO0 pin for writing if LED support from SCRIPTS. * Also set GPIO5 and clear GPIO6 if hardware LED control.
*/ if (np->features & FE_LED0)
OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01); elseif (np->features & FE_LEDC)
OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);
/* * For 895/6 enable SBMC interrupt and save current SCSI bus mode. * Try to eat the spurious SBMC interrupt that may occur when * we reset the chip but not the SCSI BUS (at initialization).
*/ if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
OUTONW(np, nc_sien, SBMC); if (reason == 0) {
INB(np, nc_mbox1);
mdelay(100);
INW(np, nc_sist);
}
np->scsi_mode = INB(np, nc_stest4) & SMODE;
}
/* * Fill in target structure. * Reinitialize usrsync. * Reinitialize usrwide. * Prepare sync negotiation according to actual SCSI bus mode.
*/ for (i=0;i<SYM_CONF_MAX_TARGET;i++) { struct sym_tcb *tp = &np->target[i];
tp->to_reset = 0;
tp->head.sval = 0;
tp->head.wval = np->rv_scntl3;
tp->head.uval = 0; if (tp->lun0p)
tp->lun0p->to_clear = 0; if (tp->lunmp) { int ln;
for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++) if (tp->lunmp[ln])
tp->lunmp[ln]->to_clear = 0;
}
}
/* * Download SCSI SCRIPTS to on-chip RAM if present, * and start script processor. * We do the download preferently from the CPU. * For platforms that may not support PCI memory mapping, * we use simple SCRIPTS that performs MEMORY MOVEs.
*/
phys = SCRIPTA_BA(np, init); if (np->ram_ba) { if (sym_verbose >= 2)
printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz); if (np->features & FE_RAM8K) {
memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
phys = scr_to_cpu(np->scr_ram_seg);
OUTL(np, nc_mmws, phys);
OUTL(np, nc_mmrs, phys);
OUTL(np, nc_sfs, phys);
phys = SCRIPTB_BA(np, start64);
}
}
np->istat_sem = 0;
OUTL(np, nc_dsa, np->hcb_ba);
OUTL_DSP(np, phys);
/* * Notify the XPT about the RESET condition.
*/ if (reason != 0)
sym_xpt_async_bus_reset(np);
}
/*
 *  Switch trans mode for current job and its target.
 */
static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
			u_char per, u_char wide, u_char div, u_char fak)
{
	SYM_QUEHEAD *qp;
	u_char sval, wval, uval;
	struct sym_tcb *tp = &np->target[target];

	/*
	 *  NOTE(review): sval/wval/uval were previously compared against
	 *  tp->head.* while still uninitialized (undefined behavior).
	 *  Restored: seed them from the cached per-target register values,
	 *  then fold in the negotiated parameters before comparing.
	 *  Confirm the bit layout below against the chip data manual.
	 */
	sval = tp->head.sval;
	wval = tp->head.wval;
	uval = tp->head.uval;

	/*
	 *  Set the offset.
	 */
	if (!(np->features & FE_C10))
		sval = (sval & ~0x1f) | ofs;
	else
		sval = (sval & ~0x3f) | ofs;

	/*
	 *  Set the sync divisor and extra clock factor.
	 */
	if (ofs != 0) {
		wval = (wval & ~0x70) | ((div+1) << 4);
		if (!(np->features & FE_C10))
			sval = (sval & ~0xe0) | (fak << 5);
		else {
			uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
			if (fak >= 1)
				uval |= (XCLKH_ST|XCLKH_DT);
			if (fak >= 2)
				uval |= (XCLKS_ST|XCLKS_DT);
		}
	}

	/*
	 *  Set the bus width.
	 */
	wval = wval & ~EWS;
	if (wide != 0)
		wval |= EWS;

	/*
	 *  Set misc. ultra enable bits.
	 */
	if (np->features & FE_C10) {
		uval = uval & ~(U3EN|AIPCKEN);
		if (opts)
			uval |= U3EN;
	} else {
		wval = wval & ~ULTRA;
		if (per <= 12)
			wval |= ULTRA;
	}

	/*
	 *  Stop there if sync parameters are unchanged.
	 */
	if (tp->head.sval == sval &&
	    tp->head.wval == wval &&
	    tp->head.uval == uval)
		return;
	tp->head.sval = sval;
	tp->head.wval = wval;
	tp->head.uval = uval;

	/*
	 *  Disable extended Sreq/Sack filtering if per < 50.
	 *  Not supported on the C1010.
	 */
	if (per < 50 && !(np->features & FE_C10))
		OUTOFFB(np, nc_stest2, EXT);

	/*
	 *  set actual value and sync_status
	 */
	OUTB(np, nc_sxfer, tp->head.sval);
	OUTB(np, nc_scntl3, tp->head.wval);

	if (np->features & FE_C10) {
		OUTB(np, nc_scntl4, tp->head.uval);
	}

	/*
	 *  patch ALL busy ccbs of this target.
	 */
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp;
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->target != target)
			continue;
		cp->phys.select.sel_scntl3 = tp->head.wval;
		cp->phys.select.sel_sxfer = tp->head.sval;
		if (np->features & FE_C10) {
			cp->phys.select.sel_scntl4 = tp->head.uval;
		}
	}
}
/* * We received a WDTR. * Let everything be aware of the changes.
*/ staticvoid sym_setwide(struct sym_hcb *np, int target, u_char wide)
{ struct sym_tcb *tp = &np->target[target]; struct scsi_target *starget = tp->starget;
/*
 * NOTE(review): the body of sym_setwide() appears truncated here — only the
 * local declarations survive and the function's closing brace is missing, so
 * this region does not compile as-is. Presumably the lost code applied the
 * negotiated width (e.g. via sym_settrans()) and updated the SPI transport
 * attributes of 'starget'; recover the full body from the upstream driver
 * before building.
 */
/*
 *  Generic recovery from SCSI interrupt.
 *
 *  The doc says that when the chip gets an SCSI interrupt,
 *  it tries to stop in an orderly fashion, by completing
 *  an instruction fetch that had started or by flushing
 *  the DMA fifo for a write to memory that was executing.
 *  Such a fashion is not enough to know if the instruction
 *  that was just before the current DSP value has been
 *  executed or not.
 *
 *  There are some small SCRIPTS sections that deal with
 *  the start queue and the done queue that may break any
 *  assumption from the C code if we are interrupted
 *  inside, so we reset if this happens. Btw, since these
 *  SCRIPTS sections are executed while the SCRIPTS hasn't
 *  started SCSI operations, it is very unlikely to happen.
 *
 *  All the driver data structures are supposed to be
 *  allocated from the same 4 GB memory window, so there
 *  is a 1 to 1 relationship between DSA and driver data
 *  structures. Since we are careful :) to invalidate the
 *  DSA when we complete a command or when the SCRIPTS
 *  pushes a DSA into a queue, we can trust it when it
 *  points to a CCB.
 */
static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts)
{
	u32	dsp	= INL(np, nc_dsp);
	u32	dsa	= INL(np, nc_dsa);
	struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);

	/*
	 *  If we haven't been interrupted inside the SCRIPTS
	 *  critical paths, we can safely restart the SCRIPTS
	 *  and trust the DSA value if it matches a CCB.
	 */
	if ((!(dsp > SCRIPTA_BA(np, getjob_begin) &&
	       dsp < SCRIPTA_BA(np, getjob_end) + 1)) &&
	    (!(dsp > SCRIPTA_BA(np, ungetjob) &&
	       dsp < SCRIPTA_BA(np, reselect) + 1)) &&
	    (!(dsp > SCRIPTB_BA(np, sel_for_abort) &&
	       dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) &&
	    (!(dsp > SCRIPTA_BA(np, done) &&
	       dsp < SCRIPTA_BA(np, done_end) + 1))) {
		OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo  */
		OUTB(np, nc_stest3, TE|CSF);		  /* clear scsi fifo */
		/*
		 *  If we have a CCB, let the SCRIPTS call us back for
		 *  the handling of the error with SCRATCHA filled with
		 *  STARTPOS. This way, we will be able to freeze the
		 *  device queue and requeue awaiting IOs.
		 */
		if (cp) {
			cp->host_status = hsts;
			OUTL_DSP(np, SCRIPTA_BA(np, complete_error));
		}
		/*
		 *  Otherwise just restart the SCRIPTS.
		 */
		else {
			OUTL(np, nc_dsa, 0xffffff);
			OUTL_DSP(np, SCRIPTA_BA(np, start));
		}
	}
	else
		goto reset_all;

	return;

	/*
	 *  NOTE(review): this tail ('reset_all' label, return and closing
	 *  brace) was missing — the goto above had no target and the
	 *  function was never closed. Restored: fall back to a full chip
	 *  and bus reset when interrupted inside a critical SCRIPTS path.
	 */
reset_all:
	sym_start_reset(np);
}
/*
 *  Chip exception handler for SCSI bus mode change.
 *
 *  spi2-r12 11.2.3 says a transceiver mode change must
 *  generate a reset event and a device that detects a reset
 *  event shall initiate a hard reset. It says also that a
 *  device that detects a mode change shall set data transfer
 *  mode to eight bit asynchronous, etc...
 *  So, just reinitializing all except chip should be enough.
 */
static void sym_int_sbmc(struct Scsi_Host *shost)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	/* Current transceiver mode as reported by the chip (STEST4). */
	u_char scsi_mode = INB(np, nc_stest4) & SMODE;

	/*
	 *  Notify user.
	 */
	printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np),
		sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));

	/*
	 *  Should suspend command processing for a few seconds and
	 *  reinitialize all except the chip.
	 */
	sym_start_up(shost, 2);
}
/*
 *  Chip exception handler for SCSI parity error.
 *
 *  When the chip detects a SCSI parity error and is
 *  currently executing a (CH)MOV instruction, it does
 *  not interrupt immediately, but tries to finish the
 *  transfer of the current scatter entry before
 *  interrupting. The following situations may occur:
 *
 *  - The complete scatter entry has been transferred
 *    without the device having changed phase.
 *    The chip will then interrupt with the DSP pointing
 *    to the instruction that follows the MOV.
 *
 *  - A phase mismatch occurs before the MOV finished
 *    and phase errors are to be handled by the C code.
 *    The chip will then interrupt with both PAR and MA
 *    conditions set.
 *
 *  - A phase mismatch occurs before the MOV finished and
 *    phase errors are to be handled by SCRIPTS.
 *    The chip will load the DSP with the phase mismatch
 *    JUMP address and interrupt the host processor.
 */
static void sym_int_par (struct sym_hcb *np, u_short sist)
{
	/*
	 *  NOTE(review): 'hsts' is never used below; a diagnostic printf
	 *  reporting the parity error details may have been lost here —
	 *  confirm against the upstream driver. The INB() is kept as-is.
	 */
	u_char	hsts	= INB(np, HS_PRT);
	u32	dsp	= INL(np, nc_dsp);
	u32	dbc	= INL(np, nc_dbc);
	u32	dsa	= INL(np, nc_dsa);
	u_char	sbcl	= INB(np, nc_sbcl);
	u_char	cmd	= dbc >> 24;
	int	phase	= cmd & 7;
	struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);

	/*
	 *  Check that the chip is connected to the SCSI BUS.
	 */
	if (!(INB(np, nc_scntl1) & ISCON)) {
		sym_recover_scsi_int(np, HS_UNEXPECTED);
		return;
	}

	/*
	 *  If the nexus is not clearly identified, reset the bus.
	 *  We will try to do better later.
	 */
	if (!cp)
		goto reset_all;

	/*
	 *  Check instruction was a MOV, direction was INPUT and
	 *  ATN is asserted.
	 */
	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
		goto reset_all;

	/*
	 *  Keep track of the parity error.
	 */
	OUTONB(np, HF_PRT, HF_EXT_ERR);
	cp->xerr_status |= XE_PARITY_ERR;

	/*
	 *  Prepare the message to send to the device.
	 */
	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;

	/*
	 *  If the old phase was DATA IN phase, we have to deal with
	 *  the 3 situations described above.
	 *  For other input phases (MSG IN and STATUS), the device
	 *  must resend the whole thing that failed parity checking
	 *  or signal error. So, jumping to dispatcher should be OK.
	 */
	if (phase == 1 || phase == 5) {
		/* Phase mismatch handled by SCRIPTS */
		if (dsp == SCRIPTB_BA(np, pm_handle))
			OUTL_DSP(np, dsp);
		/* Phase mismatch handled by the C code */
		else if (sist & MA)
			sym_int_ma (np);
		/* No phase mismatch occurred */
		else {
			sym_set_script_dp (np, cp, dsp);
			OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
		}
	}
	else if (phase == 7)	/* We definitely cannot handle parity errors */
#if 1				/* in message-in phase due to the reselection */
		goto reset_all;	/* path and various message anticipations.   */
#else
		OUTL_DSP(np, SCRIPTA_BA(np, clrack));
#endif
	else
		OUTL_DSP(np, SCRIPTA_BA(np, dispatch));

	return;

reset_all:
	sym_start_reset(np);
	return;
}
/* * chip exception handler for phase errors. * * We have to construct a new transfer descriptor, * to transfer the rest of the current block.
*/ staticvoid sym_int_ma (struct sym_hcb *np)
{
u32 dbc;
u32 rest;
u32 dsp;
u32 dsa;
u32 nxtdsp;
u32 *vdsp;
u32 oadr, olen;
u32 *tblp;
u32 newcmd;
u_int delta;
u_char cmd;
u_char hflags, hflags0; struct sym_pmc *pm; struct sym_ccb *cp;
/* * Donnot take into account dma fifo and various buffers in * INPUT phase since the chip flushes everything before * raising the MA interrupt for interrupted INPUT phases. * For DATA IN phase, we will check for the SWIDE later.
*/ if ((cmd & 7) != 1 && (cmd & 7) != 5) {
u_char ss0, ss2;
/* * The data in the dma fifo has not been transferred to * the target -> add the amount to the rest * and clear the data. * Check the sstat2 register in case of wide transfer.
*/
rest += delta;
ss0 = INB(np, nc_sstat0); if (ss0 & OLF) rest++; if (!(np->features & FE_C10)) if (ss0 & ORF) rest++; if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
ss2 = INB(np, nc_sstat2); if (ss2 & OLF1) rest++; if (!(np->features & FE_C10)) if (ss2 & ORF1) rest++;
}
/*
 * NOTE(review): the remainder of sym_int_ma() is missing from this copy of
 * the file; unrelated scraped German web-page boilerplate that had been
 * pasted here was removed. Restore the rest of the function from the
 * upstream driver before building.
 */