// SPDX-License-Identifier: GPL-2.0-only /* * sata_mv.c - Marvell SATA support * * Copyright 2008-2009: Marvell Corporation, all rights reserved. * Copyright 2005: EMC Corporation, all rights reserved. * Copyright 2005 Red Hat, Inc. All rights reserved. * * Originally written by Brett Russ. * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. * * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
*/
/* * sata_mv TODO list: * * --> Develop a low-power-consumption strategy, and implement it. * * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. * * --> [Experiment, Marvell value added] Is it possible to use target * mode to cross-connect two Linux boxes with Marvell cards? If so, * creating LibATA target mode support would be very interesting. * * Target mode, for those without docs, is the ability to directly * connect two SATA ports.
*/
/* * 80x1-B2 errata PCI#11: * * Users of the 6041/6081 Rev.B2 chips (current is C0) * should be careful to insert those cards only onto PCI-X bus #0, * and only in device slots 0..7, not higher. The chips may not * work correctly otherwise (note: this is a pretty rare condition).
*/
/* For use with both IRQ coalescing methods ("all ports" or "per-HC" */
COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
MAX_COAL_IO_COUNT = 255, /* completed I/O count */
MV_PCI_REG_BASE = 0,
/* * Per-chip ("all ports") interrupt coalescing feature. * This is only for GEN_II / GEN_IIE hardware. * * Coalescing defers the interrupt until either the IO_THRESHOLD * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
*/
COAL_REG_BASE = 0x18000,
IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
HC_IRQ_CAUSE = 0x14,
DMA_IRQ = (1 << 0), /* shift by port # */
HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
DEV_IRQ = (1 << 8), /* shift by port # */
/* * Per-HC (Host-Controller) interrupt coalescing feature. * This is present on all chip generations. * * Coalescing defers the interrupt until either the IO_THRESHOLD * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
*/
HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
SOC_LED_CTRL = 0x2c,
SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ /* with dev activity LED */
/* * We keep a local cache of a few frequently accessed port * registers here, to avoid having to read them (very slow) * when switching between EDMA and non-EDMA modes.
*/ struct mv_cached_regs {
u32 fiscfg;
u32 ltmode;
u32 haltcond;
u32 unknown_rsvd;
};
/* * Needed on some devices that require their clocks to be enabled. * These are optional: if the platform device does not have any * clocks, they won't be used. Also, if the underlying hardware * does not support the common clock framework (CONFIG_HAVE_CLK=n), * all the clock operations become no-ops (see clk.h).
*/ struct clk *clk; struct clk **port_clks; /* * Some devices have a SATA PHY which can be enabled/disabled * in order to save power. These are optional: if the platform * devices does not have any phy, they won't be used.
*/ struct phy **port_phys; /* * These consistent DMA memory pools give us guaranteed * alignment for hardware-accessed data structures, * and less memory waste in accomplishing the alignment.
*/ struct dma_pool *crqb_pool; struct dma_pool *crpb_pool; struct dma_pool *sg_tbl_pool;
};
staticinlineunsignedint mv_hc_from_port(unsignedint port)
{ return port >> MV_PORT_HC_SHIFT;
}
staticinlineunsignedint mv_hardport_from_port(unsignedint port)
{ return port & MV_PORT_MASK;
}
/* * Consolidate some rather tricky bit shift calculations. * This is hot-path stuff, so not a function. * Simple code, with two return values, so macro rather than inline. * * port is the sole input, in range 0..7. * shift is one output, for use with main_irq_cause / main_irq_mask registers. * hardport is the other output, in range 0..3. * * Note that port and hardport may be the same variable in some cases.
*/ #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
{ \
shift = mv_hc_from_port(port) * HC_SHIFT; \
hardport = mv_hardport_from_port(port); \
shift += hardport * 2; \
}
/** * mv_save_cached_regs - (re-)initialize cached port registers * @ap: the port whose registers we are caching * * Initialize the local cache of port registers, * so that reading them over and over again can * be avoided on the hotter paths of this driver. * This saves a few microseconds each time we switch * to/from EDMA mode to perform (eg.) a drive cache flush.
*/ staticvoid mv_save_cached_regs(struct ata_port *ap)
{ void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data;
/** * mv_write_cached_reg - write to a cached port register * @addr: hardware address of the register * @old: pointer to cached value of the register * @new: new value for the register * * Write a new value to a cached register, * but only if the value is different from before.
*/ staticinlinevoid mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{ if (new != *old) { unsignedlong laddr;
*old = new; /* * Workaround for 88SX60x1-B2 FEr SATA#13: * Read-after-write is needed to prevent generating 64-bit * write cycles on the PCI bus for SATA interface registers * at offsets ending in 0x4 or 0xc. * * Looks like a lot of fuss, but it avoids an unnecessary * +1 usec read-after-write delay for unaffected registers.
*/
laddr = (unsignedlong)addr & 0xffff; if (laddr >= 0x300 && laddr <= 0x33c) {
laddr &= 0x000f; if (laddr == 0x4 || laddr == 0xc) {
writelfl(new, addr); /* read after write */ return;
}
}
writel(new, addr); /* unaffected by the errata */
}
}
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}
/* * mv_start_edma - Enable eDMA engine * @pp: port private data * * Verify the local cache of the eDMA state is accurate with a * WARN_ON. * * LOCKING: * Inherited from caller.
*/ staticvoid mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, struct mv_port_priv *pp, u8 protocol)
{ int want_ncq = (protocol == ATA_PROT_NCQ);
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); if (want_ncq != using_ncq)
mv_stop_edma(ap);
} if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { struct mv_host_priv *hpriv = ap->host->private_data;
/* * Wait for the EDMA engine to finish transactions in progress. * No idea what a good "timeout" value might be, but measurements * indicate that it often requires hundreds of microseconds * with two drives in-use. So we use the 15msec value above * as a rough guess at what even more drives might require.
*/ for (i = 0; i < timeout; ++i) {
u32 edma_stat = readl(port_mmio + EDMA_STATUS); if ((edma_stat & empty_idle) == empty_idle) break;
udelay(per_loop);
} /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}
/** * mv_stop_edma_engine - Disable eDMA engine * @port_mmio: io base address * * LOCKING: * Inherited from caller.
*/ staticint mv_stop_edma_engine(void __iomem *port_mmio)
{ int i;
/* Disable eDMA. The disable bit auto clears. */
writelfl(EDMA_DS, port_mmio + EDMA_CMD);
/* Wait for the chip to confirm eDMA is off. */ for (i = 10000; i > 0; i--) {
u32 reg = readl(port_mmio + EDMA_CMD); if (!(reg & EDMA_EN)) return 0;
udelay(10);
} return -EIO;
}
for (b = 0; b < bytes; ) { for (w = 0, o = 0; b < bytes && w < 4; w++) {
o += scnprintf(linebuf + o, sizeof(linebuf) - o, "%08x ", readl(start + b));
b += sizeof(u32);
}
dev_dbg(dev, "%s: %p: %s\n",
__func__, start + b, linebuf);
}
}
for (b = 0; b < bytes; ) { for (w = 0, o = 0; b < bytes && w < 4; w++) {
(void) pci_read_config_dword(pdev, b, &dw);
o += snprintf(linebuf + o, sizeof(linebuf) - o, "%08x ", dw);
b += sizeof(u32);
}
dev_dbg(&pdev->dev, "%s: %02x: %s\n",
__func__, b, linebuf);
}
}
switch (sc_reg_in) { case SCR_STATUS: case SCR_CONTROL: case SCR_ERROR:
ofs = SATA_STATUS + (sc_reg_in * sizeof(u32)); break; case SCR_ACTIVE:
ofs = SATA_ACTIVE; /* active is not with the others */ break; default:
ofs = 0xffffffffU; break;
} return ofs;
}
if (ofs != 0xffffffffU) { void __iomem *addr = mv_ap_base(link->ap) + ofs; struct mv_host_priv *hpriv = link->ap->host->private_data; if (sc_reg_in == SCR_CONTROL) { /* * Workaround for 88SX60x1 FEr SATA#26: * * COMRESETs have to take care not to accidentally * put the drive to sleep when writing SCR_CONTROL. * Setting bits 12..15 prevents this problem. * * So if we see an outbound COMMRESET, set those bits. * Ditto for the followup write that clears the reset. * * The proprietary driver does this for * all chip versions, and so do we.
*/ if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
val |= 0xf000;
if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) { void __iomem *lp_phy_addr =
mv_ap_base(link->ap) + LP_PHY_CTL; /* * Set PHY speed according to SControl speed.
*/
u32 lp_phy_val =
LP_PHY_CTL_PIN_PU_PLL |
LP_PHY_CTL_PIN_PU_RX |
LP_PHY_CTL_PIN_PU_TX;
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_info(adev,
				"NCQ disabled for command-based switching\n");
		}
	}
}
/* * Don't allow new commands if we're in a delayed EH state * for NCQ and/or FIS-based switching.
*/ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) return ATA_DEFER_PORT;
/* PIO commands need exclusive link: no other commands [DMA or PIO] * can run concurrently. * set excl_link when we want to send a PIO command in DMA mode * or a non-NCQ command in NCQ mode. * When we receive a command from that link, and there are no * outstanding commands, mark a flag to clear excl_link and let * the command go through.
*/ if (unlikely(ap->excl_link)) { if (link == ap->excl_link) { if (ap->nr_active_links) return ATA_DEFER_PORT;
qc->flags |= ATA_QCFLAG_CLEAR_EXCL; return 0;
} else return ATA_DEFER_PORT;
}
/* * If the port is completely idle, then allow the new qc.
*/ if (ap->nr_active_links == 0) return 0;
/* * The port is operating in host queuing mode (EDMA) with NCQ * enabled, allow multiple NCQ commands. EDMA also allows * queueing multiple DMA commands but libata core currently * doesn't allow it.
*/ if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { if (ata_is_ncq(qc->tf.protocol)) return 0; else {
ap->excl_link = link; return ATA_DEFER_PORT;
}
}
return ATA_DEFER_PORT;
}
staticvoid mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{ struct mv_port_priv *pp = ap->private_data; void __iomem *port_mmio;
/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
old = readl(hpriv->base + GPIO_PORT_CTL); if (want_ncq) new = old | (1 << 22); else new = old & ~(1 << 22); if (new != old)
writel(new, hpriv->base + GPIO_PORT_CTL);
}
/* * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma * @ap: Port being initialized * * There are two DMA modes on these chips: basic DMA, and EDMA. * * Bit-0 of the "EDMA RESERVED" register enables/disables use * of basic DMA on the GEN_IIE versions of the chips. * * This bit survives EDMA resets, and must be set for basic DMA * to function, and should be cleared when EDMA is active.
*/ staticvoid mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{ struct mv_port_priv *pp = ap->private_data;
u32 new, *old = &pp->cached.unknown_rsvd;
if (enable_bmdma) new = *old | 1; else new = *old & ~1;
mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}
/* * SOC chips have an issue whereby the HDD LEDs don't always blink * during I/O when NCQ is enabled. Enabling a special "LED blink" mode * of the SOC takes care of it, generating a steady blink rate when * any drive on the chip is active. * * Unfortunately, the blink mode is a global hardware setting for the SOC, * so we must use it whenever at least one port on the SOC has NCQ enabled. * * We turn "LED blink" off when NCQ is not in use anywhere, because the normal * LED operation works then, and provides better (more accurate) feedback. * * Note that this code assumes that an SOC never has more than one HC onboard.
*/ staticvoid mv_soc_led_blink_enable(struct ata_port *ap)
{ struct ata_host *host = ap->host; struct mv_host_priv *hpriv = host->private_data; void __iomem *hc_mmio;
u32 led_ctrl;
if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)) return;
/* disable led-blink only if no ports are using NCQ */ for (port = 0; port < hpriv->n_ports; port++) { struct ata_port *this_ap = host->ports[port]; struct mv_port_priv *pp = this_ap->private_data;
} elseif (IS_GEN_IIE(hpriv)) { int want_fbs = sata_pmp_attached(ap); /* * Possible future enhancement: * * The chip can use FBS with non-NCQ, if we allow it, * But first we need to have the error handling in place * for this mode (datasheet section 7.3.15.4.2.3). * So disallow non-NCQ FBS for now.
*/
want_fbs &= want_ncq;
if (pp->crqb) {
dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
pp->crqb = NULL;
} if (pp->crpb) {
dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
pp->crpb = NULL;
} /* * For GEN_I, there's no NCQ, so we have only a single sg_tbl. * For later hardware, we have one unique sg_tbl per NCQ tag.
*/ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { if (pp->sg_tbl[tag]) { if (tag == 0 || !IS_GEN_I(hpriv))
dma_pool_free(hpriv->sg_tbl_pool,
pp->sg_tbl[tag],
pp->sg_tbl_dma[tag]);
pp->sg_tbl[tag] = NULL;
}
}
}
/** * mv_port_start - Port specific init/start routine. * @ap: ATA channel to manipulate * * Allocate and point to DMA memory, init port private memory, * zero indices. * * LOCKING: * Inherited from caller.
*/ staticint mv_port_start(struct ata_port *ap)
{ struct device *dev = ap->host->dev; struct mv_host_priv *hpriv = ap->host->private_data; struct mv_port_priv *pp; unsignedlong flags; int tag;
pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); if (!pp->crqb) return -ENOMEM;
pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); if (!pp->crpb) goto out_port_free_dma_mem;
/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
ap->flags |= ATA_FLAG_AN; /* * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. * For later hardware, we need one unique sg_tbl per NCQ tag.
*/ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { if (tag == 0 || !IS_GEN_I(hpriv)) {
pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
GFP_KERNEL, &pp->sg_tbl_dma[tag]); if (!pp->sg_tbl[tag]) goto out_port_free_dma_mem;
} else {
pp->sg_tbl[tag] = pp->sg_tbl[0];
pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
}
}
/** * mv_port_stop - Port specific cleanup/stop routine. * @ap: ATA channel to manipulate * * Stop DMA, cleanup port memory. * * LOCKING: * This routine uses the host lock to protect the DMA stop.
*/ staticvoid mv_port_stop(struct ata_port *ap)
{ unsignedlong flags;
/** * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries * @qc: queued command whose SG list to source from * * Populate the SG list and mark the last entry. * * LOCKING: * Inherited from caller.
*/ staticvoid mv_fill_sg(struct ata_queued_cmd *qc)
{ struct mv_port_priv *pp = qc->ap->private_data; struct scatterlist *sg; struct mv_sg *mv_sg, *last_sg = NULL; unsignedint si;
/** * mv_sff_irq_clear - Clear hardware interrupt after DMA. * @ap: Port associated with this ATA transaction. * * We need this only for ATAPI bmdma transactions, * as otherwise we experience spurious interrupts * after libata-sff handles the bmdma interrupts.
*/ staticvoid mv_sff_irq_clear(struct ata_port *ap)
{
mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}
/** * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. * @qc: queued command to check for chipset/DMA compatibility. * * The bmdma engines cannot handle speculative data sizes * (bytecount under/over flow). So only allow DMA for * data transfer commands with known data sizes. * * LOCKING: * Inherited from caller.
*/ staticint mv_check_atapi_dma(struct ata_queued_cmd *qc)
{ struct scsi_cmnd *scmd = qc->scsicmd;
if (scmd) { switch (scmd->cmnd[0]) { case READ_6: case READ_10: case READ_12: case WRITE_6: case WRITE_10: case WRITE_12: case GPCMD_READ_CD: case GPCMD_SEND_DVD_STRUCTURE: case GPCMD_SEND_CUE_SHEET: return 0; /* DMA is safe */
}
} return -EOPNOTSUPP; /* use PIO instead */
}
/** * mv_bmdma_stop_ap - Stop BMDMA transfer * @ap: port to stop * * Clears the ATA_DMA_START flag in the bmdma control register * * LOCKING: * Inherited from caller.
*/ staticvoid mv_bmdma_stop_ap(struct ata_port *ap)
{ void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd;
/** * mv_bmdma_status - Read BMDMA status * @ap: port for which to retrieve DMA status. * * Read and return equivalent of the sff BMDMA status register. * * LOCKING: * Inherited from caller.
*/ static u8 mv_bmdma_status(struct ata_port *ap)
{ void __iomem *port_mmio = mv_ap_base(ap);
u32 reg, status;
/* * Other bits are valid only if ATA_DMA_ACTIVE==0, * and the ATA_DMA_INTR bit doesn't exist.
*/
reg = readl(port_mmio + BMDMA_STATUS); if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE; elseif (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; else { /* * Just because DMA_ACTIVE is 0 (DMA completed), * this does _not_ mean the device is "done". * So we should not yet be signalling ATA_DMA_INTR * in some cases. Eg. DSM/TRIM, and perhaps others.
*/
mv_bmdma_stop_ap(ap); if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
status = 0; else
status = ATA_DMA_INTR;
} return status;
}
static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	/*
	 * Workaround for 88SX60x1 FEr SATA#24.
	 *
	 * Chip may corrupt WRITEs if multi_count >= 4kB.
	 * Note that READs are unaffected.
	 *
	 * It's not clear if this errata really means "4K bytes",
	 * or if it always happens for multi_count > 7
	 * regardless of device sector_size.
	 *
	 * So, for safety, any write with multi_count > 7
	 * gets converted here into a regular PIO write instead:
	 */
	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
		if (qc->dev->multi_count > 7) {
			switch (tf->command) {
			case ATA_CMD_WRITE_MULTI:
				tf->command = ATA_CMD_PIO_WRITE;
				break;
			case ATA_CMD_WRITE_MULTI_FUA_EXT:
				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
				fallthrough;
			case ATA_CMD_WRITE_MULTI_EXT:
				tf->command = ATA_CMD_PIO_WRITE_EXT;
				break;
			}
		}
	}
}
/** * mv_qc_prep - Host specific command preparation. * @qc: queued command to prepare * * This routine simply redirects to the general purpose routine * if command is not DMA. Else, it handles prep of the CRQB * (command request block), does some sanity checking, and calls * the SG load routine. * * LOCKING: * Inherited from caller.
*/ staticenum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{ struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data;
__le16 *cw; struct ata_taskfile *tf = &qc->tf;
u16 flags = 0; unsigned in_index;
switch (tf->protocol) { case ATA_PROT_DMA: if (tf->command == ATA_CMD_DSM) return AC_ERR_OK;
fallthrough; case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO:
mv_rw_multi_errata_sata24(qc); return AC_ERR_OK; default: return AC_ERR_OK;
}
/* Fill in command request block
*/ if (!(tf->flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
flags |= qc->hw_tag << CRQB_TAG_SHIFT;
flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx;
/* Sadly, the CRQB cannot accommodate all registers--there are * only 11 bytes...so we must pick and choose required * registers based on the command. So, we drop feature and * hob_feature for [RW] DMA commands, but they are needed for * NCQ. NCQ will drop hob_nsect, which is not needed there * (nsect is used only for the tag; feat/hob_feat hold true nsect).
*/ switch (tf->command) { case ATA_CMD_READ: case ATA_CMD_READ_EXT: case ATA_CMD_WRITE: case ATA_CMD_WRITE_EXT: case ATA_CMD_WRITE_FUA_EXT:
mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); break; case ATA_CMD_FPDMA_READ: case ATA_CMD_FPDMA_WRITE:
mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); break; default: /* The only other commands EDMA supports in non-queued and * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none * of which are defined/used by Linux. If we get here, this * driver needs work.
*/
ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
tf->command); return AC_ERR_INVALID;
}
mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
if (!(qc->flags & ATA_QCFLAG_DMAMAP)) return AC_ERR_OK;
mv_fill_sg(qc);
return AC_ERR_OK;
}
/** * mv_qc_prep_iie - Host specific command preparation. * @qc: queued command to prepare * * This routine simply redirects to the general purpose routine * if command is not DMA. Else, it handles prep of the CRQB * (command request block), does some sanity checking, and calls * the SG load routine. * * LOCKING: * Inherited from caller.
*/ staticenum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{ struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; struct mv_crqb_iie *crqb; struct ata_taskfile *tf = &qc->tf; unsigned in_index;
u32 flags = 0;
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ)) return AC_ERR_OK; if (tf->command == ATA_CMD_DSM) return AC_ERR_OK; /* use bmdma for this */
/* Fill in Gen IIE command request block */ if (!(tf->flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
if (!(qc->flags & ATA_QCFLAG_DMAMAP)) return AC_ERR_OK;
mv_fill_sg(qc);
return AC_ERR_OK;
}
/** * mv_sff_check_status - fetch device status, if valid * @ap: ATA port to fetch status from * * When using command issue via mv_qc_issue_fis(), * the initial ATA_BUSY state does not show up in the * ATA status (shadow) register. This can confuse libata! * * So we have a hook here to fake ATA_BUSY for that situation, * until the first time a BUSY, DRQ, or ERR bit is seen. * * The rest of the time, it simply returns the ATA status register.
*/ static u8 mv_sff_check_status(struct ata_port *ap)
{
u8 stat = ioread8(ap->ioaddr.status_addr); struct mv_port_priv *pp = ap->private_data;
if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; else
stat = ATA_BUSY;
} return stat;
}
/** * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register * @ap: ATA port to send a FIS * @fis: fis to be sent * @nwords: number of 32-bit words in the fis
*/ staticunsignedint mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{ void __iomem *port_mmio = mv_ap_base(ap);
u32 ifctl, old_ifctl, ifstat; int i, timeout = 200, final_word = nwords - 1;
/* Send all words of the FIS except for the final word */ for (i = 0; i < final_word; ++i)
writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
/* Flag end-of-transmission, and then send the final word */
writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
/* * Wait for FIS transmission to complete. * This typically takes just a single iteration.
*/ do {
ifstat = readl(port_mmio + SATA_IFSTAT);
} while (!(ifstat & 0x1000) && --timeout);
/* Restore original port configuration */
writelfl(old_ifctl, port_mmio + SATA_IFCTL);
/* See if it worked */ if ((ifstat & 0x3000) != 0x1000) {
ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
__func__, ifstat); return AC_ERR_OTHER;
} return 0;
}
/** * mv_qc_issue_fis - Issue a command directly as a FIS * @qc: queued command to start * * Note that the ATA shadow registers are not updated * after command issue, so the device will appear "READY" * if polled, even while it is BUSY processing the command. * * So we use a status hook to fake ATA_BUSY until the drive changes state. *
 *
 * NOTE(review): the source was truncated at this point by the extraction
 * tool ("maximum size reached"); the remainder of sata_mv.c (including the
 * body of mv_qc_issue_fis and everything after it) is not present here.
 * The German web-page footer that followed was viewer boilerplate, not
 * part of this file.
 */