/* * drivers/dma/fsl_raid.c * * Freescale RAID Engine device driver * * Author: * Harninder Rai <harninder.rai@freescale.com> * Naveen Burmi <naveenburmi@freescale.com> * * Rewrite: * Xuelin Shi <xuelin.shi@freescale.com> * * Copyright (c) 2010-2014 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * Theory of operation: * * General capabilities: * RAID Engine (RE) block is capable of offloading XOR, memcpy and P/Q * calculations required in RAID5 and RAID6 operations. RE driver * registers with Linux's ASYNC layer as dma driver. RE hardware * maintains strict ordering of the requests through chained * command queueing. * * Data flow: * Software RAID layer of Linux (MD layer) maintains RAID partitions, * strips, stripes etc. It sends requests to the underlying ASYNC layer * which further passes it to RE driver. ASYNC layer decides which request * goes to which job ring of RE hardware. For every request processed by * RAID Engine, driver gets an interrupt unless coalescing is set. The * per job ring interrupt handler checks the status register for errors, * clears the interrupt and leaves the post-interrupt processing to the irq * thread.
*/ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/dmaengine.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/slab.h>
/* Per Job Ring interrupt handler */ static irqreturn_t fsl_re_isr(int irq, void *data)
{ struct fsl_re_chan *re_chan;
u32 irqstate, status;
/* 'data' is the per-ring struct device; its drvdata holds the channel. */
re_chan = dev_get_drvdata((struct device *)data);
/*
 * Read the ring's interrupt status register; zero means this IRQ was not
 * raised by our ring, so tell the core the line is not ours.
 */
irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status); if (!irqstate) return IRQ_NONE;
/* * There's no way in upper layer (read MD layer) to recover from * error conditions except restart everything. In long term we * need to do something more than just crashing
*/ if (irqstate & FSL_RE_ERROR) {
/* On error, dump both the interrupt state and the ring status register. */
status = in_be32(&re_chan->jrregs->jr_status);
dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
irqstate, status);
}
/*
 * NOTE(review): this extract ends here; the handler's tail (clearing the
 * interrupt and returning IRQ_HANDLED / deferring post-processing) is not
 * visible in this chunk -- confirm against the full file.
 */
/*
 * NOTE(review): this is the interior of a prep function (apparently
 * fsl_re_prep_dma_genq); its signature and the declarations of desc, xor,
 * cf, scf, cont_q, save_src_cnt, lock_flag, etc. are not visible in this
 * extract -- the function header appears to have been lost. Confirm
 * against the full file before editing.
 */
/* Take a free descriptor from the channel's free queue under the desc lock. */
spin_lock_irqsave(&re_chan->desc_lock, lock_flag); if (!list_empty(&re_chan->free_q)) { /* take one desc from free_q */
desc = list_first_entry(&re_chan->free_q, struct fsl_re_desc, node);
list_del(&desc->node);
/*
 * Fill the galois-field multiplier table: with coefficients (scf) this
 * computes Q parity; without, all coefficients are 1, i.e. plain XOR (P).
 */
if (scf) { /* compute q = src0*coef0^src1*coef1^..., * is GF(8) mult */ for (i = 0; i < save_src_cnt; i++)
xor->gfm[i] = scf[i]; if (cont_q)
xor->gfm[i++] = 1;
} else { /* compute P, that is XOR all srcs */ for (i = 0; i < src_cnt; i++)
xor->gfm[i] = 1;
}
/* Filling frame 0 of compound frame descriptor with CDB */
cf = desc->cf_addr;
fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0);
/* Fill CFD's 1st frame with dest buffer */
fill_cfd_frame(cf, 1, len, dest, 0);
/* Fill CFD's rest of the frames with source buffers */ for (i = 2, j = 0; j < save_src_cnt; i++, j++)
fill_cfd_frame(cf, i, len, src[j], 0);
/* For a continuation-Q request, the old Q (in dest) is an extra source. */
if (cont_q)
fill_cfd_frame(cf, i++, len, dest, 0);
/* Setting the final bit in the last source buffer frame in CFD */
cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;
return &desc->async_tx;
}
/* * Prep function for P parity calculation.In RAID Engine terminology, * XOR calculation is called GenQ calculation done through GenQ command
*/ staticstruct dma_async_tx_descriptor *fsl_re_prep_dma_xor( struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsignedint src_cnt, size_t len, unsignedlong flags)
{ /* NULL let genq take all coef as 1 */ return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags);
}
/* * Prep function for P/Q parity calculation.In RAID Engine terminology, * P/Q calculation is called GenQQ done through GenQQ command
*/ staticstruct dma_async_tx_descriptor *fsl_re_prep_dma_pq( struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src, unsignedint src_cnt, constunsignedchar *scf, size_t len, unsignedlong flags)
/*
 * NOTE(review): the tokens "staticstruct", "unsignedint", "constunsignedchar"
 * and "unsignedlong" above are whitespace-loss damage from extraction and
 * will not compile; they should read "static struct", "unsigned int",
 * "const unsigned char" and "unsigned long". Restore from the full file.
 */
{ struct fsl_re_chan *re_chan; struct fsl_re_desc *desc; struct fsl_re_pq_cdb *pq; struct fsl_re_cmpnd_frame *cf;
u32 cdb;
u8 *p; int gfmq_len, i, j; unsignedint save_src_cnt = src_cnt;
/* Reject requests larger than the hardware's per-transfer limit. */
re_chan = container_of(chan, struct fsl_re_chan, chan); if (len > FSL_RE_MAX_DATA_LEN) {
dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
len, FSL_RE_MAX_DATA_LEN); return NULL;
}
/* * RE requires at least 2 sources, if given only one source, we pass the * second source same as the first one. * With only one source, generating P is meaningless, only generate Q.
*/ if (src_cnt == 1) { struct dma_async_tx_descriptor *tx;
dma_addr_t dma_src[2]; unsignedchar coef[2];
/* * During RAID6 array creation, Linux's MD layer gets P and Q * calculated separately in two steps. But our RAID Engine has * the capability to calculate both P and Q with a single command * Hence to merge well with MD layer, we need to provide a hook * here and call re_jq_prep_dma_genq() function
*/
/*
 * NOTE(review): the middle of this function (descriptor allocation and
 * CDB/frame setup) is missing from this extract -- the code jumps from
 * the src_cnt==1 comment straight to the final-bit epilogue below.
 */
/* Setting the final bit in the last source buffer frame in CFD */
cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;
return &desc->async_tx;
}
/* * Prep function for memcpy. In RAID Engine, memcpy is done through MOVE * command. Logic of this function will need to be modified once multipage * support is added in Linux's MD/ASYNC Layer
*/ staticstruct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsignedlong flags)
/*
 * NOTE(review): "staticstruct" / "unsignedlong" above are extraction
 * damage for "static struct" / "unsigned long". The function body is
 * truncated after the local declarations below -- restore from the
 * full file.
 */
{ struct fsl_re_chan *re_chan; struct fsl_re_desc *desc;
size_t length; struct fsl_re_cmpnd_frame *cf; struct fsl_re_move_cdb *move;
u32 cdb;
/*
 * NOTE(review): interior of the per-job-ring probe routine (apparently
 * fsl_re_chan_probe); its signature and the declarations of dev, re_priv,
 * dma_dev, chan, np, q, off, ptr, rc, ret, chandev, etc. are outside this
 * extract, as is the function's tail and its err_free/err_free_1 cleanup
 * labels. Confirm against the full file before editing.
 */
dev = &ofdev->dev;
re_priv = dev_get_drvdata(dev);
dma_dev = &re_priv->dma_dev;
/* devm-managed: freed automatically when the device is unbound. */
chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM;
/* create platform device for chan node */
chan_ofdev = of_platform_device_create(np, NULL, dev); if (!chan_ofdev) {
dev_err(dev, "Not able to create ofdev for jr %d\n", q);
ret = -EINVAL; goto err_free;
}
/* read reg property from dts */
rc = of_property_read_u32(np, "reg", &ptr); if (rc) {
dev_err(dev, "Reg property not found in jr %d\n", q);
ret = -ENODEV; goto err_free;
}
/* Ring registers live at job-queue offset 'off' plus ring offset 'ptr'. */
chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs +
off + ptr);
/* read irq property from dts */
chan->irq = irq_of_parse_and_map(np, 0); if (!chan->irq) {
dev_err(dev, "No IRQ defined for JR %d\n", q);
ret = -ENODEV; goto err_free;
}
/* 'chandev' is passed as the ISR cookie; fsl_re_isr reads drvdata off it. */
ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev); if (ret) {
dev_err(dev, "Unable to register interrupt for JR %d\n", q);
ret = -EINVAL; goto err_free;
}
/* Allocate DMA-coherent memory for the inbound (request) ring. */
chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
GFP_KERNEL, &chan->inb_phys_addr); if (!chan->inb_ring_virt_addr) {
dev_err(dev, "No dma memory for inb_ring_virt_addr\n");
ret = -ENOMEM; goto err_free;
}
/* Allocate DMA-coherent memory for the outbound (completion) ring. */
chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
GFP_KERNEL, &chan->oub_phys_addr); if (!chan->oub_ring_virt_addr) {
dev_err(dev, "No dma memory for oub_ring_virt_addr\n");
ret = -ENOMEM; goto err_free_1;
}
/* Program the Inbound/Outbound ring base addresses and size */
/*
 * NOTE(review): the *_h registers get the masked low word and the *_l
 * registers get the address shifted down by FSL_RE_ADDR_BIT_SHIFT --
 * the h/l naming looks inverted relative to the values written; verify
 * against the RAID Engine register reference before "fixing".
 */
out_be32(&chan->jrregs->inbring_base_h,
chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK);
out_be32(&chan->jrregs->oubring_base_h,
chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK);
out_be32(&chan->jrregs->inbring_base_l,
chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
out_be32(&chan->jrregs->oubring_base_l,
chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
out_be32(&chan->jrregs->inbring_size,
FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
out_be32(&chan->jrregs->oubring_size,
FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
/* Read LIODN value from u-boot */
status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK;
/* Program the CFG reg */
/* Preserve the firmware-set LIODN bits while enabling the ring. */
out_be32(&chan->jrregs->jr_config_1,
FSL_RE_CFG1_CBSI | FSL_RE_CFG1_CBS0 | status);
/*
 * NOTE(review): interior of the top-level probe routine (apparently
 * fsl_re_probe); the declarations of re_priv, dev, np, child, rc, off,
 * ridx, ofdev and the surrounding setup/teardown are outside this
 * extract. Confirm against the full file before editing.
 */
if (!re_priv->cf_desc_pool) {
dev_err(dev, "No memory for fsl re_cf desc pool\n"); return -ENOMEM;
}
/* Pool sized for one full hardware descriptor ring per allocation. */
re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev, sizeof(struct fsl_re_hw_desc) * FSL_RE_RING_SIZE,
FSL_RE_FRAME_ALIGN, 0); if (!re_priv->hw_desc_pool) {
dev_err(dev, "No memory for fsl re_hw desc pool\n"); return -ENOMEM;
}
dev_set_drvdata(dev, re_priv);
/* Parse Device tree to find out the total number of JQs present */
for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") {
rc = of_property_read_u32(np, "reg", &off); if (rc) {
dev_err(dev, "Reg property not found in JQ node\n");
/* Drop the node reference the iterator holds before bailing out. */
of_node_put(np); return -ENODEV;
} /* Find out the Job Rings present under each JQ */
for_each_child_of_node(np, child) {
rc = of_device_is_compatible(child, "fsl,raideng-v1.0-job-ring"); if (rc) {
/*
 * NOTE(review): fsl_re_chan_probe()'s return value is ignored here,
 * so a failed ring probe is still counted in total_chans -- verify
 * whether that is intentional.
 */
fsl_re_chan_probe(ofdev, child, ridx++, off);
re_priv->total_chans++;
}
}
}
/*
 * NOTE(review): trailing website boilerplate (German), not part of the
 * driver source -- wrapped in a comment so the file remains valid C;
 * remove once confirmed. Translation: "The information on this website
 * was compiled carefully to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the information provided is
 * guaranteed. Remark: the colored syntax display and the measurement are
 * still experimental."
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */