/*
 * struct pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;
/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
/*
 * NOTE(review): this fragment appears to be spliced together from two
 * different helpers of async_pq.c — the hardware descriptor submission
 * loop (presumably do_async_gen_syndrome) down to the retry loop's
 * closing brace, followed by the synchronous software syndrome path
 * (presumably do_sync_gen_syndrome).  The enclosing function headers
 * and many local declarations (src_cnt, submit, dma_flags, unmap,
 * srcs, start, stop, ...) are missing from this chunk — confirm
 * against upstream crypto/async_tx/async_pq.c before editing.
 */
while (src_cnt > 0) {
/* restore the caller's original flags at the top of each pass */
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); /* if we are submitting additional pqs, leave the chain open, * clear the callback parameters, and leave the destination * buffers mapped
*/ if (src_cnt > pq_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
/* final descriptor of the chain: restore the caller's callback */
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig; if (cb_fn_orig)
/* only request a completion interrupt when a callback exists */
dma_flags |= DMA_PREP_INTERRUPT;
} if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
/* Drivers force forward progress in case they can not provide * a descriptor
*/ for (;;) {
/* P and Q destinations live at the tail of the address array */
dma_dest[0] = unmap->addr[disks - 2];
dma_dest[1] = unmap->addr[disks - 1];
tx = dma->device_prep_dma_pq(chan, dma_dest,
&unmap->addr[src_off],
pq_src_cnt,
&scfs[src_off], unmap->len,
dma_flags); if (likely(tx)) break;
/* no descriptor available: drain dependencies, kick the channel,
 * and retry
 */
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
/* --- synchronous path: assemble srcs[] and run the software code --- */
for (i = 0; i < disks; i++) { if (blocks[i] == NULL) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
/* absent data disks are substituted with the shared zero page */
srcs[i] = raid6_get_zero_page();
} else {
srcs[i] = page_address(blocks[i]) + offsets[i];
if (i < disks - 2) {
/* track first/last present data disk for xor_syndrome below */
stop = i; if (start == -1)
start = i;
}
}
} if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
BUG_ON(!raid6_call.xor_syndrome); if (start >= 0)
/* in-place update: xor old data out of / new data into P and Q */
raid6_call.xor_syndrome(disks, start, stop, len, srcs);
} else
/* full recompute of P and Q over all disks */
raid6_call.gen_syndrome(disks, len, srcs);
async_tx_sync_epilog(submit);
}
/*
 * is_dma_pq_aligned_offs - check all source offsets against the DMA
 * device's P+Q alignment constraints.
 * @dev:     DMA device whose alignment rules apply
 * @offs:    array of @src_cnt per-block byte offsets
 * @src_cnt: number of offsets to validate
 * @len:     length of the operation in bytes
 *
 * Returns true only if every offset passes is_dma_pq_aligned(); used to
 * decide whether the hardware-offload path may be taken.
 */
static inline bool
is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
		       int src_cnt, size_t len)
{
	int i;

	for (i = 0; i < src_cnt; i++) {
		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
			return false;
	}
	return true;
}
/*
 * NOTE(review): this block is truncated in this chunk — the function
 * body ends after the local declarations and the closing brace is
 * missing.  Also "unsignedint" lacks a space and will not compile
 * as-is.  Compare with upstream crypto/async_tx/async_pq.c.
 */
/** * async_gen_syndrome - asynchronously calculate a raid6 syndrome * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 * @offsets: offset array into each block (src and dest) to start transaction * @disks: number of blocks (including missing P or Q, see below) * @len: length of operation in bytes * @submit: submission/completion modifiers * * General note: This routine assumes a field of GF(2^8) with a * primitive polynomial of 0x11d and a generator of {02}. * * 'disks' note: callers can optionally omit either P or Q (but not * both) from the calculation by setting blocks[disks-2] or * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= * PAGE_SIZE as a temporary buffer of this size is used in the * synchronous path. 'disks' always accounts for both destination * buffers. If any source buffers (blocks[i] where i < disks - 2) are * set to NULL those buffers will be replaced with the raid6_zero_page * in the synchronous path and omitted in the hardware-asynchronous * path.
*/ struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsignedint *offsets, int disks,
size_t len, struct async_submit_ctl *submit)
{ int src_cnt = disks - 2; struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&P(blocks, disks), 2,
blocks, src_cnt, len); struct dma_device *device = chan ? chan->device : NULL; struct dmaengine_unmap_data *unmap = NULL;
/*
 * NOTE(review): truncated fragment — the body references p_src, p_off,
 * scribble, p and s, none of which are declared in the visible portion,
 * and the function's closing brace is missing.  "unsignedint" and
 * "unsignedchar" lack spaces and will not compile as-is.  Confirm
 * against upstream crypto/async_tx/async_pq.c before editing further.
 */
/** * async_syndrome_val - asynchronously validate a raid6 syndrome * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 * @offsets: common offset into each block (src and dest) to start transaction * @disks: number of blocks (including missing P or Q, see below) * @len: length of operation in bytes * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set * @spare: temporary result buffer for the synchronous case * @s_off: spare buffer page offset * @submit: submission / completion modifiers * * The same notes from async_gen_syndrome apply to the 'blocks', * and 'disks' parameters of this routine. The synchronous path * requires a temporary result buffer and submit->scribble to be * specified.
*/ struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsignedint *offsets, int disks,
size_t len, enum sum_check_flags *pqres, struct page *spare, unsignedint s_off, struct async_submit_ctl *submit)
{ struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx; unsignedchar coefs[MAX_DISKS]; enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; struct dmaengine_unmap_data *unmap = NULL;
/* a validate needs at least 2 data disks plus the P and Q blocks */
BUG_ON(disks < 4 || disks > MAX_DISKS);
if (device)
unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
/* hardware path is usable only when the device can take this many
 * sources and every offset satisfies its alignment constraints
 */
if (unmap && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned_offs(device, offsets, disks, len)) { struct device *dev = device->dev;
dma_addr_t pq[2]; int i, j = 0, src_cnt = 0;
/* caller must provide a temporary result buffer and * allow the input parameters to be preserved
*/
BUG_ON(!spare || !scribble);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
/* recompute p and/or q into the temporary buffer and then * check to see the result matches the current value
*/
tx = NULL;
*pqres = 0; if (p_src) {
init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, scribble);
/* regenerate P by xor-ing the data blocks into the spare page */
tx = async_xor_offs(spare, s_off,
blocks, offsets, disks-2, len, submit);
async_tx_quiesce(&tx);
/* compare freshly computed P against the stored P block */
p = page_address(p_src) + p_off;
s = page_address(spare) + s_off;
*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
}
/*
 * NOTE(review): the following text is stray German boilerplate from a
 * source-listing web page and is not part of the kernel source; it
 * should be removed.  English translation kept for reference:
 * "The information on this web page has been compiled carefully and to
 * the best of our knowledge.  However, neither completeness, nor
 * correctness, nor the quality of the information provided is
 * guaranteed.  Note: the coloured syntax highlighting and the
 * measurement are still experimental."
 */