/* Loop over each preallocated buffer slot. */
#define fec_for_each_prealloc_buffer(__i) \
	for (__i = 0; __i < DM_VERITY_FEC_BUF_PREALLOC; __i++)

/* Loop over each extra (non-preallocated) buffer slot. */
#define fec_for_each_extra_buffer(io, __i) \
	for (__i = DM_VERITY_FEC_BUF_PREALLOC; __i < DM_VERITY_FEC_BUF_MAX; __i++)

/* Loop over each buffer currently allocated for this io. */
#define fec_for_each_buffer(io, __i) \
	for (__i = 0; __i < (io)->nbufs; __i++)

/* Loop over each RS block (__j) in each allocated buffer (__i). */
#define fec_for_each_buffer_rs_block(io, __i, __j) \
	fec_for_each_buffer(io, __i) \
		for (__j = 0; __j < 1 << DM_VERITY_FEC_BUF_RS_BITS; __j++)
/* * Return a pointer to the current RS block when called inside * fec_for_each_buffer_rs_block.
*/ staticinline u8 *fec_buffer_rs_block(struct dm_verity *v, struct dm_verity_fec_io *fio, unsignedint i, unsignedint j)
{ return &fio->bufs[i][j * v->fec->rsn];
}
/* * Return an index to the current RS block when called inside * fec_for_each_buffer_rs_block.
*/ staticinlineunsignedint fec_buffer_rs_index(unsignedint i, unsignedint j)
{ return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
}
/*
 * NOTE(review): this fragment is garbled by extraction — "staticint" and
 * "unsignedint" are fused tokens, multiple statements share physical lines,
 * and the function body is TRUNCATED after the decode_rs8() error check
 * (the corrected-byte copy loop, parity window advance, and the error/
 * cleanup labels are missing from this view). Code is kept byte-identical;
 * restore this function from the upstream dm-verity-fec.c.
 */
/* * Decode all RS blocks from buffers and copy corrected bytes into fio->output * starting from block_offset.
 */ staticint fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io, struct dm_verity_fec_io *fio, u64 rsb, int byte_index, unsignedint block_offset, int neras)
{ int r, corrected = 0, res; struct dm_buffer *buf; unsignedint n, i, j, offset, par_buf_offset = 0;
/* Parity staging buffer sized for the maximum possible number of roots. */
uint16_t par_buf[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
u8 *par, *block; struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
/* Read the parity bytes covering this RS block; propagate read errors. */
par = fec_read_parity(v, rsb, block_offset, &offset,
par_buf_offset, &buf, bio->bi_ioprio); if (IS_ERR(par)) return PTR_ERR(par);
/* * Decode the RS blocks we have in bufs. Each RS block results in * one corrected target byte and consumes fec->roots parity bytes.
 */
fec_for_each_buffer_rs_block(fio, n, i) {
block = fec_buffer_rs_block(v, fio, n, i); for (j = 0; j < v->fec->roots - par_buf_offset; j++)
par_buf[par_buf_offset + j] = par[offset + j]; /* Decode an RS block using Reed-Solomon */
res = decode_rs8(fio->rs, block, par_buf, v->fec->rsn,
NULL, neras, fio->erasures, 0, NULL); if (res < 0) {
r = res; goto error;
}
/*
 * NOTE(review): extraction-mangled and TRUNCATED fragment. "staticint"/
 * "unsignedint" are fused tokens and the dm_bufio read that initializes
 * `bufio`, `block` and `bbuf` is missing from this view (they are used
 * below but never assigned here), as is the end of the function after the
 * deinterleave loop header. Code kept byte-identical; restore from the
 * upstream dm-verity-fec.c.
 */
/* * Read data blocks that are part of the RS block and deinterleave as much as * fits into buffers. Check for erasure locations if @neras is non-NULL.
 */ staticint fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
u64 rsb, u64 target, unsignedint block_offset, int *neras)
{ bool is_zero; int i, j, target_index = -1; struct dm_buffer *buf; struct dm_bufio_client *bufio; struct dm_verity_fec_io *fio = fec_io(io);
u64 block, ileaved;
u8 *bbuf, *rs_block;
u8 want_digest[HASH_MAX_DIGESTSIZE]; unsignedint n, k; struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
if (neras)
*neras = 0;
/* Defensive check: digest buffer must be large enough for this verity config. */
if (WARN_ON(v->digest_size > sizeof(want_digest))) return -EINVAL;
/* * read each of the rsn data blocks that are part of the RS block, and * interleave contents to available bufs
 */ for (i = 0; i < v->fec->rsn; i++) {
ileaved = fec_interleave(v, rsb * v->fec->rsn + i);
/* * target is the data block we want to correct, target_index is * the index of this block within the rsn RS blocks
 */ if (ileaved == target)
target_index = i;
/* assume the block is corrupted */ if (neras && *neras <= v->fec->roots)
fio->erasures[(*neras)++] = i;
continue;
}
/* locate erasures if the block is on the data device */ if (bufio == v->fec->data_bufio &&
verity_hash_for_block(v, io, block, want_digest,
&is_zero) == 0) { /* skip known zero blocks entirely */ if (is_zero) goto done;
/* * skip if we have already found the theoretical * maximum number (i.e. fec->roots) of erasures
 */ if (neras && *neras <= v->fec->roots &&
fec_is_erasure(v, io, want_digest, bbuf))
fio->erasures[(*neras)++] = i;
}
/* * deinterleave and copy the bytes that fit into bufs, * starting from block_offset
 */
fec_for_each_buffer_rs_block(fio, n, j) {
k = fec_buffer_rs_index(n, j) + block_offset;
/*
 * Allocate RS control structure and FEC buffers from preallocated mempools,
 * and attempt to allocate as many extra buffers as available.
 *
 * Returns 0 on success or -ENOMEM if a preallocated buffer cannot be
 * obtained; failure to get *extra* buffers is tolerated (decoding can
 * proceed with fewer buffers).
 */
static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
	unsigned int n;

	if (!fio->rs)
		fio->rs = mempool_alloc(&v->fec->rs_pool, GFP_NOIO);

	/* The preallocated pool is mandatory; GFP_NOWAIT avoids I/O recursion. */
	fec_for_each_prealloc_buffer(n) {
		if (fio->bufs[n])
			continue;

		fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOWAIT);
		if (unlikely(!fio->bufs[n])) {
			DMERR("failed to allocate FEC buffer");
			return -ENOMEM;
		}
	}

	/* try to allocate the maximum number of buffers */
	fec_for_each_extra_buffer(fio, n) {
		if (fio->bufs[n])
			continue;

		fio->bufs[n] = mempool_alloc(&v->fec->extra_pool, GFP_NOWAIT);
		/* we can manage with even one buffer if necessary */
		if (unlikely(!fio->bufs[n]))
			break;
	}
	/* n is the first unallocated slot, i.e. the number of usable buffers. */
	fio->nbufs = n;

	if (!fio->output)
		fio->output = mempool_alloc(&v->fec->output_pool, GFP_NOIO);

	return 0;
}
/*
 * NOTE(review): TRUNCATED fragment — only the header and one declaration of
 * fec_init_bufs() survive here ("staticvoid"/"unsignedint" are fused tokens
 * from extraction). The zeroing loop and erasure reset are missing from this
 * view; restore from the upstream dm-verity-fec.c.
 */
/* * Initialize buffers and clear erasures. fec_read_bufs() assumes buffers are * zeroed before deinterleaving.
 */ staticvoid fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{ unsignedint n;
/*
 * NOTE(review): heavily garbled fragment. The header of fec_decode_rsb()
 * is followed by lines that do not belong to it in the upstream file: the
 * `rsb = offset - res * ...` computation and the two fec_decode_rsb() calls
 * with/without erasures come from the caller (verity_fec_decode), and `res`
 * is used here without any visible declaration. Code kept byte-identical;
 * do not attempt to compile this fragment — restore both functions from the
 * upstream dm-verity-fec.c.
 */
/* * Decode all RS blocks in a single data block and return the target block * (indicated by @offset) in fio->output. If @use_erasures is non-zero, uses * hashes to locate erasures.
 */ staticint fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, struct dm_verity_fec_io *fio, u64 rsb, u64 offset, bool use_erasures)
{ int r, neras = 0; unsignedint pos;
r = fec_alloc_bufs(v, fio); if (unlikely(r < 0)) return r;
/* * For RS(M, N), the continuous FEC data is divided into blocks of N * bytes. Since block size may not be divisible by N, the last block * is zero padded when decoding. * * Each byte of the block is covered by a different RS(M, N) code, * and each code is interleaved over N blocks to make it less likely * that bursty corruption will leave us in unrecoverable state.
 */
/* * The base RS block we can feed to the interleaver to find out all * blocks required for decoding.
 */
rsb = offset - res * (v->fec->rounds << v->data_dev_block_bits);
/* * Locating erasures is slow, so attempt to recover the block without * them first. Do a second attempt with erasures if the corruption is * bad enough.
 */
r = fec_decode_rsb(v, io, fio, rsb, offset, false); if (r < 0) {
r = fec_decode_rsb(v, io, fio, rsb, offset, true); if (r < 0) goto done;
}
/* * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr.
*/ int verity_fec_ctr_alloc(struct dm_verity *v)
{ struct dm_verity_fec *f;
f = kzalloc(sizeof(struct dm_verity_fec), GFP_KERNEL); if (!f) {
v->ti->error = "Cannot allocate FEC structure"; return -ENOMEM;
}
v->fec = f;
return 0;
}
/*
 * NOTE(review): TRUNCATED fragment of verity_fec_ctr() — statements are
 * collapsed onto shared physical lines and the function is cut off after
 * the hash-device size check (the later setup, e.g. bufio clients and
 * mempool initialization, and the closing brace are missing from this
 * view). Code kept byte-identical; restore from upstream dm-verity-fec.c.
 */
/* * Validate arguments and preallocate memory. Must be called after arguments * have been parsed using verity_fec_parse_opt_args.
 */ int verity_fec_ctr(struct dm_verity *v)
{ struct dm_verity_fec *f = v->fec; struct dm_target *ti = v->ti;
u64 hash_blocks, fec_blocks; int ret;
/* Nothing to do when FEC was not requested; release any partial state. */
if (!verity_fec_is_enabled(v)) {
verity_fec_dtr(v); return 0;
}
/* * FEC is computed over data blocks, possible metadata, and * hash blocks. In other words, FEC covers total of fec_blocks * blocks consisting of the following: * * data blocks | hash blocks | metadata (optional) * * We allow metadata after hash blocks to support a use case * where all data is stored on the same device and FEC covers * the entire area. * * If metadata is included, we require it to be available on the * hash device after the hash blocks.
 */
hash_blocks = v->hash_blocks - v->hash_start;
/* * Require matching block sizes for data and hash devices for * simplicity.
 */ if (v->data_dev_block_bits != v->hash_dev_block_bits) {
ti->error = "Block sizes must match to use FEC"; return -EINVAL;
}
/* rounds = ceil(blocks / rsn); sector_div returns the remainder. */
f->rounds = f->blocks; if (sector_div(f->rounds, f->rsn))
f->rounds++;
/* * Due to optional metadata, f->blocks can be larger than * data_blocks and hash_blocks combined.
 */ if (f->blocks < v->data_blocks + hash_blocks || !f->rounds) {
ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS; return -EINVAL;
}
/* * Metadata is accessed through the hash device, so we require * it to be large enough.
 */
f->hash_blocks = f->blocks - v->data_blocks; if (dm_bufio_get_device_size(v->bufio) < f->hash_blocks) {
ti->error = "Hash device is too small for "
DM_VERITY_OPT_FEC_BLOCKS; return -E2BIG;
}
/*
 * NOTE(review): extraneous website boilerplate appended during extraction —
 * not part of this driver; remove when restoring the file. English
 * translation of the German text: "The information on this web page was
 * compiled carefully to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax display and the measurement are
 * still experimental."
 */