/* check if we can map the pages contiguously: */
/*
 * NOTE(review): this span looks like a garbled tail of a decompression
 * helper (__bio_uncompress-style): its opening (signature and declarations
 * of bv, bio, iter, start, src, src_data, dst_data, src_len, dst_len, ret,
 * ret2, crc) is not visible here, and several statements appear to have
 * been dropped (see notes below).  Comments describe only what these lines
 * visibly do.
 */
__bio_for_each_segment(bv, bio, iter, start) { if (iter.bi_size != start.bi_size &&
bv.bv_offset) goto bounce;
/*
 * NOTE(review): the closing brace of the segment-walk loop above is not
 * visible -- presumably lost in extraction.
 */
/* Look up the workspace mempool for this extent's compression type: */
enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type);
mempool_t *workspace_pool = &c->compress_workspace[opt]; if (unlikely(!mempool_initialized(workspace_pool))) { if (fsck_err(c, compression_type_not_marked_in_sb, "compression type %s set but not marked in superblock",
__bch2_compression_types[crc.compression_type]))
/* fsck chose to repair: record the compression type in the superblock */
ret = bch2_check_set_has_compressed_data(c, opt); else
/* pool missing and repair declined: cannot decompress this extent */
ret = bch_err_throw(c, compression_workspace_not_initialized); if (ret) goto err;
}
/* Map the source bio contiguously, bouncing if its pages aren't: */
src_data = bio_map_or_bounce(c, src, READ);
switch (crc.compression_type) { case BCH_COMPRESSION_TYPE_lz4_old: case BCH_COMPRESSION_TYPE_lz4:
/* Decompress exactly dst_len bytes; a short result is an error: */
ret2 = LZ4_decompress_safe_partial(src_data.b, dst_data,
src_len, dst_len, dst_len); if (ret2 != dst_len)
ret = bch_err_throw(c, decompress_lz4); break; case BCH_COMPRESSION_TYPE_gzip: {
z_stream strm = {
.next_in = src_data.b,
.avail_in = src_len,
.next_out = dst_data,
.avail_out = dst_len,
};
/*
 * NOTE(review): the actual zlib inflate call is missing between the
 * z_stream setup above and the length check below, and that check throws
 * the *zstd* decompress error inside the gzip case -- this strongly
 * suggests lines (likely including a whole zstd case) were lost.  The
 * error code here does not match the case label; restore from upstream.
 */
if (ret2 != dst_len)
ret = bch_err_throw(c, decompress_zstd); break;
} default:
BUG();
}
err:
fsck_err:
/* Common exit: release any bounce mapping of the source and return. */
bio_unmap_or_unbounce(c, src_data); return ret;
}
/*
 * Decompress a write op's bio in place: the compressed payload is
 * decompressed into a bounce buffer sized from crc->uncompressed_size.
 *
 * NOTE(review): this function appears truncated in this view -- the code
 * after setting bi_size (copying the bounce buffer back into the bio,
 * updating *crc, the err: label targeted below, and the return) is not
 * visible here.
 */
int bch2_bio_uncompress_inplace(struct bch_write_op *op, struct bio *bio)
{ struct bch_fs *c = op->c; struct bch_extent_crc_unpacked *crc = &op->crc; struct bbuf data = { NULL };
/* sizes are in 512-byte sectors; << 9 converts to bytes */
size_t dst_len = crc->uncompressed_size << 9; int ret = 0;
/* bio must own its pages: */
BUG_ON(!bio->bi_vcnt);
BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);
/* Refuse extents larger than the configured maximum encoded extent: */
if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max) {
bch2_write_op_error(op, op->pos.offset, "extent too big to decompress (%u > %u)",
crc->uncompressed_size << 9, c->opts.encoded_extent_max); return bch_err_throw(c, decompress_exceeded_max_encoded_extent);
}
data = __bounce_alloc(c, dst_len, WRITE);
ret = __bio_uncompress(c, bio, data.b, *crc);
/* With no_data_io nothing real was read, so decompress errors are moot: */
if (c->opts.no_data_io)
ret = 0;
if (ret) {
/* NOTE(review): the err: label this jumps to is outside the visible span */
bch2_write_op_error(op, op->pos.offset, "%s", bch2_err_str(ret)); goto err;
}
/* * XXX: don't have a good way to assert that the bio was allocated with * enough space, we depend on bch2_move_extent doing the right thing
 */
bio->bi_iter.bi_size = crc->live_size << 9;
/*
 * NOTE(review): mid-function fragment of an attempt_compress-style helper.
 * The enclosing signature and the declarations of compression_type,
 * compression, src, dst, src_len, dst_len, workspace, c and strm are not
 * visible, and the gzip case's deflate-init lines appear to have been
 * dropped (see note below).
 */
switch (compression_type) { case BCH_COMPRESSION_TYPE_lz4: if (compression.level < LZ4HC_MIN_CLEVEL) { int len = src_len; int ret = LZ4_compress_destSize(
src, dst,
&len, dst_len,
/* LZ4_compress_destSize consumed only `len` bytes: a negative return
 * is used as a hint of how much source data would have fit */
workspace); if (len < src_len) return -len;
return ret;
} else { int ret = LZ4_compress_HC(
src, dst,
src_len, dst_len,
compression.level,
workspace);
/*
 * NOTE(review): the zlib deflate lines below cannot belong to the
 * LZ4_compress_HC branch above -- the gzip case label and the
 * zlib_deflateInit/strm setup seem to be missing from this view.
 */
if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END) return 0;
if (zlib_deflateEnd(&strm) != Z_OK) return 0;
return strm.total_out;
} case BCH_COMPRESSION_TYPE_zstd: { /* * rescale: * zstd max compression level is 22, our max level is 15
 */ unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
ZSTD_CCtx *ctx = zstd_init_cctx(workspace, c->zstd_workspace_size);
/* * ZSTD requires that when we decompress we pass in the exact * compressed size - rounding it up to the nearest sector * doesn't work, so we use the first 4 bytes of the buffer for * that. * * Additionally, the ZSTD code seems to have a bug where it will * write just past the end of the buffer - so subtract a fudge * factor (7 bytes) from the dst buffer size to account for * that.
 */
size_t len = zstd_compress_cctx(ctx,
dst + 4, dst_len - 4 - 7,
src, src_len,
¶ms); if (zstd_is_error(len)) return 0;
/*
 * NOTE(review): "¶ms" above is a mojibake of "&params" (HTML entity
 * &para; corruption) -- must be restored before this compiles.
 */
/*
 * NOTE(review): fragment of a __bio_compress-style routine.  The enclosing
 * signature and the declarations of c, compression, src, src_data, dst_data,
 * src_len, dst_len, workspace and ret are outside this view, and the loop
 * body after the BUG_ON at the end (shrinking *src_len by the hint and
 * retrying) appears truncated.
 */
/* Verify the workspace pool for this compression option was set up: */
mempool_t *workspace_pool = &c->compress_workspace[compression.type]; if (unlikely(!mempool_initialized(workspace_pool))) { if (fsck_err(c, compression_opt_not_marked_in_sb, "compression opt %s set but not marked in superblock",
bch2_compression_opts[compression.type])) {
ret = bch2_check_set_has_compressed_data(c, compression.type); if (ret) /* memory allocation failure, don't compress */ return 0;
/* repair declined: fall back to writing uncompressed */
} else { return 0;
}
}
/* If it's only one block, don't bother trying to compress: */ if (src->bi_iter.bi_size <= c->opts.block_size) return BCH_COMPRESSION_TYPE_incompressible;
/* * XXX: this algorithm sucks when the compression code doesn't tell us * how much would fit, like LZ4 does:
 */ while (1) { if (*src_len <= block_bytes(c)) {
/* shrunk down to a single block and it still doesn't fit: give up */
ret = -1; break;
}
ret = attempt_compress(c, workspace,
dst_data.b, *dst_len,
src_data.b, *src_len,
/* positive return = compressed size; success, record and stop */
compression); if (ret > 0) {
*dst_len = ret;
ret = 0; break;
}
/* Didn't fit: should we retry with a smaller amount? */ if (*src_len <= *dst_len) {
ret = -1; break;
}
/* * If ret is negative, it's a hint as to how much data would fit
 */
BUG_ON(-ret >= *src_len);
/*
 * NOTE(review): tail of a teardown routine (bch2_fs_compress_exit-style);
 * its signature and the declaration of `i` are outside this view.
 * Tears down every per-algorithm compression workspace pool, then the
 * bounce-buffer pools for both directions.
 */
for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++)
mempool_exit(&c->compress_workspace[i]);
mempool_exit(&c->compression_bounce[WRITE]);
mempool_exit(&c->compression_bounce[READ]);
}
/*
 * NOTE(review): the text below is extraneous web-page boilerplate that was
 * evidently pasted into this file by the extraction tool and should be
 * removed.  Translated from German for the record:
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge.  However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */