/*
 * Adiantum is a tweakable, length-preserving encryption mode designed for fast
 * and secure disk encryption, especially on CPUs without dedicated crypto
 * instructions. Adiantum encrypts each sector using the XChaCha12 stream
 * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
 * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
 * 16-byte block. See the paper for details:
 *
 *	Adiantum: length-preserving encryption for entry-level processors
 *	(https://eprint.iacr.org/2018/720.pdf)
 *
 * For flexibility, this implementation also allows other ciphers:
 *
 *	- Stream cipher: XChaCha12 or XChaCha20
 *	- Block cipher: any with a 128-bit block size and 256-bit key
 *
 * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
 * HPolyC is not supported. This is because Adiantum is ~20% faster than
 * HPolyC but still provably as secure, and also the ε-∆U hash function of
 * HBSH is formally defined to take two inputs (tweak, message) which makes it
 * difficult to wrap with the crypto_shash API. Rather, some details need to
 * be handled here. Nevertheless, if needed in the future, support for other
 * ε-∆U hash functions could be added here.
 */
/*
 * Size of right-hand part of input data, in bytes; also the size of the block
 * cipher's block size and the hash function's output.
 */
#define BLOCKCIPHER_BLOCK_SIZE		16

/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE		32

/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE		(POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)

/*
 * The specification allows variable-length tweaks, but Linux's crypto API
 * currently only allows algorithms to support a single length. The "natural"
 * tweak length for Adiantum is 16, since that fits into one Poly1305 block
 * for the best performance. But longer tweaks are useful for fscrypt, to
 * avoid needing to derive per-file keys. So instead we use two blocks, or 32
 * bytes.
 */
#define TWEAK_SIZE		32
	/*
	 * Buffer for right-hand part of data, i.e.
	 *
	 *    P_L => P_M => C_M => C_R when encrypting, or
	 *    C_R => C_M => P_M => P_L when decrypting.
	 *
	 * Also used to build the IV for the stream cipher.
	 */
	union {
		u8 bytes[XCHACHA_IV_SIZE];
		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
	} rbuf;

	bool enc; /* true if encrypting, false if decrypting */

	/*
	 * The result of the Poly1305 ε-∆U hash function applied to
	 * (bulk length, tweak)
	 */
	le128 header_hash;

	/* Sub-requests, must be last */
	union {
		struct shash_desc hash_desc;
		struct skcipher_request streamcipher_req;
	} u;
};
/* * Given the XChaCha stream key K_S, derive the block cipher key K_E and the * hash key K_H as follows: * * K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191) * * Note that this denotes using bits from the XChaCha keystream, which here we * get indirectly by encrypting a buffer containing all 0's.
*/ staticint adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key, unsignedint keylen)
{ struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct {
u8 iv[XCHACHA_IV_SIZE];
u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE]; struct scatterlist sg; struct crypto_wait wait; struct skcipher_request req; /* must be last */
} *data;
u8 *keyp; int err;
/* Set the stream cipher key (K_S) */
crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(tctx->streamcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen); if (err) return err;
/* * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the * result to rctx->header_hash. This is the calculation * * H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T) * * from the procedure in section 6.4 of the Adiantum paper. The resulting value * is reused in both the first and second hash steps. Specifically, it's added * to the result of an independently keyed ε-∆U hash function (for equal length * inputs only) taken over the left-hand part (the "bulk") of the message, to * give the overall Adiantum hash of the (tweak, left-hand part) pair.
*/ staticvoid adiantum_hash_header(struct skcipher_request *req)
{ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); conststruct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); struct adiantum_request_ctx *rctx = skcipher_request_ctx(req); constunsignedint bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; struct {
__le64 message_bits;
__le64 padding;
} header = {
.message_bits = cpu_to_le64((u64)bulk_len * 8)
}; struct poly1305_state state;
/* If decrypting, decrypt C_M with the block cipher to get P_M */ if (!rctx->enc)
crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
rctx->rbuf.bytes);
err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
(u8 *)&digest);
memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
kunmap_local(virt);
} else { /* Slow path that works for any source scatterlist */
err = adiantum_hash_message(req, src, src_nents, &digest);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
bulk_len, sizeof(le128), 0);
} if (err) return err;
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
/* If encrypting, encrypt P_M with the block cipher to get C_M */ if (enc)
crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
rctx->rbuf.bytes);
/* Initialize the rest of the XChaCha IV (first part is C_M) */
BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
BUILD_BUG_ON(XCHACHA_IV_SIZE != 32); /* nonce || stream position */
rctx->rbuf.words[4] = cpu_to_le32(1);
rctx->rbuf.words[5] = 0;
rctx->rbuf.words[6] = 0;
rctx->rbuf.words[7] = 0;
/* * XChaCha needs to be done on all the data except the last 16 bytes; * for disk encryption that usually means 4080 or 496 bytes. But ChaCha * implementations tend to be most efficient when passed a whole number * of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes. * And here it doesn't matter whether the last 16 bytes are written to, * as the second hash step will overwrite them. Thus, round the XChaCha * length up to the next 64-byte boundary if possible.
*/
stream_len = bulk_len; if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);
/*
 * Check for a supported set of inner algorithms.
 * See the comment at the beginning of this file.
 */
static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
					  struct crypto_alg *blockcipher_alg,
					  struct shash_alg *hash_alg)
{
	/* Stream cipher: only XChaCha12 and XChaCha20 are supported. */
	if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
	    strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
		return false;

	/* Block cipher: must take a 256-bit key and use 128-bit blocks. */
	if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
	    blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
		return false;
	if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
		return false;

	/* ε-∆U hash function: only NHPoly1305 is supported (no HPolyC). */
	if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
		return false;

	/* NOTE(review): success return reconstructed; the original tail of
	 * this function was clipped by the extraction that produced this file.
	 */
	return true;
}
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask; /* * The block cipher is only invoked once per message, so for long * messages (e.g. sectors for disk encryption) its performance doesn't * matter as much as that of the stream cipher and hash function. Thus, * weigh the block cipher's ->cra_priority less.
*/
inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
2 * hash_alg->base.cra_priority +
blockcipher_alg->cra_priority) / 7;
/*
 * NOTE(review): the text below is a German web-page disclaimer left over from
 * the site this source was extracted from; it is not part of the original
 * kernel file. Wrapped in a comment so it is no longer bare (non-compiling)
 * tokens; preserved verbatim pending removal.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten
 * Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell.
 */