/* * Set if trying to decrypt an inauthentic ciphertext with this * algorithm might result in EINVAL rather than EBADMSG, due to other * validation the algorithm does on the inputs such as length checks.
*/ unsignedint einval_allowed : 1;
/* * Set if this algorithm requires that the IV be located at the end of * the AAD buffer, in addition to being given in the normal way. The * behavior when the two IV copies differ is implementation-defined.
*/ unsignedint aad_iv : 1;
};
struct alg_test_desc { constchar *alg; constchar *generic_driver; int (*test)(conststruct alg_test_desc *desc, constchar *driver,
u32 type, u32 mask); int fips_allowed; /* set if alg is allowed in fips mode */
/* Is the memory region still fully poisoned? */ staticinlinebool testmgr_is_poison(constvoid *addr, size_t len)
{ return memchr_inv(addr, TESTMGR_POISON_BYTE, len) == NULL;
}
/* flush type for hash algorithms */
enum flush_type {
	/* merge with update of previous buffer(s) */
	FLUSH_TYPE_NONE = 0,

	/* update with previous buffer(s) before doing this one */
	FLUSH_TYPE_FLUSH,

	/* likewise, but also export and re-import the intermediate state */
	FLUSH_TYPE_REIMPORT,
};
/* finalization function for hash algorithms */
enum finalization_type {
	FINALIZATION_TYPE_FINAL,	/* use final() */
	FINALIZATION_TYPE_FINUP,	/* use finup() */
	FINALIZATION_TYPE_DIGEST,	/* use digest() */
};
/*
 * Whether the crypto operation will occur in-place, and if so whether the
 * source and destination scatterlist pointers will coincide (req->src ==
 * req->dst), or whether they'll merely point to two separate scatterlists
 * (req->src != req->dst) that reference the same underlying memory.
 *
 * This is only relevant for algorithm types that support in-place operation.
 */
enum inplace_mode {
	OUT_OF_PLACE,
	INPLACE_ONE_SGLIST,
	INPLACE_TWO_SGLISTS,
};
#define TEST_SG_TOTAL 10000
/**
 * struct test_sg_division - description of a scatterlist entry
 *
 * This struct describes one entry of a scatterlist being constructed to check a
 * crypto test vector.
 *
 * @proportion_of_total: length of this chunk relative to the total length,
 *			 given as a proportion out of TEST_SG_TOTAL so that it
 *			 scales to fit any test vector
 * @offset: byte offset into a 2-page buffer at which this chunk will start
 * @offset_relative_to_alignmask: if true, add the algorithm's alignmask to the
 *				  @offset
 * @flush_type: for hashes, whether an update() should be done now vs.
 *		continuing to accumulate data
 * @nosimd: if doing the pending update(), do it with SIMD disabled?
 */
struct test_sg_division {
	unsigned int proportion_of_total;
	unsigned int offset;
	bool offset_relative_to_alignmask;
	enum flush_type flush_type;
	bool nosimd;
};
/**
 * struct testvec_config - configuration for testing a crypto test vector
 *
 * This struct describes the data layout and other parameters with which each
 * crypto test vector can be tested.
 *
 * @name: name of this config, logged for debugging purposes if a test fails
 * @inplace_mode: whether and how to operate on the data in-place, if applicable
 * @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP
 * @src_divs: description of how to arrange the source scatterlist
 * @dst_divs: description of how to arrange the dst scatterlist, if applicable
 *	      for the algorithm type.  Defaults to @src_divs if unset.
 * @iv_offset: misalignment of the IV in the range [0..MAX_ALGAPI_ALIGNMASK+1],
 *	       where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary
 * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
 *				     the @iv_offset
 * @key_offset: misalignment of the key, where 0 is default alignment
 * @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
 *				      the @key_offset
 * @finalization_type: what finalization function to use for hashes
 * @nosimd: execute with SIMD disabled?  Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
 *	    This applies to the parts of the operation that aren't controlled
 *	    individually by @nosimd_setkey or @src_divs[].nosimd.
 * @nosimd_setkey: set the key (if applicable) with SIMD disabled?  Requires
 *		   !CRYPTO_TFM_REQ_MAY_SLEEP.
 */
struct testvec_config {
	const char *name;
	enum inplace_mode inplace_mode;
	u32 req_flags;
	struct test_sg_division src_divs[XBUFSIZE];
	struct test_sg_division dst_divs[XBUFSIZE];
	unsigned int iv_offset;
	unsigned int key_offset;
	bool iv_offset_relative_to_alignmask;
	bool key_offset_relative_to_alignmask;
	enum finalization_type finalization_type;
	bool nosimd;
	bool nosimd_setkey;
};
#define TESTVEC_CONFIG_NAMELEN 192
/* * The following are the lists of testvec_configs to test for each algorithm * type when the "fast" crypto self-tests are enabled. They aim to provide good * test coverage, while keeping the test time much shorter than the "full" tests * so that the "fast" tests can be enabled in a wider range of circumstances.
*/
staticbool valid_sg_divisions(conststruct test_sg_division *divs, unsignedint count, int *flags_ret)
{ unsignedint total = 0; unsignedint i;
for (i = 0; i < count && total != TEST_SG_TOTAL; i++) { if (divs[i].proportion_of_total <= 0 ||
divs[i].proportion_of_total > TEST_SG_TOTAL - total) returnfalse;
total += divs[i].proportion_of_total; if (divs[i].flush_type != FLUSH_TYPE_NONE)
*flags_ret |= SGDIVS_HAVE_FLUSHES; if (divs[i].nosimd)
*flags_ret |= SGDIVS_HAVE_NOSIMD;
} return total == TEST_SG_TOTAL &&
memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
}
/* * Check whether the given testvec_config is valid. This isn't strictly needed * since every testvec_config should be valid, but check anyway so that people * don't unknowingly add broken configs that don't do what they wanted.
*/ staticbool valid_testvec_config(conststruct testvec_config *cfg)
{ int flags = 0;
if (cfg->name == NULL) returnfalse;
if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
&flags)) returnfalse;
if (cfg->dst_divs[0].proportion_of_total) { if (!valid_sg_divisions(cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs), &flags)) returnfalse;
} else { if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs))) returnfalse; /* defaults to dst_divs=src_divs */
}
/* Allocate the 2-page-per-entry buffers backing a test scatterlist. */
static int init_test_sglist(struct test_sglist *tsgl)
{
	return __testmgr_alloc_buf(tsgl->bufs, 1 /* two pages per buffer */);
}
/* Free the buffers allocated by init_test_sglist(). */
static void destroy_test_sglist(struct test_sglist *tsgl)
{
	/*
	 * Plain call instead of "return expr;": returning an expression from
	 * a void function is a GNU extension, not standard C.
	 */
	__testmgr_free_buf(tsgl->bufs, 1 /* two pages per buffer */);
}
/** * build_test_sglist() - build a scatterlist for a crypto test * * @tsgl: the scatterlist to build. @tsgl->bufs[] contains an array of 2-page * buffers which the scatterlist @tsgl->sgl[] will be made to point into. * @divs: the layout specification on which the scatterlist will be based * @alignmask: the algorithm's alignmask * @total_len: the total length of the scatterlist to build in bytes * @data: if non-NULL, the buffers will be filled with this data until it ends. * Otherwise the buffers will be poisoned. In both cases, some bytes * past the end of each buffer will be poisoned to help detect overruns. * @out_divs: if non-NULL, the test_sg_division to which each scatterlist entry * corresponds will be returned here. This will match @divs except * that divisions resolving to a length of 0 are omitted as they are * not included in the scatterlist. * * Return: 0 or a -errno value
*/ staticint build_test_sglist(struct test_sglist *tsgl, conststruct test_sg_division *divs, constunsignedint alignmask, constunsignedint total_len, struct iov_iter *data, conststruct test_sg_division *out_divs[XBUFSIZE])
{ struct { conststruct test_sg_division *div;
size_t length;
} partitions[XBUFSIZE]; constunsignedint ndivs = count_test_sg_divisions(divs); unsignedint len_remaining = total_len; unsignedint i;
BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl)); if (WARN_ON(ndivs > ARRAY_SIZE(partitions))) return -EINVAL;
/* Calculate the (div, length) pairs */
tsgl->nents = 0; for (i = 0; i < ndivs; i++) { unsignedint len_this_sg =
min(len_remaining,
(total_len * divs[i].proportion_of_total +
TEST_SG_TOTAL / 2) / TEST_SG_TOTAL);
/* Set up the sgl entries and fill the data or poison */
sg_init_table(tsgl->sgl, tsgl->nents); for (i = 0; i < tsgl->nents; i++) { unsignedint offset = partitions[i].div->offset; void *addr;
if (partitions[i].div->offset_relative_to_alignmask)
offset += alignmask;
/* * Verify that a scatterlist crypto operation produced the correct output. * * @tsgl: scatterlist containing the actual output * @expected_output: buffer containing the expected output * @len_to_check: length of @expected_output in bytes * @unchecked_prefix_len: number of ignored bytes in @tsgl prior to real result * @check_poison: verify that the poison bytes after each chunk are intact? * * Return: 0 if correct, -EINVAL if incorrect, -EOVERFLOW if buffer overrun.
*/ staticint verify_correct_output(conststruct test_sglist *tsgl, constchar *expected_output, unsignedint len_to_check, unsignedint unchecked_prefix_len, bool check_poison)
{ unsignedint i;
for (i = 0; i < tsgl->nents; i++) { struct scatterlist *sg = &tsgl->sgl_ptr[i]; unsignedint len = sg->length; unsignedint offset = sg->offset; constchar *actual_output;
if (unchecked_prefix_len) { if (unchecked_prefix_len >= len) {
unchecked_prefix_len -= len; continue;
}
offset += unchecked_prefix_len;
len -= unchecked_prefix_len;
unchecked_prefix_len = 0;
}
len = min(len, len_to_check);
actual_output = page_address(sg_page(sg)) + offset; if (memcmp(expected_output, actual_output, len) != 0) return -EINVAL; if (check_poison &&
!testmgr_is_poison(actual_output + len, TESTMGR_POISON_LEN)) return -EOVERFLOW;
len_to_check -= len;
expected_output += len;
} if (WARN_ON(len_to_check != 0)) return -EINVAL; return 0;
}
for (i = 0; i < tsgl->nents; i++) { if (tsgl->sgl[i].page_link != tsgl->sgl_saved[i].page_link) returntrue; if (tsgl->sgl[i].offset != tsgl->sgl_saved[i].offset) returntrue; if (tsgl->sgl[i].length != tsgl->sgl_saved[i].length) returntrue;
} returnfalse;
}
/* * In-place crypto operations can use the same scatterlist for both the * source and destination (req->src == req->dst), or can use separate * scatterlists (req->src != req->dst) which point to the same * underlying memory. Make sure to test both cases.
*/ if (cfg->inplace_mode == INPLACE_ONE_SGLIST) {
tsgls->dst.sgl_ptr = tsgls->src.sgl;
tsgls->dst.nents = tsgls->src.nents; return 0;
} if (cfg->inplace_mode == INPLACE_TWO_SGLISTS) { /* * For now we keep it simple and only test the case where the * two scatterlists have identical entries, rather than * different entries that split up the same memory differently.
*/
memcpy(tsgls->dst.sgl, tsgls->src.sgl,
tsgls->src.nents * sizeof(tsgls->src.sgl[0]));
memcpy(tsgls->dst.sgl_saved, tsgls->src.sgl,
tsgls->src.nents * sizeof(tsgls->src.sgl[0]));
tsgls->dst.sgl_ptr = tsgls->dst.sgl;
tsgls->dst.nents = tsgls->src.nents; return 0;
} /* Out of place */ return build_test_sglist(&tsgls->dst,
cfg->dst_divs[0].proportion_of_total ?
cfg->dst_divs : cfg->src_divs,
alignmask, dst_total_len, NULL, NULL);
}
/* * Support for testing passing a misaligned key to setkey(): * * If cfg->key_offset is set, copy the key into a new buffer at that offset, * optionally adding alignmask. Else, just use the key directly.
*/ staticint prepare_keybuf(const u8 *key, unsignedint ksize, conststruct testvec_config *cfg, unsignedint alignmask, const u8 **keybuf_ret, const u8 **keyptr_ret)
{ unsignedint key_offset = cfg->key_offset;
u8 *keybuf = NULL, *keyptr = (u8 *)key;
/*
 * Like setkey_f(tfm, key, ksize), but sometimes misalign the key.
 * In addition, run the setkey function in no-SIMD context if requested.
 */
#define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask)		\
({									\
	const u8 *keybuf, *keyptr;					\
	int err;							\
									\
	err = prepare_keybuf((key), (ksize), (cfg), (alignmask),	\
			     &keybuf, &keyptr);				\
	if (err == 0) {							\
		if ((cfg)->nosimd_setkey)				\
			crypto_disable_simd_for_test();			\
		err = setkey_f((tfm), keyptr, (ksize));			\
		if ((cfg)->nosimd_setkey)				\
			crypto_reenable_simd_for_test();		\
		kfree(keybuf);						\
	}								\
	err;								\
})
/* * The fuzz tests use prandom instead of the normal Linux RNG since they don't * need cryptographically secure random numbers. This greatly improves the * performance of these tests, especially if they are run before the Linux RNG * has been initialized or if they are run on a lockdep-enabled kernel.
*/
staticinline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
{ /* * This is slightly biased for non-power-of-2 values of 'ceil', but this * isn't important here.
*/ return prandom_u32_state(rng) % ceil;
}
/* Generate a random length in range [0, max_len], but prefer smaller values */
static unsigned int generate_random_length(struct rnd_state *rng,
					   unsigned int max_len)
{
	unsigned int len = prandom_u32_below(rng, max_len + 1);

	/* 3 out of 4 times, clamp the length into a smaller bucket. */
	switch (prandom_u32_below(rng, 4)) {
	case 0:
		len %= 64;
		break;
	case 1:
		len %= 256;
		break;
	case 2:
		len %= 1024;
		break;
	default:
		break;
	}
	/* Occasionally round down to a power of two. */
	if (len && prandom_u32_below(rng, 4) == 0)
		len = rounddown_pow_of_two(len);
	return len;
}
/* Flip a random bit in the given nonempty data buffer */ staticvoid flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t bitpos;
/* Flip a random byte in the given nonempty data buffer */
static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
{
	buf[prandom_u32_below(rng, size)] ^= 0xff;
}
/* Sometimes make some random changes to the given nonempty data buffer */
static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
{
	size_t num_flips;
	size_t i;

	/* Sometimes flip some bits */
	if (prandom_u32_below(rng, 4) == 0) {
		/* Flip up to 2^8 bits, capped at the number of bits present. */
		num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
				  size * 8);
		for (i = 0; i < num_flips; i++)
			flip_random_bit(rng, buf, size);
	}

	/* Sometimes flip some bytes */
	if (prandom_u32_below(rng, 4) == 0) {
		num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
		for (i = 0; i < num_flips; i++)
			flip_random_byte(rng, buf, size);
	}
}
/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
{
	u8 b;
	u8 increment;
	size_t i;

	if (count == 0)
		return;

	switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
	case 0:
	case 1:
		/* All the same byte, plus optional mutations */
		switch (prandom_u32_below(rng, 4)) {
		case 0:
			b = 0x00;
			break;
		case 1:
			b = 0xff;
			break;
		default:
			b = prandom_u8(rng);
			break;
		}
		memset(buf, b, count);
		mutate_buffer(rng, buf, count);
		break;
	case 2:
		/* Ascending or descending bytes, plus optional mutations */
		increment = prandom_u8(rng);
		b = prandom_u8(rng);
		for (i = 0; i < count; i++, b += increment)
			buf[i] = b;
		mutate_buffer(rng, buf, count);
		break;
	default:
		/* Fully random bytes */
		prandom_bytes_state(rng, buf, count);
	}
}
/* * Given an algorithm name, build the name of the generic implementation of that * algorithm, assuming the usual naming convention. Specifically, this appends * "-generic" to every part of the name that is not a template name. Examples: * * aes => aes-generic * cbc(aes) => cbc(aes-generic) * cts(cbc(aes)) => cts(cbc(aes-generic)) * rfc7539(chacha20,poly1305) => rfc7539(chacha20-generic,poly1305-generic) * * Return: 0 on success, or -ENAMETOOLONG if the generic name would be too long
*/ staticint build_generic_driver_name(constchar *algname, char driver_name[CRYPTO_MAX_ALG_NAME])
{ constchar *in = algname; char *out = driver_name;
size_t len = strlen(algname);
if (len >= CRYPTO_MAX_ALG_NAME) goto too_long; do { constchar *in_saved = in;
while (*in && *in != '(' && *in != ')' && *in != ',')
*out++ = *in++; if (*in != '(' && in > in_saved) {
len += 8; if (len >= CRYPTO_MAX_ALG_NAME) goto too_long;
memcpy(out, "-generic", 8);
out += 8;
}
} while ((*out++ = *in++) != '\0'); return 0;
too_long:
pr_err("alg: generic driver name for \"%s\" would be too long\n",
algname); return -ENAMETOOLONG;
}
/* * For algorithms implemented as "shash", most bugs will be detected by * both the shash and ahash tests. Test the shash API first so that the * failures involve less indirection, so are easier to debug.
*/
if (desc) {
err = test_shash_vec_cfg(vec, vec_name, cfg, desc, tsgl,
hashstate); if (err) return err;
}
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&rng, &cfg, cfgname, sizeof(cfgname));
err = test_hash_vec_cfg(vec, vec_name, &cfg,
req, desc, tsgl, hashstate); if (err) return err;
cond_resched();
}
} return 0;
}
/* * Generate a hash test vector from the given implementation. * Assumes the buffers in 'vec' were already allocated.
*/ staticvoid generate_random_hash_testvec(struct rnd_state *rng, struct ahash_request *req, struct hash_testvec *vec, unsignedint maxkeysize, unsignedint maxdatasize, char *name, size_t max_namelen)
{ /* Data */
vec->psize = generate_random_length(rng, maxdatasize);
generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
/* * Key: length in range [1, maxkeysize], but usually choose maxkeysize. * If algorithm is unkeyed, then maxkeysize == 0 and set ksize = 0.
*/
vec->setkey_error = 0;
vec->ksize = 0; if (maxkeysize) {
vec->ksize = maxkeysize; if (prandom_u32_below(rng, 4) == 0)
vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
vec->setkey_error = crypto_ahash_setkey(
crypto_ahash_reqtfm(req), vec->key, vec->ksize); /* If the key couldn't be set, no need to continue to digest. */ if (vec->setkey_error) goto done;
}
tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) { /* * This algorithm is only available through the ahash * API, not the shash API, so skip the shash tests.
*/ return 0;
}
pr_err("alg: hash: failed to allocate shash transform for %s: %ld\n",
driver, PTR_ERR(tfm)); return PTR_ERR(tfm);
}
/* * Always test the ahash API. This works regardless of whether the * algorithm is implemented as ahash or shash.
*/
atfm = crypto_alloc_ahash(driver, type, mask); if (IS_ERR(atfm)) { if (PTR_ERR(atfm) == -ENOENT) return 0;
pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(atfm)); return PTR_ERR(atfm);
}
driver = crypto_ahash_driver_name(atfm);
req = ahash_request_alloc(atfm, GFP_KERNEL); if (!req) {
pr_err("alg: hash: failed to allocate request for %s\n",
driver);
err = -ENOMEM; goto out;
}
/* * If available also test the shash API, to cover corner cases that may * be missed by testing the ahash API only.
*/
err = alloc_shash(driver, type, mask, &stfm, &desc); if (err) goto out;
tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL); if (!tsgl || init_test_sglist(tsgl) != 0) {
pr_err("alg: hash: failed to allocate test buffers for %s\n",
driver);
kfree(tsgl);
tsgl = NULL;
err = -ENOMEM; goto out;
}
statesize = crypto_ahash_statesize(atfm); if (stfm)
statesize = max(statesize, crypto_shash_statesize(stfm));
hashstate = kmalloc(statesize + TESTMGR_POISON_LEN, GFP_KERNEL); if (!hashstate) {
pr_err("alg: hash: failed to allocate hash state buffer for %s\n",
driver);
err = -ENOMEM; goto out;
}
for (i = 0; i < num_vecs; i++) { if (fips_enabled && vecs[i].fips_skip) continue;
/* * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests * first, before setting a key on the tfm. To make this easier, we * require that the unkeyed test vectors (if any) are listed first.
*/
for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) { if (template[nr_unkeyed].ksize) break;
} for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) { if (!template[nr_unkeyed + nr_keyed].ksize) {
pr_err("alg: hash: test vectors for %s out of order, " "unkeyed ones must come first\n", desc->alg); return -EINVAL;
}
maxkeysize = max_t(unsignedint, maxkeysize, template[nr_unkeyed + nr_keyed].ksize);
}
/* Set the key */ if (vec->wk)
crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); else
crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = do_setkey(crypto_aead_setkey, tfm, vec->key, vec->klen,
cfg, alignmask); if (err && err != vec->setkey_error) {
pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
driver, vec_name, vec->setkey_error, err,
crypto_aead_get_flags(tfm)); return err;
} if (!err && vec->setkey_error) {
pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
driver, vec_name, vec->setkey_error); return -EINVAL;
}
/* Set the authentication tag size */
err = crypto_aead_setauthsize(tfm, authsize); if (err && err != vec->setauthsize_error) {
pr_err("alg: aead: %s setauthsize failed on test vector %s; expected_error=%d, actual_error=%d\n",
driver, vec_name, vec->setauthsize_error, err); return err;
} if (!err && vec->setauthsize_error) {
pr_err("alg: aead: %s setauthsize unexpectedly succeeded on test vector %s; expected_error=%d\n",
driver, vec_name, vec->setauthsize_error); return -EINVAL;
}
if (vec->setkey_error || vec->setauthsize_error) return 0;
/* The IV must be copied to a buffer, as the algorithm may modify it */ if (WARN_ON(ivsize > MAX_IVLEN)) return -EINVAL; if (vec->iv)
memcpy(iv, vec->iv, ivsize); else
memset(iv, 0, ivsize);
/*
 * Make at least one random change to a (ciphertext, AAD) pair.  "Ciphertext"
 * here means the full ciphertext including the authentication tag.  The
 * [SOURCE TRUNCATED: the extraction tool stopped here ("maximum size
 *  reached"); the remainder of this comment and of the file is missing.]
 */