/* Slot index of SHA1 in the per-bank digest/algo arrays (see its use as
 * an index into entry->digests[] / ima_algo_array[] below). */
int ima_sha1_idx __ro_after_init;
/* Presumably the slot index of the IMA default hash algorithm — not
 * referenced in this chunk; verify against the rest of the file. */
int ima_hash_algo_idx __ro_after_init;
/*
 * Additional number of slots reserved, as needed, for SHA1
 * and IMA default algo.
 */
int ima_extra_slots __ro_after_init;
/*
 * NOTE(review): fragment of ima_alloc_tfm() — the function header and the
 * allocation path are not visible in this chunk. This loop returns a cached
 * transform from ima_algo_array[] when one already exists for @algo.
 */
for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
	if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
		return ima_algo_array[i].tfm;
/*
 * ima_free_tfm - free a hash transform unless it is one of the shared ones.
 * @tfm: transform returned earlier by ima_alloc_tfm()
 *
 * The default transform (ima_shash_tfm) and any transform cached in
 * ima_algo_array[] are long-lived and must not be freed here; only a
 * transform allocated ad hoc for an uncached algorithm is released.
 */
static void ima_free_tfm(struct crypto_shash *tfm)
{
	int i;

	/* Never free the shared default transform. */
	if (tfm == ima_shash_tfm)
		return;

	/* Never free a transform cached for one of the TPM banks / extras. */
	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm == tfm)
			return;

	crypto_free_shash(tfm);
}
/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size: Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn: Should the min_size allocation warn or not.
 *
 * Tries to do opportunistic allocation for memory first trying to allocate
 * max_size amount of memory and then splitting that until zero order is
 * reached. Allocation is tried without generating allocation warnings unless
 * last_warn is set. Last_warn set affects only last allocation of zero order.
 *
 * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL)
 *
 * Return pointer to allocated memory, or NULL on failure.
 */
/*
 * NOTE(review): "staticvoid" below should read "static void" — this looks
 * like an extraction artifact and will not compile as-is; it cannot be
 * fixed in a comment-only pass.
 */
staticvoid *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			    int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	/* Opportunistic first attempts: reclaim allowed, but no warnings
	 * and no retries — failure here falls through to smaller orders. */
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);
	/*
	 * NOTE(review): function is truncated here — the descending-order
	 * allocation loop and the final return are missing from this chunk;
	 * verify against the original file.
	 */
/*
 * NOTE(review): fragment of an ahash-based file-hash routine (likely
 * ima_calc_file_hash_atfm()) — the function header and local declarations
 * are not visible in this chunk.
 */
/* Kick off the hash and wait for the init request to complete. */
rc = ahash_wait(crypto_ahash_init(req), &wait);
if (rc)
	goto out1;

i_size = i_size_read(file_inode(file));

/* Empty file: nothing to update, go straight to finalization. */
if (i_size == 0)
	goto out2;

/*
 * Try to allocate maximum size of memory.
 * Fail if even a single page cannot be allocated.
 */
rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
if (!rbuf[0]) {
	rc = -ENOMEM;
	goto out1;
}

/* Only allocate one buffer if that is enough. */
if (i_size > rbuf_size[0]) {
	/*
	 * Try to allocate secondary buffer. If that fails fallback to
	 * using single buffering. Use previous memory allocation size
	 * as baseline for possible allocation size.
	 */
	rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
				  &rbuf_size[1], 0);
}

for (offset = 0; offset < i_size; offset += rbuf_len) {
	if (!rbuf[1] && offset) {
		/* Not using two buffers, and it is not the first
		 * read/request, wait for the completion of the
		 * previous ahash_update() request.
		 */
		rc = ahash_wait(ahash_rc, &wait);
		if (rc)
			goto out3;
	}
	/* read buffer */
	rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
	rc = integrity_kernel_read(file, offset, rbuf[active],
				   rbuf_len);
	if (rc != rbuf_len) {
		if (rc >= 0)
			rc = -EINVAL;
		/*
		 * Forward current rc, do not overwrite with return value
		 * from ahash_wait()
		 */
		ahash_wait(ahash_rc, &wait);
		goto out3;
	}

	if (rbuf[1] && offset) {
		/* Using two buffers, and it is not the first
		 * read/request, wait for the completion of the
		 * previous ahash_update() request.
		 */
		rc = ahash_wait(ahash_rc, &wait);
		if (rc)
			goto out3;
	}

	/*
	 * NOTE(review): the submission of the just-read buffer to the hash
	 * (scatterlist setup + crypto_ahash_update(), which would assign
	 * ahash_rc) appears to be missing here — nothing in this loop sets
	 * ahash_rc. Likely lost in extraction; verify against the original.
	 */
	if (rbuf[1])
		active = !active; /* swap buffers, if we use two */
}
/* wait for the last update request to complete */
rc = ahash_wait(ahash_rc, &wait);
out3:
ima_free_pages(rbuf[0], rbuf_size[0]);
ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
if (!rc) {
	/* All updates done: collect the final digest into hash->digest. */
	ahash_request_set_crypt(req, NULL, hash->digest, 0);
	rc = ahash_wait(crypto_ahash_final(req), &wait);
}
out1:
ahash_request_free(req);
return rc;
}
/*
 * NOTE(review): tail of a shash-based file-hash wrapper (it calls
 * ima_calc_file_hash_tfm()) — the function header is not visible in
 * this chunk.
 */
tfm = ima_alloc_tfm(hash->algo);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

rc = ima_calc_file_hash_tfm(file, hash, tfm);

/* ima_free_tfm() is a no-op for shared/cached transforms. */
ima_free_tfm(tfm);

return rc;
}
/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false;

	/*
	 * For consistency, fail file's opened with the O_DIRECT flag on
	 * filesystems mounted with/without DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		/* Strip write/creation flags before reopening read-only. */
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f))
			return PTR_ERR(f);
		new_file_instance = true;
	}

	i_size = i_size_read(file_inode(f));

	/* Prefer ahash for large files when a minimum size is configured. */
	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}
	/*
	 * NOTE(review): function is truncated here — the shash fallback
	 * call, the "out:" label targeted above, the release of any new
	 * file instance, and the closing brace are missing from this chunk.
	 */
/*
 * NOTE(review): mid-body fragment of a field-array hashing routine — the
 * function header is not visible in this chunk. The loop fills
 * entry->digests[] for every allocated TPM bank plus the extra slots,
 * skipping the SHA1 slot (presumably computed earlier — verify against
 * the full function).
 */
for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
	if (i == ima_sha1_idx)
		continue;

	/* Only real TPM banks carry an alg_id from the chip. */
	if (i < NR_BANKS(ima_tpm_chip)) {
		alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
		entry->digests[i].alg_id = alg_id;
	}

	/* for unmapped TPM algorithms digest is still a padded SHA1 */
	if (!ima_algo_array[i].tfm) {
		memcpy(entry->digests[i].digest,
		       entry->digests[ima_sha1_idx].digest,
		       TPM_DIGEST_SIZE);
		continue;
	}
	/*
	 * NOTE(review): the rest of the loop body (hashing with the
	 * per-slot tfm) and the loop's closing brace are missing from
	 * this chunk.
	 */
/*
 * NOTE(review): tail of a buffer-hash wrapper (it calls
 * calc_buffer_shash_tfm()) — the function header is not visible in this
 * chunk.
 */
tfm = ima_alloc_tfm(hash->algo);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

/* ima_free_tfm() is a no-op for shared/cached transforms. */
ima_free_tfm(tfm);
return rc;
}
/*
 * ima_calc_buffer_hash - compute the digest of an in-memory buffer.
 * @buf:  data to hash
 * @len:  number of bytes at @buf
 * @hash: hash->algo selects the algorithm; the result is stored via the
 *        called helpers
 *
 * When the ima.ahash_minsize threshold is configured and the buffer is at
 * least that large, try the async-hash path first; on any ahash failure
 * (or when ahash is not enabled) fall back to the synchronous shash path.
 *
 * Return: 0 on success, a negative error code on failure.
 */
int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	/* ahash unavailable or failed: use the shash fallback. */
	return calc_buffer_shash(buf, len, hash);
}
/*
 * ima_pcrread - read TPM PCR @idx into @d.
 * @idx: PCR index to read
 * @d:   destination digest; d->alg_id selects the bank
 *
 * Silently does nothing when no TPM chip was discovered at init. A read
 * failure is only logged; the caller sees whatever @d already contained
 * (for the boot aggregate this means a zeroed digest is accumulated).
 */
static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}
/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
 * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
 * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
 * allowing firmware to configure and enable different banks.
 *
 * Knowing which TPM bank is read to calculate the boot_aggregate digest
 * needs to be conveyed to a verifier. For this reason, use the same
 * hash algorithm for reading the TPM PCRs as for calculating the boot
 * aggregate digest as stored in the measurement list.
 */
static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
				       struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
		 d.alg_id);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative digest over TPM registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, d.digest,
					 crypto_shash_digestsize(tfm));
		if (rc != 0)
			return rc;
	}
	/*
	 * Extend cumulative digest over TPM registers 8-9, which contain
	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
	 * in a typical PCR allocation. Registers 8-9 are only included in
	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
	 */
	if (alg_id != TPM_ALG_SHA1) {
		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
			ima_pcrread(i, &d);
			rc = crypto_shash_update(shash, d.digest,
						 crypto_shash_digestsize(tfm));
			/* fix: an update failure was previously ignored
			 * until after the loop */
			if (rc != 0)
				return rc;
		}
	}
	if (!rc)
		/* fix: propagate crypto_shash_final()'s error instead of
		 * discarding it and returning success with a bad digest */
		rc = crypto_shash_final(shash, digest);
	return rc;
}
/*
 * ima_calc_boot_aggregate - compute the boot aggregate for @hash->algo.
 *
 * Scans the TPM's allocated banks for one whose crypto_id matches the
 * requested algorithm, recording its index in bank_idx.
 */
int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

	/* Find the TPM bank matching the requested hash algorithm. */
	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}
	/*
	 * NOTE(review): function is truncated here — the loop's closing
	 * brace and the remainder of the function (bank fallback handling,
	 * tfm allocation, the call into ima_calc_boot_aggregate_tfm(), and
	 * the closing brace) are missing from this chunk.
	 */
/*
 * NOTE(review): the following text is extraction residue (a German web-page
 * disclaimer), not kernel source, and should be removed from this file.
 * Original, with English translation:
 *   "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *    sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 *    noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 *    zugesichert. Bemerkung: Die farbliche Syntaxdarstellung und die
 *    Messung sind noch experimentell."
 *   ("The information on this website was carefully compiled to the best
 *    of our knowledge. However, neither completeness, correctness, nor
 *    quality of the provided information is guaranteed. Note: the colored
 *    syntax highlighting and the measurement are still experimental.")
 */