/* seq_file start op: returns the pos'th measurement entry, or NULL past the end */
static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
{
	struct ima_queue_entry *entry;
	loff_t skip = *pos;

	/* we need a lock since pos could point beyond last element */
	rcu_read_lock();
	list_for_each_entry_rcu(entry, &ima_measurements, later) {
		if (skip-- == 0) {
			rcu_read_unlock();
			return entry;
		}
	}
	rcu_read_unlock();
	return NULL;
}
/* lock protects when reading beyond last element * against concurrent list-extension
*/
rcu_read_lock();
qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
rcu_read_unlock();
(*pos)++;
/* Emit datalen raw bytes from data into the seq_file, one byte at a time. */
void ima_putc(struct seq_file *m, void *data, int datalen)
{
	char *p = data;

	while (datalen--)
		seq_putc(m, *p++);
}
/* print format: * 32bit-le=pcr# * char[n]=template digest * 32bit-le=template name size * char[n]=template name * [eventdata length] * eventdata[n]=template specific data
*/ int ima_measurements_show(struct seq_file *m, void *v)
{ /* the list never shrinks, so we don't need a lock here */ struct ima_queue_entry *qe = v; struct ima_template_entry *e; char *template_name;
u32 pcr, namelen, template_data_len; /* temporary fields */ bool is_ima_template = false; enum hash_algo algo; int i, algo_idx;
/* * 1st: PCRIndex * PCR used defaults to the same (config option) in * little-endian format, unless set in policy
*/
pcr = !ima_canonical_fmt ? e->pcr : (__force u32)cpu_to_le32(e->pcr);
ima_putc(m, &pcr, sizeof(e->pcr));
/* 6th: template specific data */ for (i = 0; i < e->template_desc->num_fields; i++) { enum ima_show_type show = IMA_SHOW_BINARY; conststruct ima_template_field *field =
e->template_desc->fields[i];
if (is_ima_template && strcmp(field->field_id, "d") == 0)
show = IMA_SHOW_BINARY_NO_FIELD_LEN; if (is_ima_template && strcmp(field->field_id, "n") == 0)
show = IMA_SHOW_BINARY_OLD_STRING_FMT;
field->field_show(m, show, &e->template_data[i]);
} return 0;
}
for (i = 0; i < size; i++)
seq_printf(m, "%02x", *(digest + i));
}
/* print in ascii */ staticint ima_ascii_measurements_show(struct seq_file *m, void *v)
{ /* the list never shrinks, so we don't need a lock here */ struct ima_queue_entry *qe = v; struct ima_template_entry *e; char *template_name; enum hash_algo algo; int i, algo_idx;
/* * ima_open_policy: sequentialize access to the policy file
*/ staticint ima_open_policy(struct inode *inode, struct file *filp)
{ if (!(filp->f_flags & O_WRONLY)) { #ifndef CONFIG_IMA_READ_POLICY return -EACCES; #else if ((filp->f_flags & O_ACCMODE) != O_RDONLY) return -EACCES; if (!capable(CAP_SYS_ADMIN)) return -EPERM; return seq_open(filp, &ima_policy_seqops); #endif
} if (test_and_set_bit(IMA_FS_BUSY, &ima_fs_flags)) return -EBUSY; return 0;
}
/* * ima_release_policy - start using the new measure policy rules. * * Initially, ima_measure points to the default policy rules, now * point to the new policy rules, and remove the securityfs policy file, * assuming a valid policy.
*/ staticint ima_release_policy(struct inode *inode, struct file *file)
{ constchar *cause = valid_policy ? "completed" : "failed";
if ((file->f_flags & O_ACCMODE) == O_RDONLY) return seq_release(inode, file);
if (valid_policy && ima_check_policy() < 0) {
cause = "failed";
valid_policy = 0;
}
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt.
Es wird jedoch weder Vollständigkeit noch Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.