/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
 * untagging, the mark is stable as long as there is chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and check that the mark is alive by
 * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to
 * the current chunk.
 *
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded .refs. Mark associated with the chunk holds
 * one chunk reference. This reference is dropped either when a mark is going
 * to be freed (corresponding inode goes away) or when chunk attached to the
 * mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
 * grace period as it protects RCU readers of the hash table.
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */
/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	/* Defer the actual free until all current RCU readers are done. */
	call_rcu(&chunk->head, __put_chunk);
}
/* Function to return search key in our hash from inode. */ staticunsignedlong inode_to_key(conststruct inode *inode)
{ /* Use address pointed to by connector->obj as the key */ return (unsignedlong)&inode->i_fsnotify_marks;
}
staticinlinestruct list_head *chunk_hash(unsignedlong key)
{ unsignedlong n = key / L1_CACHE_BYTES; return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	/* A zero key means the caller forgot to initialize the chunk. */
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}
list_for_each_entry_rcu(p, list, hash) { /* * We use a data dependency barrier in READ_ONCE() to make sure * the chunk we see is fully initialized.
*/ if (READ_ONCE(p->key) == key) {
atomic_long_inc(&p->refs); return p;
}
} return NULL;
}
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{ int n; for (n = 0; n < chunk->count; n++) if (chunk->owners[n].owner == tree) returntrue; returnfalse;
}
/* tagging and untagging inodes with trees */
staticstruct audit_chunk *find_chunk(struct audit_node *p)
{ int index = p->index & ~(1U<<31);
p -= index; return container_of(p, struct audit_chunk, owners[0]);
}
assert_spin_locked(&hash_lock);
old = mark_chunk(mark);
audit_mark(mark)->chunk = chunk; if (chunk)
chunk->mark = mark; if (old)
old->mark = NULL;
}
/*
 * Move the chunk state (key, tree links, live owner slots) from @old to @new,
 * compacting away empty owner slots, and make @new visible to RCU readers in
 * place of @old. Caller holds hash_lock (and the group's mark_mutex, per the
 * callers visible in this file).
 */
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (!old->owners[j].owner) {
			/* Empty slot - skip it and keep @new compact. */
			i--;
			continue;
		}
		/* Guaranteed non-NULL by the check above. */
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		/* Rebase node.index onto the slot's new position; MSB tag bit
		 * is preserved by the arithmetic. */
		new->owners[i].index = old->owners[j].index - j + i;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	replace_mark_chunk(old->mark, new);
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}
fsnotify_group_lock(audit_tree_group); /* * mark_mutex stabilizes chunk attached to the mark so we can check * whether it didn't change while we've dropped hash_lock.
*/ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
mark_chunk(mark) != chunk) goto out_mutex;
new = alloc_chunk(size); if (!new) goto out_mutex;
spin_lock(&hash_lock); /* * This has to go last when updating chunk as once replace_chunk() is * called, new RCU readers can see the new chunk.
*/
replace_chunk(new, chunk);
spin_unlock(&hash_lock);
fsnotify_group_unlock(audit_tree_group);
audit_mark_put_chunk(chunk); return;
spin_lock(&hash_lock); if (tree->goner) {
spin_unlock(&hash_lock);
fsnotify_detach_mark(mark);
fsnotify_group_unlock(audit_tree_group);
fsnotify_free_mark(mark);
fsnotify_put_mark(mark);
kfree(chunk); return 0;
}
replace_mark_chunk(mark, chunk);
chunk->owners[0].index = (1U << 31);
chunk->owners[0].owner = tree;
get_tree(tree);
list_add(&chunk->owners[0].list, &tree->chunks); if (!tree->root) {
tree->root = chunk;
list_add(&tree->same_root, &chunk->trees);
}
chunk->key = inode_to_key(inode); /* * Inserting into the hash table has to go last as once we do that RCU * readers can see the chunk.
*/
insert_hash(chunk);
spin_unlock(&hash_lock);
fsnotify_group_unlock(audit_tree_group); /* * Drop our initial reference. When mark we point to is getting freed, * we get notification through ->freeing_mark callback and cleanup * chunk pointing to this mark.
*/
fsnotify_put_mark(mark); return 0;
}
/* the first tagged inode becomes root of tree */ staticint tag_chunk(struct inode *inode, struct audit_tree *tree)
{ struct fsnotify_mark *mark; struct audit_chunk *chunk, *old; struct audit_node *p; int n;
fsnotify_group_lock(audit_tree_group);
mark = fsnotify_find_inode_mark(inode, audit_tree_group); if (!mark) return create_chunk(inode, tree);
/* * Found mark is guaranteed to be attached and mark_mutex protects mark * from getting detached and thus it makes sure there is chunk attached * to the mark.
*/ /* are we already there? */
spin_lock(&hash_lock);
old = mark_chunk(mark); for (n = 0; n < old->count; n++) { if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
fsnotify_group_unlock(audit_tree_group);
fsnotify_put_mark(mark); return 0;
}
}
spin_unlock(&hash_lock);
spin_lock(&hash_lock); if (tree->goner) {
spin_unlock(&hash_lock);
fsnotify_group_unlock(audit_tree_group);
fsnotify_put_mark(mark);
kfree(chunk); return 0;
}
p = &chunk->owners[chunk->count - 1];
p->index = (chunk->count - 1) | (1U<<31);
p->owner = tree;
get_tree(tree);
list_add(&p->list, &tree->chunks); if (!tree->root) {
tree->root = chunk;
list_add(&tree->same_root, &chunk->trees);
} /* * This has to go last when updating chunk as once replace_chunk() is * called, new RCU readers can see the new chunk.
*/
replace_chunk(chunk, old);
spin_unlock(&hash_lock);
fsnotify_group_unlock(audit_tree_group);
fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
audit_mark_put_chunk(old);
list_del_init(&rule->rlist); if (rule->tree) { /* not a half-baked one */
audit_tree_log_remove_rule(context, rule); if (entry->rule.exe)
audit_remove_mark(entry->rule.exe);
rule->tree = NULL;
list_del_rcu(&entry->list);
list_del(&entry->rule.list);
call_rcu(&entry->rcu, audit_free_rule_rcu);
}
}
}
/* * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged * chunks. The function expects tagged chunks are all at the beginning of the * chunks list.
*/ staticvoid prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
spin_lock(&hash_lock); while (!list_empty(&victim->chunks)) { struct audit_node *p; struct audit_chunk *chunk; struct fsnotify_mark *mark;
p = list_first_entry(&victim->chunks, struct audit_node, list); /* have we run out of marked? */ if (tagged && !(p->index & (1U<<31))) break;
chunk = find_chunk(p);
mark = chunk->mark;
remove_chunk_node(chunk, p); /* Racing with audit_tree_freeing_mark()? */ if (!mark) continue;
fsnotify_get_mark(mark);
spin_unlock(&hash_lock);
/*
 * Tag the inode behind every path in the array terminated by a NULL dentry.
 * Returns 0 on success or the first error from tag_chunk(); on error, paths
 * already tagged are left tagged (callers clean up via tree pruning).
 */
static int tag_mounts(struct path *paths, struct audit_tree *tree)
{
	for (struct path *p = paths; p->dentry; p++) {
		int err = tag_chunk(p->dentry->d_inode, tree);

		if (err)
			return err;
	}
	return 0;
}
/* * That gets run when evict_chunk() ends up needing to kill audit_tree. * Runs from a separate thread.
*/ staticint prune_tree_thread(void *unused)
{ for (;;) { if (list_empty(&prune_list)) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
/* * ... and that one is done if evict_chunk() decides to delay until the end * of syscall. Runs synchronously.
*/ void audit_kill_trees(struct audit_context *context)
{ struct list_head *list = &context->killed_trees;
/* * We are guaranteed to have at least one reference to the mark from * either the inode or the caller of fsnotify_destroy_mark().
*/
BUG_ON(refcount_read(&mark->refcnt) < 1);
}
audit_tree_group = fsnotify_alloc_group(&audit_tree_ops, 0); if (IS_ERR(audit_tree_group))
audit_panic("cannot initialize fsnotify group for rectree watches");
for (i = 0; i < HASH_SIZE; i++)
INIT_LIST_HEAD(&chunk_hash_heads[i]);
return 0;
}
__initcall(audit_tree_init);
¤ Dauer der Verarbeitung: 0.6 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.