/*
 * apparmor_cred_free - put the label associated with a set of credentials
 * @cred: cred whose security blob is being freed
 *
 * Drops the reference held on the cred's label, then clears the blob so
 * no stale pointer is left behind.
 */
static void apparmor_cred_free(struct cred *cred)
{
	aa_put_label(cred_label(cred));
	set_cred_label(cred, NULL);
}
/*
 * apparmor_cred_alloc_blank - allocate the apparmor part of blank credentials
 * @cred: cred being initialized
 * @gfp: allocation flags (unused; the blob itself is managed by the LSM core)
 *
 * Returns: 0 (cannot fail; just clears the label slot)
 */
static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
	set_cred_label(cred, NULL);
	return 0;
}
/*
 * apparmor_cred_prepare - prepare new cred label for modification by prepare_cred block
 * @new: cred being prepared
 * @old: cred the new cred is based on
 * @gfp: allocation flags (unused here)
 *
 * Takes a reference on the newest version of @old's label and installs it
 * on @new.
 *
 * Returns: 0 (cannot fail)
 */
static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
				 gfp_t gfp)
{
	set_cred_label(new, aa_get_newest_label(cred_label(old)));
	return 0;
}
/*
 * apparmor_cred_transfer - transfer the apparmor data to a blank set of creds
 * @new: blank cred receiving the label
 * @old: cred the label is taken from
 */
static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
{
	set_cred_label(new, aa_get_newest_label(cred_label(old)));
}
/* * cap_capget is stacked ahead of this and will * initialize effective and permitted.
*/ if (!unconfined(label)) { struct aa_profile *profile; struct label_it i;
if (!path_mediated_fs(file->f_path.dentry)) return 0;
/* If in exec, permission is handled by bprm hooks. * Cache permissions granted by the previous exec check, with * implicit read and executable mmap which are required to * actually execute the image. * * Illogically, FMODE_EXEC is in f_flags, not f_mode.
*/ if (file->f_flags & __FMODE_EXEC) {
fctx->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP; return 0;
}
error = aa_path_perm(OP_OPEN, file->f_cred,
label, &file->f_path, 0,
aa_map_file_to_perms(file), &cond); /* todo cache full allowed permissions set and state */
fctx->allow = aa_map_file_to_perms(file);
}
aa_put_label_condref(label, needput);
/** * apparmor_uring_override_creds - check the requested cred override * @new: the target creds * * Check to see if the current task is allowed to override it's credentials * to service an io_uring operation.
*/ staticint apparmor_uring_override_creds(conststruct cred *new)
{ struct aa_profile *profile; struct aa_label *label; int error; bool needput;
DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING,
OP_URING_OVERRIDE);
/** * apparmor_uring_sqpoll - check if a io_uring polling thread can be created * * Check to see if the current task is allowed to create a new io_uring * kernel polling thread.
*/ staticint apparmor_uring_sqpoll(void)
{ struct aa_profile *profile; struct aa_label *label; int error; bool needput;
DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING,
OP_URING_SQPOLL);
if (attr) return do_setattr(attr, value, size); return -EINVAL;
}
/** * apparmor_bprm_committing_creds - do task cleanup on committing new creds * @bprm: binprm for the exec (NOT NULL)
*/ staticvoid apparmor_bprm_committing_creds(conststruct linux_binprm *bprm)
{ struct aa_label *label = aa_current_raw_label(); struct aa_label *new_label = cred_label(bprm->cred);
/* bail out if unconfined or not changing profile */ if ((new_label->proxy == label->proxy) ||
(unconfined(new_label))) return;
aa_inherit_files(bprm->cred, current->files);
current->pdeath_signal = 0;
/* reset soft limits and set hard limits for the new label */
__aa_transition_rlimits(label, new_label);
}
/** * apparmor_bprm_committed_creds() - do cleanup after new creds committed * @bprm: binprm for the exec (NOT NULL)
 */ staticvoid apparmor_bprm_committed_creds(conststruct linux_binprm *bprm)
{ /* clear out temporary/transitional state from the context */
aa_clear_task_ctx_trans(task_ctx(current));
/*
 * NOTE(review): `ctx` is not declared anywhere in this function, so the
 * three aa_put_label() calls below cannot compile as written.  They look
 * like they were spliced in from a different hook whose context owns
 * label/peer/peer_lastupdate fields (e.g. a sk/ctx free routine) — compare
 * against the upstream AppArmor lsm.c and restore the original split
 * between the two functions.  Also note `staticvoid`/`conststruct` above
 * are missing spaces.
 */
/* dead these won't be updated any more */
aa_put_label(rcu_dereference_protected(ctx->label, true));
aa_put_label(rcu_dereference_protected(ctx->peer, true));
aa_put_label(rcu_dereference_protected(ctx->peer_lastupdate, true));
}
/** * apparmor_sk_clone_security - clone the sk_security field * @sk: sock to have security cloned * @newsk: sock getting clone
 */ staticvoid apparmor_sk_clone_security(conststruct sock *sk, struct sock *newsk)
{ struct aa_sk_ctx *ctx = aa_sock(sk); struct aa_sk_ctx *new = aa_sock(newsk);
/* not actually in use yet */ if (rcu_access_pointer(ctx->label) != rcu_access_pointer(new->label)) {
aa_put_label(rcu_dereference_protected(new->label, true));
rcu_assign_pointer(new->label, aa_get_label_rcu(&ctx->label));
}
if (rcu_access_pointer(ctx->peer) != rcu_access_pointer(new->peer)) {
aa_put_label(rcu_dereference_protected(new->peer, true));
rcu_assign_pointer(new->peer, aa_get_label_rcu(&ctx->peer));
}
/*
 * NOTE(review): the two assignments below reference `sk_ctx` and `label`,
 * neither of which is declared in this function — they appear spliced in
 * from the unix-connect peer-setup path and cannot compile here.  Verify
 * against the upstream file and move them back to their owning function.
 * The commented-out spin_unlock() suggests locking code was also lost.
 */
rcu_assign_pointer(sk_ctx->peer, aa_get_label(label));
rcu_assign_pointer(sk_ctx->peer_lastupdate, label); /* transfer cnt */ //spin_unlock(&sk_ctx->lock);
}
/**
 * apparmor_unix_stream_connect - check perms before making unix domain conn
 * @sk: sk attempting to connect
 * @peer_sk: sk that is accepting the connection
 * @newsk: new sk created for this connection
 *
 * peer is locked when this hook is called
 *
 * Return:
 *   0 if connection is permitted
 *   error code on denial or failure
 */
static int apparmor_unix_stream_connect(struct sock *sk, struct sock *peer_sk,
					struct sock *newsk)
{
	struct aa_sk_ctx *sk_ctx = aa_sock(sk);
	struct aa_sk_ctx *peer_ctx = aa_sock(peer_sk);
	struct aa_sk_ctx *new_ctx = aa_sock(newsk);
	struct aa_label *label;
	int error;
	bool needput;

	/*
	 * NOTE(review): label/error/needput are declared but never used in
	 * this chunk; the permission check that presumably used them appears
	 * to be missing here — confirm against the full upstream file before
	 * relying on this hook for mediation.
	 */

	/* newsk doesn't go through post_create, but does go through
	 * security_sk_alloc()
	 */
	rcu_assign_pointer(new_ctx->label,
			   aa_get_label(rcu_dereference_protected(peer_ctx->label,
								  true)));

	/* Cross reference the peer labels for SO_PEERSEC */
	unix_connect_peers(sk_ctx, new_ctx);

	return 0;
}
/** * apparmor_unix_may_send - check perms before conn or sending unix dgrams * @sock: socket sending the message * @peer: socket message is being send to * * Performs bidirectional permission checks for Unix domain socket communication: * 1. Verifies sender has AA_MAY_SEND to target socket * 2. Verifies receiver has AA_MAY_RECEIVE from source socket * * sock and peer are locked when this hook is called * called by: dgram_connect peer setup but path not copied to newsk * * Return: * 0 if transmission is permitted * error code on denial or failure
*/ staticint apparmor_unix_may_send(struct socket *sock, struct socket *peer)
{ struct aa_sk_ctx *peer_ctx = aa_sock(peer->sk); struct aa_label *label; int error; bool needput;
/** * apparmor_socket_post_create - setup the per-socket security struct * @sock: socket that is being setup * @family: family of socket being created * @type: type of the socket * @protocol: protocol of the socket * @kern: socket is a special kernel socket * * Note: * - kernel sockets labeled kernel_t used to use unconfined * - socket may not have sk here if created with sock_create_lite or * sock_alloc. These should be accept cases which will be handled in * sock_graft.
*/ staticint apparmor_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern)
{ struct aa_label *label;
if (sock->sk) { struct aa_sk_ctx *ctx = aa_sock(sock->sk);
/* still not live */
aa_put_label(rcu_dereference_protected(ctx->label, true));
rcu_assign_pointer(ctx->label, aa_get_label(label));
}
aa_put_label(label);
/** * apparmor_socket_bind - check perms before bind addr to socket * @sock: socket to bind the address to (must be non-NULL) * @address: address that is being bound (must be non-NULL) * @addrlen: length of @address * * Performs security checks before allowing a socket to bind to an address. * Handles Unix domain sockets specially through aa_unix_bind_perm(). * For other socket families, uses generic permission check via aa_sk_perm(). * * Return: * 0 if binding is permitted * error code on denial or invalid parameters
*/ staticint apparmor_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(!address);
AA_BUG(in_interrupt());
/* * Note: while @newsock is created and has some information, the accept * has not been done.
*/ staticint apparmor_socket_accept(struct socket *sock, struct socket *newsock)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(!newsock);
AA_BUG(in_interrupt());
#ifdef CONFIG_NETWORK_SECMARK /** * apparmor_socket_sock_rcv_skb - check perms before associating skb to sk * @sk: sk to associate @skb with * @skb: skb to check for perms * * Note: can not sleep may be called with locks held * * dont want protocol specific in __skb_recv_datagram() * to deny an incoming connection socket_sock_rcv_skb()
*/ staticint apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{ struct aa_sk_ctx *ctx = aa_sock(sk); int error;
if (!skb->secmark) return 0;
/* * If reach here before socket_post_create hook is called, in which * case label is null, drop the packet.
*/ if (!rcu_access_pointer(ctx->label)) return -EACCES;
if (rcu_access_pointer(ctx->peer)) return aa_get_label_rcu(&ctx->peer);
if (sk->sk_family != PF_UNIX) return ERR_PTR(-ENOPROTOOPT);
return label;
}
/** * apparmor_socket_getpeersec_stream - get security context of peer * @sock: socket that we are trying to get the peer context of * @optval: output - buffer to copy peer name to * @optlen: output - size of copied name in @optval * @len: size of @optval buffer * Returns: 0 on success, -errno of failure * * Note: for tcp only valid if using ipsec or cipso on lan
*/ staticint apparmor_socket_getpeersec_stream(struct socket *sock,
sockptr_t optval, sockptr_t optlen, unsignedint len)
{ char *name = NULL; int slen, error = 0; struct aa_label *label; struct aa_label *peer;
peer = sk_peer_get_label(sock->sk); if (IS_ERR(peer)) {
error = PTR_ERR(peer); goto done;
}
label = begin_current_label_crit_section();
slen = aa_label_asxprint(&name, labels_ns(label), peer,
FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
FLAG_HIDDEN_UNCONFINED, GFP_KERNEL); /* don't include terminating \0 in slen, it breaks some apps */ if (slen < 0) {
error = -ENOMEM; goto done_put;
} if (slen > len) {
error = -ERANGE; goto done_len;
}
if (copy_to_sockptr(optval, name, slen))
error = -EFAULT;
done_len: if (copy_to_sockptr(optlen, &slen, sizeof(slen)))
error = -EFAULT;
/**
 * apparmor_socket_getpeersec_dgram - get security label of packet
 * @sock: the peer socket
 * @skb: packet data
 * @secid: pointer to where to put the secid of the packet
 *
 * Sets the netlabel socket state on sk from parent
 *
 * Return: always -ENOPROTOOPT — secid support is not implemented yet.
 */
static int apparmor_socket_getpeersec_dgram(struct socket *sock,
					    struct sk_buff *skb, u32 *secid)
{
	/* TODO: requires secid support */
	return -ENOPROTOOPT;
}
/**
 * apparmor_sock_graft - Initialize newly created socket
 * @sk: child sock
 * @parent: parent socket
 *
 * Note: could set off of SOCK_CTX(parent) but need to track inode and we can
 * just set sk security information off of current creating process label
 * Labeling of sk for accept case - probably should be sock based
 * instead of task, because of the case where an implicitly labeled
 * socket is shared by different tasks.
 */
static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);

	/* setup - not live: only label if nothing set it yet */
	if (!rcu_access_pointer(ctx->label))
		rcu_assign_pointer(ctx->label, aa_get_current_label());
}
/*
 * Determines if the audit header is included in audited messages.  This
 * provides more context if the audit daemon is not running.
 */
bool aa_g_audit_header = true;
module_param_named(audit_header, aa_g_audit_header, aabool,
S_IRUSR | S_IWUSR);
/*
 * Lock out loading/removal of policy.
 * TODO: add in at-boot loading of policy, which is the only way to
 * load policy if lock_policy is set.
 */
bool aa_g_lock_policy;
module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy,
S_IRUSR | S_IWUSR);
/* Maximum pathname length before accesses will start getting rejected */
unsigned int aa_g_path_max = 2 * PATH_MAX;
module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
/*
 * Determines how paranoid loading of policy is and how much verification
 * on the loaded policy is done.
 * DEPRECATED: read-only, as strict checking of load is always done now
 * that non-root users (user namespaces) can load policy.
 */
bool aa_g_paranoid_load = IS_ENABLED(CONFIG_SECURITY_APPARMOR_PARANOID_LOAD);
module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
/* set global flag turning off the ability to load policy */ staticint param_set_aalockpolicy(constchar *val, conststruct kernel_param *kp)
{ if (!apparmor_enabled) return -EINVAL; if (apparmor_initialized && !aa_current_policy_admin_capable(NULL)) return -EPERM; return param_set_bool(val, kp);
}
/* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */ staticint param_set_aaintbool(constchar *val, conststruct kernel_param *kp)
{ struct kernel_param kp_local; bool value; int error;
if (apparmor_initialized) return -EPERM;
/* Create local copy, with arg pointing to bool type. */
value = !!*((int *)kp->arg);
memcpy(&kp_local, kp, sizeof(kp_local));
kp_local.arg = &value;
/* * To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to * 1/0, this converts the "int that is actually bool" back to bool for * display in the /sys filesystem, while keeping it "int" for the LSM * infrastructure.
*/ staticint param_get_aaintbool(char *buffer, conststruct kernel_param *kp)
{ struct kernel_param kp_local; bool value;
/* Create local copy, with arg pointing to bool type. */
value = !!*((int *)kp->arg);
memcpy(&kp_local, kp, sizeof(kp_local));
kp_local.arg = &value;
return param_get_bool(buffer, &kp_local);
}
staticint param_set_aacompressionlevel(constchar *val, conststruct kernel_param *kp)
{ int error;
if (!apparmor_enabled) return -EINVAL; if (apparmor_initialized) return -EPERM;
error = param_set_int(val, kp);
aa_g_rawdata_compression_level = clamp(aa_g_rawdata_compression_level,
AA_MIN_CLEVEL, AA_MAX_CLEVEL);
pr_info("AppArmor: policy rawdata compression level set to %d\n",
aa_g_rawdata_compression_level);
/* use per cpu cached buffers first */
cache = get_cpu_ptr(&aa_local_buffers); if (!list_empty(&cache->head)) {
aa_buf = list_first_entry(&cache->head, union aa_buffer, list);
list_del(&aa_buf->list);
cache->hold--;
cache->count--;
put_cpu_ptr(&aa_local_buffers); return &aa_buf->buffer[0];
}
put_cpu_ptr(&aa_local_buffers);
if (!spin_trylock(&aa_buffers_lock)) {
cache = get_cpu_ptr(&aa_local_buffers);
cache->hold += 1;
put_cpu_ptr(&aa_local_buffers);
spin_lock(&aa_buffers_lock);
} else {
cache = get_cpu_ptr(&aa_local_buffers);
put_cpu_ptr(&aa_local_buffers);
}
retry: if (buffer_count > reserve_count ||
(in_atomic && !list_empty(&aa_global_buffers))) {
aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
list);
list_del(&aa_buf->list);
buffer_count--;
spin_unlock(&aa_buffers_lock); return aa_buf->buffer;
} if (in_atomic) { /* * out of reserve buffers and in atomic context so increase * how many buffers to keep in reserve
*/
reserve_count++;
flags = GFP_ATOMIC;
}
spin_unlock(&aa_buffers_lock);
if (!in_atomic)
might_sleep();
aa_buf = kmalloc(aa_g_path_max, flags); if (!aa_buf) { if (try_again) {
try_again = false;
spin_lock(&aa_buffers_lock); goto retry;
}
pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n"); return NULL;
} return aa_buf->buffer;
}
/**
 * aa_put_buffer - return a path buffer to the cache/pool
 * @buf: buffer to put back (may be NULL — then this is a no-op)
 *
 * If the local cpu cache is not holding buffers, try to return the buffer
 * to the global pool; on lock contention (or while the cpu cache has a
 * hold count) stash it on the per-cpu list instead to avoid spinning.
 */
void aa_put_buffer(char *buf)
{
	union aa_buffer *aa_buf;
	struct aa_local_cache *cache;

	if (!buf)
		return;
	aa_buf = container_of(buf, union aa_buffer, buffer[0]);

	cache = get_cpu_ptr(&aa_local_buffers);
	if (!cache->hold) {
		put_cpu_ptr(&aa_local_buffers);

		if (spin_trylock(&aa_buffers_lock)) {
			/* put back on global list */
			list_add(&aa_buf->list, &aa_global_buffers);
			buffer_count++;
			spin_unlock(&aa_buffers_lock);
			cache = get_cpu_ptr(&aa_local_buffers);
			put_cpu_ptr(&aa_local_buffers);
			return;
		}
		/* contention on global list, fallback to percpu */
		cache = get_cpu_ptr(&aa_local_buffers);
		cache->hold += 1;
	}

	/* cache in percpu list */
	list_add(&aa_buf->list, &cache->head);
	cache->count++;
	put_cpu_ptr(&aa_local_buffers);
}
/* * AppArmor init functions
*/
/** * set_init_ctx - set a task context and profile on the first task. * * TODO: allow setting an alternate profile than unconfined
*/ staticint __init set_init_ctx(void)
{ struct cred *cred = (__force struct cred *)current->real_cred;
/*
 * destroy_buffers - free every buffer left on the global pool
 *
 * The lock is dropped around each kfree() so the free itself is not done
 * under the spinlock; the list head is re-checked after re-acquiring.
 */
static void destroy_buffers(void)
{
	union aa_buffer *aa_buf;

	spin_lock(&aa_buffers_lock);
	while (!list_empty(&aa_global_buffers)) {
		aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
					  list);
		list_del(&aa_buf->list);
		spin_unlock(&aa_buffers_lock);
		kfree(aa_buf);
		spin_lock(&aa_buffers_lock);
	}
	spin_unlock(&aa_buffers_lock);
}
staticint __init alloc_buffers(void)
{ union aa_buffer *aa_buf; int i, num;
/* * per cpu set of cached allocated buffers used to help reduce * lock contention
*/
for_each_possible_cpu(i) {
per_cpu(aa_local_buffers, i).hold = 0;
per_cpu(aa_local_buffers, i).count = 0;
INIT_LIST_HEAD(&per_cpu(aa_local_buffers, i).head);
} /* * A function may require two buffers at once. Usually the buffers are * used for a short period of time and are shared. On UP kernel buffers * two should be enough, with more CPUs it is possible that more * buffers will be used simultaneously. The preallocated pool may grow. * This preallocation has also the side-effect that AppArmor will be * disabled early at boot if aa_g_path_max is extremely high.
*/ if (num_online_cpus() > 1)
num = 4 + RESERVE_COUNT; else
num = 2 + RESERVE_COUNT;
/*
 * NOTE(review): the following German website-disclaimer text is extraction
 * junk, not part of this source file; left uncommented it breaks
 * compilation and it should simply be removed.  Preserved here, commented
 * out, for reference:
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *  sorgfaeltig zusammengestellt. Es wird jedoch weder Vollstaendigkeit,
 *  noch Richtigkeit, noch Qualitaet der bereit gestellten Informationen
 *  zugesichert.
 *  Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 *  experimentell."
 */