struct task_struct; struct rusage; union thread_union; struct css_set;
/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL
/*
 * Argument bundle for the clone/fork family of task-creation paths.
 * Collects the legacy clone() arguments plus the extended clone3()
 * fields in one structure.
 */
struct kernel_clone_args {
	u64 flags;
	int __user *pidfd;
	int __user *child_tid;
	int __user *parent_tid;
	const char *name;
	int exit_signal;
	/* Flags for the type of kernel-internal task being created. */
	u32 kthread:1;
	u32 io_thread:1;
	u32 user_worker:1;
	u32 no_files:1;
	unsigned long stack;
	unsigned long stack_size;
	unsigned long tls;
	pid_t *set_tid;
	/* Number of elements in *set_tid */
	size_t set_tid_size;
	int cgroup;
	int idle;
	int (*fn)(void *);
	void *fn_arg;
	struct cgroup *cgrp;
	struct css_set *cset;
	unsigned int kill_seq;
};
/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
/* Drop one reference to @t; defer the actual free to RCU context. */
static inline void put_task_struct(struct task_struct *t)
{
	if (!refcount_dec_and_test(&t->usage))
		return;

	/*
	 * Under PREEMPT_RT, we can't call __put_task_struct
	 * in atomic context because it will indirectly
	 * acquire sleeping locks. The same is true if the
	 * current process has a mutex enqueued (blocked on
	 * a PI chain).
	 *
	 * In !RT, it is always safe to call __put_task_struct().
	 * Though, in order to simplify the code, resort to the
	 * deferred call too.
	 *
	 * call_rcu() will schedule __put_task_struct_rcu_cb()
	 * to be called in process context.
	 *
	 * __put_task_struct() is called when
	 * refcount_dec_and_test(&t->usage) succeeds.
	 *
	 * This means that it can't "conflict" with
	 * put_task_struct_rcu_user() which abuses ->rcu the same
	 * way; rcu_users has a reference so task->usage can't be
	 * zero after rcu_users 1 -> 0 transition.
	 *
	 * delayed_free_task() also uses ->rcu, but it is only called
	 * when it fails to fork a process. Therefore, there is no
	 * way it can conflict with __put_task_struct().
	 */
	call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
/*
 * Scope-based cleanup class "put_task" for use with __free(): drops the
 * task reference via put_task_struct() when the pointer is non-NULL.
 */
DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
/*
 * Drop @nr references to @t at once; free the task immediately if the
 * usage count reaches zero.  Note: unlike put_task_struct(), this calls
 * __put_task_struct() directly rather than deferring via RCU.
 */
static inline void put_task_struct_many(struct task_struct *t, int nr)
{
	if (refcount_sub_and_test(nr, &t->usage))
		__put_task_struct(t);
}
#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST /* * If an architecture has not declared a thread_struct whitelist we * must assume something there may need to be copied to userspace.
*/ staticinlinevoid arch_thread_struct_whitelist(unsignedlong *offset, unsignedlong *size)
{
*offset = 0; /* Handle dynamically sized thread_struct. */
*size = arch_task_struct_size - offsetof(struct task_struct, thread);
} #endif
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].  And ->vfork_done.  And ->sysvshm.shm_clist.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.