struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	char *full_name;
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};
struct kthread {
	unsigned long flags;
	unsigned int cpu;
	unsigned int node;
	int started;
	int result;
	int (*threadfn)(void *);
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	/* To store the full name if task comm is truncated. */
	char *full_name;
	struct task_struct *task;
	struct list_head hotplug_node;
	struct cpumask *preferred_affinity;
};
/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * When "(p->flags & PF_KTHREAD)" is set the task is a kthread and will
 * always remain a kthread. For kthreads p->worker_private always
 * points to a struct kthread. For tasks that are not kthreads
 * p->worker_private is used to point to other things.
 *
 * Return NULL for any task that is not a kthread.
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = p->worker_private;

	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}
/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
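/*
 * Illustrative sketch (not part of kthread.c): the canonical shape of a
 * thread function built around kthread_should_stop(). The names
 * my_threadfn, my_start and "my_kt" are hypothetical.
 */
static int my_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* Do one unit of work, then sleep until woken or stopped. */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;	/* handed back to kthread_stop() */
}

static int my_start(void)
{
	/* kthread_run() creates the thread and immediately wakes it. */
	struct task_struct *task = kthread_run(my_threadfn, NULL, "my_kt");

	if (IS_ERR(task))
		return PTR_ERR(task);
	/* ... later, on teardown: */
	return kthread_stop(task);
}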
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
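/*
 * Illustrative sketch (not part of kthread.c): a park-aware loop of the
 * kind per-CPU threads use across CPU hotplug. my_percpu_threadfn is
 * hypothetical.
 */
static int my_percpu_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Sleeps in TASK_PARKED until kthread_unpark(). */
			kthread_parkme();
			continue;
		}
		/* ... per-CPU work, quiesced while parked ... */
		cond_resched();
	}
	return 0;
}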
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);

	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);
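/*
 * Illustrative sketch (not part of kthread.c): a subsystem that passes its
 * per-thread context as the kthread's @data can recover it later from the
 * task_struct, much as the workqueue code does. struct my_ctx and
 * my_ctx_of are hypothetical.
 */
static struct my_ctx *my_ctx_of(struct task_struct *task)
{
	/* Only valid if @task is known to be one of our kthreads. */
	return kthread_data(task);
}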
/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}
static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}
/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
 * @result: The integer value to return to kthread_stop().
 *
 * While kthread_exit can be called directly, it exists so that
 * functions which do some additional work in non-modular code such as
 * module_put_and_kthread_exit can be implemented.
 *
 * Does not return.
 */
void __noreturn kthread_exit(long result)
{
	struct kthread *kthread = to_kthread(current);

	kthread->result = result;
	if (!list_empty(&kthread->hotplug_node)) {
		mutex_lock(&kthreads_hotplug_lock);
		list_del(&kthread->hotplug_node);
		mutex_unlock(&kthreads_hotplug_lock);
	}
	do_exit(0);
}
EXPORT_SYMBOL(kthread_exit);
/**
 * kthread_complete_and_exit - Exit the current kthread.
 * @comp: Completion to complete
 * @code: The integer value to return to kthread_stop().
 *
 * If present, complete @comp and then return code to kthread_stop().
 *
 * A kernel thread whose module may be removed after the completion of
 * @comp can use this function to exit safely.
 *
 * Does not return.
 */
void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	kthread_exit(code);
}
EXPORT_SYMBOL(kthread_complete_and_exit);
static void kthread_affine_node(void)
{
	struct kthread *kthread = to_kthread(current);
	cpumask_var_t affinity;

	WARN_ON_ONCE(kthread_is_per_cpu(current));

	if (kthread->node == NUMA_NO_NODE) {
		housekeeping_affine(current, HK_TYPE_KTHREAD);
	} else {
		if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
			WARN_ON_ONCE(1);
			return;
		}

		mutex_lock(&kthreads_hotplug_lock);
		WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
		list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
		/*
		 * The node cpumask is racy when read from kthread() but:
		 * - a racing CPU going down will either fail on the subsequent
		 *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
		 *   afterwards by the scheduler.
		 * - a racing CPU going up will be handled by kthreads_online_cpu()
		 */
		kthread_fetch_affinity(kthread, affinity);
		set_cpus_allowed_ptr(current, affinity);
		mutex_unlock(&kthreads_hotplug_lock);

		free_cpumask_var(affinity);
	}
}
static int kthread(void *_create)
{
	static const struct sched_param param = { .sched_priority = 0 };
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = to_kthread(current);

	/* Release the structure when caller killed by a fatal signal. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create->full_name);
		kfree(create);
		kthread_exit(-EINTR);
	}

	self->full_name = create->full_name;
	self->threadfn = threadfn;
	self->data = data;

	/*
	 * The new thread inherited kthreadd's priority and CPU mask. Reset
	 * back to default in case they have been changed.
	 */
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	self->started = 1;

	if (!(current->flags & PF_NO_SETAFFINITY) && !self->preferred_affinity)
		kthread_affine_node();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	kthread_exit(ret);
}
/* Called from kernel_clone() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}
static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, create->full_name,
			    CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* Release the structure when caller killed by a fatal signal. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create->full_name);
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}
static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;
	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
	if (!create->full_name) {
		task = ERR_PTR(-ENOMEM);
		goto free_create;
	}

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was killed by a fatal signal before kthreadd (or new
		 * kernel thread) calls complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
free_create:
	kfree(create);
	return task;
}
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either return directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	struct kthread *kthread = to_kthread(p);

	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
	WARN_ON_ONCE(kthread->started);
}
EXPORT_SYMBOL(kthread_bind);
/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}
EXPORT_SYMBOL(kthread_create_on_cpu);
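/*
 * Illustrative sketch (not part of kthread.c): creating one bound thread per
 * online CPU. my_percpu_threadfn (above), my_threads and "my_kt/%u" are
 * hypothetical.
 */
static struct task_struct *my_threads[NR_CPUS];

static int my_start_percpu(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		struct task_struct *p;

		p = kthread_create_on_cpu(my_percpu_threadfn, NULL, cpu,
					  "my_kt/%u");
		if (IS_ERR(p))
			return PTR_ERR(p);
		my_threads[cpu] = p;
		wake_up_process(p);
	}
	return 0;
}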
void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);

	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
		return;
	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);
/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
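/*
 * Illustrative sketch (not part of kthread.c): quiescing a thread around a
 * reconfiguration, the way CPU hotplug quiesces per-CPU threads.
 * my_reconfigure is hypothetical.
 */
static void my_reconfigure(struct task_struct *p)
{
	if (kthread_park(p))
		return;		/* thread already exited */
	/* p now sits inactive in TASK_PARKED; safe to retarget its state. */
	kthread_unpark(p);	/* rebinds per-cpu threads and resumes threadfn() */
}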
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call kthread_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = kthread->result;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
/**
 * kthread_stop_put - stop a thread and put its task struct
 * @k: thread created by kthread_create().
 *
 * Stops a thread created by kthread_create() and puts its task_struct.
 * Only use when holding an extra task struct reference obtained by
 * calling get_task_struct().
 */
int kthread_stop_put(struct task_struct *k)
{
	int ret;

	ret = kthread_stop(k);
	put_task_struct(k);
	return ret;
}
EXPORT_SYMBOL(kthread_stop_put);
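/*
 * Illustrative sketch (not part of kthread.c): when the thread may exit on
 * its own via kthread_exit(), pin the task_struct before stopping it.
 * my_shutdown is hypothetical.
 */
static int my_shutdown(struct task_struct *p)
{
	get_task_struct(p);		/* keep p valid across the stop */
	return kthread_stop_put(p);	/* stop, fetch result, drop our ref */
}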
	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, comm);
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);
	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, affinity);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
/*
 * Re-affine kthreads according to their preferences
 * and the newly online CPU. The CPU down part is handled
 * by select_fallback_rq() which by default re-affines to
 * housekeepers from other nodes in case the preferred
 * affinity doesn't apply anymore.
 */
static int kthreads_online_cpu(unsigned int cpu)
{
	cpumask_var_t affinity;
	struct kthread *k;
	int ret;

	guard(mutex)(&kthreads_hotplug_lock);

	if (list_empty(&kthreads_hotplug))
		return 0;

	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
		return -ENOMEM;

	ret = 0;

	list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
		if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
				 kthread_is_per_cpu(k->task))) {
			ret = -EINVAL;
			continue;
		}
		kthread_fetch_affinity(k, affinity);
		set_cpus_allowed_ptr(k->task, affinity);
	}

	free_cpumask_var(affinity);

	return ret;
}
/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The work items must not hold any locks or keep preemption or interrupts
 * disabled when they finish. There is a defined safe point for freezing
 * after one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;

		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current)) {
		schedule();
	} else {
		/*
		 * Handle the case where the current remains
		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
		 * the current to be TASK_RUNNING.
		 */
		__set_current_state(TASK_RUNNING);
	}

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
/**
 * kthread_create_worker_on_node - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @node: task structure for the thread is allocated on this node
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_node(unsigned int flags, int node,
			      const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_node);
/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%u").
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pending work items, prevent queuing new ones, and
 * restore the functionality when the CPU goes off and on. There are a
 * few catches:
 *
 * - CPU affinity gets lost when it is scheduled on an offline CPU.
 *
 * - The worker might not exist when the CPU was off when the user
 *   created the workers.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[])
{
	struct kthread_worker *worker;

	worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu),
					       namefmt, cpu);
	if (!IS_ERR(worker))
		kthread_bind(worker->task, cpu);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);
/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}
static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}
/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
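/*
 * Illustrative sketch (not part of kthread.c): a minimal kthread_worker
 * consumer. struct my_ctx, my_work_fn, my_use_worker and the worker name
 * are hypothetical.
 */
struct my_ctx {
	struct kthread_work work;
	int payload;
};

static void my_work_fn(struct kthread_work *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	pr_info("processing payload %d\n", ctx->payload);
}

static int my_use_worker(struct my_ctx *ctx)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker_on_node(0, NUMA_NO_NODE, "my_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&ctx->work, my_work_fn);
	kthread_queue_work(worker, &ctx->work);

	kthread_flush_work(&ctx->work);		/* wait for it to finish */
	kthread_destroy_worker(worker);
	return 0;
}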
/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = timer_container_of(dwork, t,
								timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is being used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}
/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);
/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * timer_delete_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	timer_delete_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}
/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}
/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}
/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
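/*
 * Illustrative sketch (not part of kthread.c): the usual delayed-work
 * lifecycle on a kthread_worker. my_delayed_usage is hypothetical and
 * struct my_ctx is assumed to carry a struct kthread_delayed_work dwork
 * member alongside my_work_fn from the earlier sketch.
 */
static void my_delayed_usage(struct kthread_worker *worker, struct my_ctx *ctx)
{
	kthread_init_delayed_work(&ctx->dwork, my_work_fn);

	/* Run my_work_fn() on the worker roughly 100ms from now. */
	kthread_queue_delayed_work(worker, &ctx->dwork, msecs_to_jiffies(100));

	/* Push the deadline back; queues immediately if it already fired. */
	kthread_mod_delayed_work(worker, &ctx->dwork, msecs_to_jiffies(500));

	/* On teardown: stop the timer and wait out a running instance. */
	kthread_cancel_delayed_work_sync(&ctx->dwork);
}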
/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);
/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller should queue or cancel all delayed work items before invoking
 * this function.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	/*
	 * It is possible for mm to be the same as tsk->active_mm, but
	 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
	 * because these references are not equivalent.
	 */
	mmgrab(mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop_lazy_tlb().
	 */
	mmdrop_lazy_tlb(active_mm);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);
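/*
 * Illustrative sketch (not part of kthread.c): a kthread temporarily
 * adopting a user address space to service it, as vhost-style workers do.
 * my_service_user_buffer is hypothetical and @mm is assumed to be pinned
 * by the caller (e.g. via mmget()).
 */
static int my_service_user_buffer(struct mm_struct *mm,
				  void __user *uaddr, void *buf, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);		/* uaccess now targets @mm */
	if (copy_from_user(buf, uaddr, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);		/* back to no address space */

	return ret;
}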
/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	mmgrab_lazy_tlb(mm);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This function
 * stores the original thread's cgroup info in the current kthread context for
 * later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);
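/*
 * Illustrative sketch (not part of kthread.c): a kthread doing I/O on
 * behalf of another task attributes it to that task's blkcg. my_do_io_as
 * is hypothetical; @css is assumed to be a blkcg css the caller obtained
 * and holds a reference on.
 */
static void my_do_io_as(struct cgroup_subsys_state *css)
{
	kthread_associate_blkcg(css);	/* takes its own css reference */
	/* ... submit bios: attributed to @css via kthread_blkcg() ... */
	kthread_associate_blkcg(NULL);	/* drop the association */
}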
/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
#endif