// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */
/*
 * worker_pool flags
 *
 * A bound pool is either associated or disassociated with its CPU.
 * While associated (!DISASSOCIATED), all workers are bound to the
 * CPU and none has %WORKER_UNBOUND set and concurrency management
 * is in effect.
 *
 * While DISASSOCIATED, the cpu may be offline and all workers have
 * %WORKER_UNBOUND set and concurrency management disabled, and may
 * be executing on any CPU.  The pool behaves as an unbound one.
 *
 * Note that DISASSOCIATED should be flipped only while holding
 * wq_pool_attach_mutex to avoid changing binding state while
 * worker_attach_to_pool() is in progress.
 *
 * As there can only be one concurrent BH execution context per CPU, a
 * BH pool is per-CPU and always DISASSOCIATED.
 */
enum worker_pool_flags {
POOL_BH = 1 << 0, /* is a BH pool */
POOL_MANAGER_ACTIVE = 1 << 1, /* being managed */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
POOL_BH_DRAINING = 1 << 3, /* draining after CPU offline */
};
enum worker_flags {
	/* worker flags */
WORKER_DIE = 1 << 1, /* die die die */
WORKER_IDLE = 1 << 2, /* is idle */
WORKER_PREP = 1 << 3, /* preparing to run works */
WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
WORKER_UNBOUND = 1 << 7, /* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,
};

enum wq_internal_consts {
	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
RESCUER_NICE_LEVEL = MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,
};
/*
 * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and
 * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because
 * msecs_to_jiffies() can't be an initializer.
 */
#define BH_WORKER_JIFFIES	msecs_to_jiffies(2)
#define BH_WORKER_RESTARTS	10
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
 *     reads.
 *
 * K: Only modified by worker while holding pool->lock.  Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
 *     with READ_ONCE() without locking.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */
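#if 0
/*
 * Illustrative sketch, not part of the kernel source: how the annotations
 * above translate into code. A field marked "L:" may only be accessed with
 * pool->lock held; the hypothetical helper below documents that with a
 * lockdep assertion before touching pool->flags.
 */
static unsigned int example_read_pool_flags(struct worker_pool *pool)
{
	lockdep_assert_held(&pool->lock);	/* enforce the "L:" rule */
	return pool->flags;			/* L: flags */
}
#endif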
/* struct worker is defined in workqueue_internal.h */
struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */
	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;
struct list_head worklist; /* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
struct timer_list mayday_timer; /* L: SOS timer for workers */
	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct list_head	workers;	/* A: attached workers */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};
/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
PWQ_STAT_STARTED, /* work items started execution */
PWQ_STAT_COMPLETED, /* work items completed execution */
PWQ_STAT_CPU_TIME, /* total CPU time consumed */
PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
PWQ_STAT_CM_WAKEUP, /* concurrency-management worker wakeups */
PWQ_STAT_REPATRIATED, /* unbound workers brought back into scope */
PWQ_STAT_MAYDAY, /* maydays to rescuer */
PWQ_STAT_RESCUED, /* linked work items executed by rescuer */
PWQ_NR_STATS,
};
/*
 * The per-pool workqueue.  While queued, bits below WORK_STRUCT_PWQ_SHIFT
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	bool			plugged;	/* L: execution suspended */
	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate in
	 * nr_active and all work items in pwq->inactive_works are marked with
	 * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
	 * in pwq->inactive_works. Some of them are ready to run in
	 * pool->worklist or worker->scheduled. Those work items are only struct
	 * wq_barrier which is used for flush_work() and should not participate
	 * in nr_active. For non-barrier work item, it is marked with
	 * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pending_node;	/* LN: node on wq_node_nr_active->pending_pwqs */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */
u64 stats[PWQ_NR_STATS];
	/*
	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
	 * and pwq_release_workfn() for details. pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_PWQ_SHIFT);
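#if 0
/*
 * Illustrative sketch, not part of the kernel source: why pool_workqueue is
 * aligned to (1 << WORK_STRUCT_PWQ_SHIFT). The alignment guarantees that the
 * low WORK_STRUCT_PWQ_SHIFT bits of a pwq address are always zero, so a
 * single work->data word can carry flag bits in the low bits and the pwq
 * pointer in the rest. Hypothetical decode mirroring get_work_pwq():
 */
static struct pool_workqueue *example_decode_pwq(unsigned long data)
{
	if (!(data & WORK_STRUCT_PWQ))
		return NULL;	/* off queue: high bits hold OFFQ flags + pool ID */
	return (struct pool_workqueue *)
		(data & ~((1UL << WORK_STRUCT_PWQ_SHIFT) - 1));
}
#endif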
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};
struct wq_device;
/*
 * Unlike in a per-cpu workqueue where max_active limits its concurrency level
 * on each CPU, in an unbound workqueue, max_active applies to the whole system.
 * As sharing a single nr_active across multiple sockets can be very expensive,
 * the counting and enforcement is per NUMA node.
 *
 * The following struct is used to enforce per-node max_active. When a pwq wants
 * to start executing a work item, it should increment ->nr using
 * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
 * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
 * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
 * round-robin order.
 */
struct wq_node_nr_active {
	int			max;		/* per-node max_active */
	atomic_t		nr;		/* per-node nr_active */
	raw_spinlock_t		lock;		/* nests inside pool locks */
	struct list_head	pending_pwqs;	/* LN: pwqs with inactive works */
};
/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */
	/* See alloc_workqueue() function comment for info on min/max_active */
	int			max_active;	/* WO: max active works */
	int			min_active;	/* WO: min active works */
	int			saved_max_active; /* WQ: saved max_active */
	int			saved_min_active; /* WQ: saved min_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue __rcu *dfl_pwq;	/* PW: only for unbound wqs */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __rcu * __percpu *cpu_pwq; /* I: per-cpu pwqs */
	struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
};
/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
	int			nr_pods;	/* number of pods */
	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
	int			*pod_node;	/* pod -> node */
	int			*cpu_pod;	/* cpu -> pod */
};
/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);

#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
static unsigned int wq_cpu_intensive_warning_thresh = 4;
module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
#endif
/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);
static bool wq_online;			/* can kworkers be created yet? */
static bool wq_topo_initialized __read_mostly = false;
/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */
/* PL: mirror the cpu_online_mask excluding the CPU in the midst of hotplugging */
static cpumask_var_t wq_online_cpumask;

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* PL: user requested unbound cpumask via sysfs */
static cpumask_var_t wq_requested_unbound_cpumask;

/* PL: isolated cpumask to be excluded from unbound cpumask */
static cpumask_var_t wq_isolated_cpumask;

/* to further constrain wq_unbound_cpumask by the cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);
/*
 * Local execution of unbound work items is no longer guaranteed. The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
/* to raise softirq for the BH worker pools on other CPUs */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
				     bh_pool_irq_works);
static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
/*
 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
 * process context while holding a pool lock. Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
 */
static struct kthread_worker *pwq_release_worker __ro_after_init;
#define assert_rcu_or_pool_mutex() \
RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() && \
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")
#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() && \
!lockdep_is_held(&wq->mutex) && \
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")
/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else
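#if 0
/*
 * Illustrative sketch, not part of the kernel source: the if/else trick in
 * the iterator above. The statement expression runs the assertion and then
 * yields false, so the caller's loop body always lands in the else branch
 * and the macro still parses as a single statement. Given a hypothetical
 * process() callback,
 *
 *	for_each_pool(pool, pi)
 *		process(pool);
 *
 * expands roughly to:
 */
idr_for_each_entry(&worker_pool_idr, pool, pi)
	if (({ assert_rcu_or_pool_mutex(); false; })) { }
	else
		process(pool);
#endif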
/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else
/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				lockdep_is_held(&(wq->mutex)))
/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}
/**
 * unbound_effective_cpumask - effective cpumask of an unbound workqueue
 * @wq: workqueue of interest
 *
 * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
 * is masked with wq_unbound_cpumask to determine the effective cpumask. The
 * default pwq is always mapped to the pool with the current effective cpumask.
 */
static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
{
	return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
}
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}
/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending() and mark_work_canceling()
 * can be used to set the pwq, pool or clear work->data.  These functions should
 * only be called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data)
{
WARN_ON_ONCE(!work_pending(work));
atomic_long_set(&work->data, data | work_static(work));
}
static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id, unsigned long flags)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
smp_wmb();
	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
		      flags);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards.  This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work.  E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8				      LOAD event_indicated
	 *				   }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1.  If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in the hope that CPU#1 will eventually
	 * finish the queued @work.  Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
smp_mb();
}
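#if 0
/*
 * Illustrative sketch, not part of the kernel source: a standalone C11
 * userspace analogue of the pairing described above. The seq_cst fence
 * stands in for smp_mb() on line 6; "pending" and "event_indicated" mirror
 * the names in the comment. Without the fence, the consumer's load of
 * event_indicated could be satisfied before its clear of pending becomes
 * visible, reproducing the missed-execution scenario.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool pending, event_indicated;

static void cpu0_queue(void)
{
	atomic_store(&event_indicated, true);	/* 1: STORE event_indicated */
	if (atomic_exchange(&pending, true))	/* 3: test_and_set_bit(PENDING) */
		return;				/* already pending, rely on owner */
	/* ... actually queue the work here ... */
}

static void cpu1_execute(void)
{
	atomic_store(&pending, false);			/* 5: clear PENDING */
	atomic_thread_fence(memory_order_seq_cst);	/* 6: smp_mb() */
	if (atomic_load(&event_indicated))		/* 8: LOAD event_indicated */
		/* handle the event */;
}
#endif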
static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data);
	else
		return NULL;
}
/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */
/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && !pool->nr_running;
}
/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}
/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}
/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;

	WARN_ON_ONCE(worker->task != current);

	/* If transitioning into NOT_RUNNING, adjust nr_running. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		pool->nr_running--;
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			pool->nr_running++;
}
/* Return the first idle worker.  Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}
/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
		return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
list_del_init(&worker->entry);
}
/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;
return NULL;
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to be
 * scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.  See assign_work() for details on
 * @nextp.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
}
	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
*nextp = n;
}
/**
 * assign_work - assign a work item and its linked work items to a worker
 * @work: work to assign
 * @worker: worker to assign to
 * @nextp: out parameter for nested worklist walking
 *
 * Assign @work and its linked work items to @worker.  If @work is already being
 * executed by another worker in the same pool, it'll be punted there.
 *
 * If @nextp is not NULL, it's updated to point to the next work of the last
 * scheduled work.  This allows assign_work() to be nested inside
 * list_for_each_entry_safe().
 *
 * Returns %true if @work was successfully assigned to @worker.  %false if @work
 * was punted to another worker already executing it.
 */
static bool assign_work(struct work_struct *work, struct worker *worker,
			struct work_struct **nextp)
{
	struct worker_pool *pool = worker->pool;
	struct worker *collision;
lockdep_assert_held(&pool->lock);
	/*
	 * A single work shouldn't be executed concurrently by multiple workers.
	 * __queue_work() ensures that @work doesn't jump to a different pool
	 * while still running in the previous pool. Here, we should ensure that
	 * @work is not executed concurrently by multiple workers from the same
	 * pool. Check whether anyone is already processing the work. If so,
	 * defer the work to the currently executing one.
	 */
	collision = find_worker_executing_work(pool, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, nextp);
		return false;
	}

	move_linked_works(work, &worker->scheduled, nextp);
	return true;
}
static void kick_bh_pool(struct worker_pool *pool)
{
#ifdef CONFIG_SMP
	/* see drain_dead_softirq_workfn() for BH_DRAINING */
	if (unlikely(pool->cpu != smp_processor_id() &&
		     !(pool->flags & POOL_BH_DRAINING))) {
		irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu);
		return;
	}
#endif
	if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
		raise_softirq_irqoff(HI_SOFTIRQ);
	else
		raise_softirq_irqoff(TASKLET_SOFTIRQ);
}
/**
 * kick_pool - wake up an idle worker if necessary
 * @pool: pool to kick
 *
 * @pool may have pending work items.  Wake up worker if necessary.  Returns
 * whether a worker was woken up.
 */
static bool kick_pool(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);
	struct task_struct *p;
lockdep_assert_held(&pool->lock);
	if (!need_more_worker(pool) || !worker)
		return false;
	if (pool->flags & POOL_BH) {
		kick_bh_pool(pool);
		return true;
}
p = worker->task;
#ifdef CONFIG_SMP
	/*
	 * Idle @worker is about to execute @work and waking up provides an
	 * opportunity to migrate @worker at a lower cost by setting the task's
	 * wake_cpu field. Let's see if we want to move @worker to improve
	 * execution locality.
	 *
	 * We're waking the worker that went idle the latest and there's some
	 * chance that @worker is marked idle but hasn't gone off CPU yet. If
	 * so, setting the wake_cpu won't do anything. As this is a best-effort
	 * optimization and the race window is narrow, let's leave as-is for
	 * now. If this becomes pronounced, we can skip over workers which are
	 * still on cpu when picking an idle worker.
	 *
	 * If @pool has non-strict affinity, @worker might have ended up outside
	 * its affinity scope. Repatriate.
	 */
	if (!pool->attrs->affn_strict &&
	    !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
		struct work_struct *work = list_first_entry(&pool->worklist,
						struct work_struct, entry);
		int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
							  cpu_online_mask);
		if (wake_cpu < nr_cpu_ids) {
p->wake_cpu = wake_cpu;
get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
}
	}
#endif
	wake_up_process(p);
	return true;
}
#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
/*
 * Concurrency-managed per-cpu work items that hog CPU for longer than
 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
 * which prevents them from stalling other concurrency-managed work items. If a
 * work function keeps triggering this mechanism, it's likely that the work item
 * should be using an unbound workqueue instead.
 *
 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
 * and report them so that they can be examined and converted to use unbound
 * workqueues as appropriate. To avoid flooding the console, each violating work
 * function is tracked and reported with exponential backoff.
 */
#define WCI_MAX_ENTS 128

struct wci_ent {
	work_func_t		func;
	atomic64_t		cnt;
	struct hlist_node	hash_node;
};

static struct wci_ent wci_ents[WCI_MAX_ENTS];
static int wci_nr_ents;
static DEFINE_RAW_SPINLOCK(wci_lock);
static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));

static struct wci_ent *wci_find_ent(work_func_t func)
{
	struct wci_ent *ent;

	hash_for_each_possible_rcu(wci_hash, ent, hash_node,
				   (unsigned long)func) {
		if (ent->func == func)
			return ent;
	}
	return NULL;
}

static void wq_cpu_intensive_report(work_func_t func)
{
	struct wci_ent *ent;

restart:
	ent = wci_find_ent(func);
	if (ent) {
u64 cnt;
		/*
		 * Start reporting from the warning_thresh and back off
		 * exponentially.
		 */
		cnt = atomic64_inc_return_relaxed(&ent->cnt);
		if (wq_cpu_intensive_warning_thresh &&
cnt >= wq_cpu_intensive_warning_thresh &&
is_power_of_2(cnt + 1 - wq_cpu_intensive_warning_thresh))
printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
ent->func, wq_cpu_intensive_thresh_us,
				atomic64_read(&ent->cnt));
		return;
}
	/*
	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
	 * is exhausted, something went really wrong and we probably made enough
	 * noise already.
	 */
	if (wci_nr_ents >= WCI_MAX_ENTS)
		return;
raw_spin_lock(&wci_lock);
if (wci_nr_ents >= WCI_MAX_ENTS) {
		raw_spin_unlock(&wci_lock);
		return;
}
	if (wci_find_ent(func)) {
		raw_spin_unlock(&wci_lock);
		goto restart;
	}

	ent = &wci_ents[wci_nr_ents++];
	ent->func = func;
	atomic64_set(&ent->cnt, 0);
	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);

	raw_spin_unlock(&wci_lock);
}

#else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
static void wq_cpu_intensive_report(work_func_t func) {}
#endif	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule().
 */
void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	if (!READ_ONCE(worker->sleeping))
		return;
	/*
	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
	 * and the nr_running increment below, we may ruin the nr_running reset
	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
	 * pool. Protect against such race.
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
worker->pool->nr_running++;
preempt_enable();
	/*
	 * CPU intensive auto-detection cares about how long a work item hogged
	 * CPU without sleeping. Reset the starting timestamp on wakeup.
	 */
worker->current_at = worker->task->se.sum_exec_runtime;
WRITE_ONCE(worker->sleeping, 0);
}
/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;
	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return;
pool = worker->pool;
	/* Return if preempted before wq_worker_running() was reached */
	if (READ_ONCE(worker->sleeping))
		return;

	WRITE_ONCE(worker->sleeping, 1);
	raw_spin_lock_irq(&pool->lock);
	/*
	 * Recheck in case unbind_workers() preempted us. We don't
	 * want to decrement nr_running after the worker is unbound
	 * and nr_running has been reset.
	 */
	if (worker->flags & WORKER_NOT_RUNNING) {
		raw_spin_unlock_irq(&pool->lock);
		return;
}
	pool->nr_running--;
	if (kick_pool(pool))
worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
raw_spin_unlock_irq(&pool->lock);
}
/**
 * wq_worker_tick - a scheduler tick occurred while a kworker is running
 * @task: task currently running
 *
 * Called from sched_tick().  We're in the IRQ context and the current
 * worker's fields which follow the 'K' locking rule can be accessed safely.
 */
void wq_worker_tick(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct pool_workqueue *pwq = worker->current_pwq;
	struct worker_pool *pool = worker->pool;
	if (!pwq)
		return;
pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;
	if (!wq_cpu_intensive_thresh_us)
		return;
	/*
	 * If the current worker is concurrency managed and hogged the CPU for
	 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
	 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
	 *
	 * Set @worker->sleeping means that @worker is in the process of
	 * switching out voluntarily and won't be contributing to
	 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
	 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
	 * double decrements. The task is releasing the CPU anyway. Let's skip.
	 * We probably want to make this prettier in the future.
	 */
	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
	    worker->task->se.sum_exec_runtime - worker->current_at <
	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
		return;

	raw_spin_lock(&pool->lock);

	worker_set_flags(worker, WORKER_CPU_INTENSIVE);
	wq_cpu_intensive_report(worker->current_func);
	pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
if (kick_pool(pool))
pwq->stats[PWQ_STAT_CM_WAKEUP]++;
raw_spin_unlock(&pool->lock);
}
/**
 * wq_worker_last_func - retrieve worker's last work function
 * @task: Task to retrieve last work function of.
 *
 * Determine the last function a worker executed.  This is called from
 * the scheduler to get a worker's last known identity.
 *
 * CONTEXT:
 * raw_spin_lock_irq(rq->lock)
 *
 * This function is called during schedule() when a kworker is going
 * to sleep.  It's used by psi to identify aggregation workers during
 * dequeuing, to allow periodic aggregation to shut-off when that
 * worker is the last task in the system or cgroup to go to sleep.
 *
 * As this function doesn't involve any workqueue-related locking, it
 * only returns stable values when called from inside the scheduler's
 * queuing and dequeuing paths, when @task, which must be a kworker,
 * is guaranteed to not be processing any works.
 *
 * Return:
 * The last work function %current executed as a worker, NULL if it
 * hasn't executed any work yet.
 */
work_func_t wq_worker_last_func(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
return worker->last_func;
}
/**
 * wq_node_nr_active - Determine wq_node_nr_active to use
 * @wq: workqueue of interest
 * @node: NUMA node, can be %NUMA_NO_NODE
 *
 * Determine wq_node_nr_active to use for @wq on @node. Returns:
 *
 * - %NULL for per-cpu workqueues as they don't need to use shared nr_active.
 *
 * - node_nr_active[nr_node_ids] if @node is %NUMA_NO_NODE.
 *
 * - Otherwise, node_nr_active[@node].
 */
static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq,
						   int node)
{
	if (!(wq->flags & WQ_UNBOUND))
		return NULL;
if (node == NUMA_NO_NODE)
node = nr_node_ids;
return wq->node_nr_active[node];
}
/**
 * wq_update_node_max_active - Update per-node max_actives to use
 * @wq: workqueue to update
 * @off_cpu: CPU that's going down, -1 if a CPU is not going down
 *
 * Update @wq->node_nr_active[]->max.  @wq must be unbound.  max_active is
 * distributed among nodes according to the proportions of numbers of online
 * cpus.  The result is always between @wq->min_active and max_active.
 */
static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
{
	struct cpumask *effective = unbound_effective_cpumask(wq);
	int min_active = READ_ONCE(wq->min_active);
	int max_active = READ_ONCE(wq->max_active);
	int total_cpus, node;
lockdep_assert_held(&wq->mutex);
	if (!wq_topo_initialized)
		return;
if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
off_cpu = -1;
	total_cpus = cpumask_weight_and(effective, cpu_online_mask);
	if (off_cpu >= 0)
total_cpus--;
	/* If all CPUs of the wq get offline, use the default values */
	if (unlikely(!total_cpus)) {
		for_each_node(node)
			wq_node_nr_active(wq, node)->max = min_active;

		wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
		return;
	}

	for_each_node(node) {
		int node_cpus;

		node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
		if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
			node_cpus--;

		wq_node_nr_active(wq, node)->max =
			clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
			      min_active, max_active);
	}

	wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
}
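#if 0
/*
 * Illustrative sketch, not part of the kernel source: the documented
 * distribution with made-up numbers. max_active = 16 and min_active = 8
 * spread over two nodes with 6 and 2 online CPUs yields
 * DIV_ROUND_UP(16 * 6, 8) = 12 for node0 and DIV_ROUND_UP(16 * 2, 8) = 4
 * for node1, the latter clamped up to min_active = 8.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int clamp_int(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	int max_active = 16, min_active = 8;
	int node_cpus[2] = { 6, 2 }, total_cpus = 8;

	for (int node = 0; node < 2; node++)
		printf("node%d max = %d\n", node,
		       clamp_int(DIV_ROUND_UP(max_active * node_cpus[node],
					      total_cpus),
				 min_active, max_active));
	return 0;
}
#endif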
/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq.  The caller should guarantee that
 * @pwq has positive refcnt and be holding the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
lockdep_assert_held(&pwq->pool->lock);
WARN_ON_ONCE(pwq->refcnt <= 0);
pwq->refcnt++;
}
/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
 * destruction.  The caller should be holding the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	if (likely(--pwq->refcnt))
		return;
	/*
	 * @pwq can't be released under pool->lock, bounce to a dedicated
	 * kthread_worker to avoid A-A deadlocks.
	 */
kthread_queue_work(pwq_release_worker, &pwq->release_work);
}
/**
 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
 * @pwq: pool_workqueue to put (can be %NULL)
 *
 * put_pwq() with locking.  This function also allows %NULL @pwq.
 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
	if (pwq) {
		/*
		 * As both pwqs and pools are RCU protected, the
		 * following lock operations are safe.
		 */
raw_spin_lock_irq(&pwq->pool->lock);
put_pwq(pwq);
raw_spin_unlock_irq(&pwq->pool->lock);
}
}
static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
{
	int max = READ_ONCE(nna->max);
	int old = atomic_read(&nna->nr);

	do {
		if (old >= max)
			return false;
	} while (!atomic_try_cmpxchg_relaxed(&nna->nr, &old, old + 1));

	return true;
}
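#if 0
/*
 * Illustrative sketch, not part of the kernel source: the same lock-free
 * bounded increment in standalone C11. Like atomic_try_cmpxchg_relaxed(),
 * atomic_compare_exchange_weak_explicit() reloads "old" on failure, so the
 * bound is rechecked against a fresh value on every retry.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool tryinc_bounded(atomic_int *nr, int max)
{
	int old = atomic_load_explicit(nr, memory_order_relaxed);

	do {
		if (old >= max)
			return false;	/* at capacity, caller must queue */
	} while (!atomic_compare_exchange_weak_explicit(nr, &old, old + 1,
							memory_order_relaxed,
							memory_order_relaxed));
	return true;
}
#endif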
/**
 * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
 * @pwq: pool_workqueue of interest
 * @fill: max_active may have increased, try to increase concurrency level
 *
 * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
 * successfully obtained. %false otherwise.
 */
static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill)
{
	struct workqueue_struct *wq = pwq->wq;
	struct worker_pool *pool = pwq->pool;
	struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node);
	bool obtained = false;
lockdep_assert_held(&pool->lock);
	if (!nna) {
		/* BH or per-cpu workqueue, pwq->nr_active is sufficient */
		obtained = pwq->nr_active < READ_ONCE(wq->max_active);
		goto out;
}
	if (unlikely(pwq->plugged))
		return false;
	/*
	 * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is
	 * already waiting on $nna, pwq_dec_nr_active() will maintain the
	 * concurrency level. Don't jump the line.
	 *
	 * We need to ignore the pending test after max_active has increased as
	 * pwq_dec_nr_active() can only maintain the concurrency level but not
	 * increase it. This is indicated by @fill.
	 */
	if (!list_empty(&pwq->pending_node) && likely(!fill))
		goto out;
	obtained = tryinc_node_nr_active(nna);
	if (obtained)
		goto out;
	/*
	 * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs
	 * and try again. The smp_mb() is paired with the implied memory barrier
	 * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either
	 * we see the decremented $nna->nr or they see non-empty
	 * $nna->pending_pwqs.
	 */
raw_spin_lock(&nna->lock);
	if (list_empty(&pwq->pending_node))
		list_add_tail(&pwq->pending_node, &nna->pending_pwqs);
	else if (likely(!fill))
		goto out_unlock;
smp_mb();
obtained = tryinc_node_nr_active(nna);
	/*
	 * If @fill, @pwq might have already been pending. Being spuriously
	 * pending in cold paths doesn't affect anything. Let's leave it be.
	 */
	if (obtained && likely(!fill))
list_del_init(&pwq->pending_node);
out_unlock:
raw_spin_unlock(&nna->lock);
out:
	if (obtained)
		pwq->nr_active++;
	return obtained;
}
/**
 * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
 * @pwq: pool_workqueue of interest
 * @fill: max_active may have increased, try to increase concurrency level
 *
 * Activate the first inactive work item of @pwq if available and allowed by
 * max_active limit.
 *
 * Returns %true if an inactive work item has been activated. %false if no
 * inactive work item is found or max_active limit is reached.
 */
static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
{
	struct work_struct *work =
		list_first_entry_or_null(&pwq->inactive_works,
					 struct work_struct, entry);

	if (work && pwq_tryinc_nr_active(pwq, fill)) {
		__pwq_activate_work(pwq, work);
		return true;
	} else {
		return false;
	}
}
/**
 * unplug_oldest_pwq - unplug the oldest pool_workqueue
 * @wq: workqueue_struct where its oldest pwq is to be unplugged
 *
 * This function should only be called for ordered workqueues where only the
 * oldest pwq is unplugged, the others are plugged to suspend execution to
 * ensure proper work item ordering::
 *
 *    dfl_pwq --------------+     [P] - plugged
 *                          |
 *                          v
 *    pwqs -> A -> B [P] -> C [P] (newest)
 *            |    |        |
 *            1    3        5
 *            |    |        |
 *            2    4        6
 *
 * When the oldest pwq is drained and removed, this function should be called
 * to unplug the next oldest one to start its work item execution. Note that
 * pwq's are linked into wq->pwqs with the oldest first, so the first one in
 * the list is the oldest.
 */
static void unplug_oldest_pwq(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
lockdep_assert_held(&wq->mutex);
/* Caller should make sure that pwqs isn't empty before calling */
pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue,
pwqs_node);
	raw_spin_lock_irq(&pwq->pool->lock);
	if (pwq->plugged) {
		pwq->plugged = false;
		if (pwq_activate_first_inactive(pwq, true))
kick_pool(pwq->pool);
}
raw_spin_unlock_irq(&pwq->pool->lock);
}
/**
 * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active
 * @nna: wq_node_nr_active to activate a pending pwq for
 * @caller_pool: worker_pool the caller is locking
 *
 * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked.
 * @caller_pool may be unlocked and relocked to lock other worker_pools.
 */
static void node_activate_pending_pwq(struct wq_node_nr_active *nna,
				      struct worker_pool *caller_pool)
{
	struct worker_pool *locked_pool = caller_pool;
	struct pool_workqueue *pwq;
	struct work_struct *work;

	lockdep_assert_held(&caller_pool->lock);

	raw_spin_lock(&nna->lock);
retry:
	pwq = list_first_entry_or_null(&nna->pending_pwqs,
				       struct pool_workqueue, pending_node);
	if (!pwq)
		goto out_unlock;

	/*
	 * If @pwq is for a different pool than @locked_pool, we need to lock
	 * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock
	 * / lock dance. For that, we also need to release @nna->lock as it's
	 * nested inside pool locks.
	 */
	if (pwq->pool != locked_pool) {
raw_spin_unlock(&locked_pool->lock);
		locked_pool = pwq->pool;
		if (!raw_spin_trylock(&locked_pool->lock)) {
raw_spin_unlock(&nna->lock);
raw_spin_lock(&locked_pool->lock);
			raw_spin_lock(&nna->lock);
			goto retry;
}
}
	/*
	 * $pwq may not have any inactive work items due to e.g. cancellations.
	 * Drop it from pending_pwqs and see if there's another one.
	 */
	work = list_first_entry_or_null(&pwq->inactive_works,
					struct work_struct, entry);
	if (!work) {
		list_del_init(&pwq->pending_node);
		goto retry;
}
	/*
	 * Acquire an nr_active count and activate the inactive work item. If
	 * $pwq still has inactive work items, rotate it to the end of the
	 * pending_pwqs so that we round-robin through them. This means that
	 * inactive work items are not activated in queueing order which is fine
	 * given that there has never been any ordering across different pwqs.
	 */
	if (likely(tryinc_node_nr_active(nna))) {
pwq->nr_active++;
__pwq_activate_work(pwq, work);
if (list_empty(&pwq->inactive_works))
list_del_init(&pwq->pending_node); else
list_move_tail(&pwq->pending_node, &nna->pending_pwqs);
		/* if activating a foreign pool, make sure it's running */
		if (pwq->pool != caller_pool)
kick_pool(pwq->pool);
	}

out_unlock:
	raw_spin_unlock(&nna->lock);
	if (locked_pool != caller_pool) {
		raw_spin_unlock(&locked_pool->lock);
		raw_spin_lock(&caller_pool->lock);
	}
}
/**
 * pwq_dec_nr_active - Retire an active count
 * @pwq: pool_workqueue of interest
 *
 * Decrement @pwq's nr_active and try to activate the first inactive work item.
 * For unbound workqueues, this function may temporarily drop @pwq->pool->lock.
 */
static void pwq_dec_nr_active(struct pool_workqueue *pwq)
{
	struct worker_pool *pool = pwq->pool;
	struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node);
lockdep_assert_held(&pool->lock);
	/*
	 * @pwq->nr_active should be decremented for both percpu and unbound
	 * workqueues.
	 */
pwq->nr_active--;
	/*
	 * For a percpu workqueue, it's simple. Just need to kick the first
	 * inactive work item on @pwq itself.
	 */
	if (!nna) {
		pwq_activate_first_inactive(pwq, false);
		return;
}
	/*
	 * If @pwq is for an unbound workqueue, it's more complicated because
	 * multiple pwqs and pools may be sharing the nr_active count. When a
	 * pwq needs to wait for an nr_active count, it puts itself on
	 * $nna->pending_pwqs. The following atomic_dec_return()'s implied
	 * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to
	 * guarantee that either we see non-empty pending_pwqs or they see
	 * decremented $nna->nr.
	 *
	 * $nna->max may change as CPUs come online/offline and @pwq->wq's
	 * max_active gets updated. However, it is guaranteed to be equal to or
	 * larger than @pwq->wq->min_active which is above zero unless freezing.
	 * This maintains the forward progress guarantee.
	 */
	if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max))
		return;
if (!list_empty(&nna->pending_pwqs))
node_activate_pending_pwq(nna, pool);
}
/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @work_data: work_data of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * NOTE:
 * For unbound workqueues, this function may temporarily drop @pwq->pool->lock
 * and thus should be called after all other state updates for the in-flight
 * work item is complete.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
{
	int color = get_work_color(work_data);
if (!(work_data & WORK_STRUCT_INACTIVE))
pwq_dec_nr_active(pwq);
pwq->nr_in_flight[color]--;
	/* is flush in progress and are we at the flushing tip? */
	if (likely(pwq->flush_color != color))
		goto out_put;
	/* are there still in-flight works? */
	if (pwq->nr_in_flight[color])
		goto out_put;
/* this pwq is done, clear flush_color */
pwq->flush_color = -1;
	/*
	 * If this was the last pwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
complete(&pwq->wq->first_flusher->done);
out_put:
put_pwq(pwq);
}
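#if 0
/*
 * Illustrative sketch, not part of the kernel source: a toy, single-pwq
 * model of the flush-color handshake used above. Queued works are stamped
 * with the current work_color; a flush advances work_color and waits for
 * nr_in_flight[] of the old color to drain to zero.
 */
#include <stdio.h>

#define NR_COLORS 16

static int nr_in_flight[NR_COLORS];
static int work_color, flush_color = -1;

static void queue_one(void)
{
	nr_in_flight[work_color]++;		/* stamped with current color */
}

static void finish_one(int color)
{
	if (--nr_in_flight[color] == 0 && color == flush_color)
		printf("flush of color %d complete\n", color);
}

int main(void)
{
	queue_one();
	queue_one();				/* two works under color 0 */
	flush_color = work_color;		/* start flushing color 0 */
	work_color = (work_color + 1) % NR_COLORS;
	queue_one();				/* new work gets color 1 */
	finish_one(0);
	finish_one(0);				/* prints completion */
	finish_one(1);
	return 0;
}
#endif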
/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @cflags: %WORK_CANCEL_ flags
 * @irq_flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *
 *  ========	================================================================
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  ========	================================================================
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@irq_flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, u32 cflags,
			       unsigned long *irq_flags)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	local_irq_save(*irq_flags);

	/* try to steal the timer if it exists */
	if (cflags & WORK_CANCEL_DELAYED) {
		struct delayed_work *dwork = to_delayed_work(work);
		/*
		 * dwork->timer is irqsafe. If timer_delete() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(timer_delete(&dwork->timer)))
			return 1;
}
	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;
	rcu_read_lock();
	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */