/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}
/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_or_dl_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}
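
/*
 * Worked example (added for illustration, not part of the original file):
 * a SCHED_NORMAL task at nice 0 has static_prio == NICE_TO_PRIO(0) == 120,
 * so normal_prio() returns 120 and, absent any rt_mutex boost,
 * effective_prio() leaves p->prio at 120. If the task has been boosted to
 * an RT priority through priority inheritance, rt_or_dl_prio(p->prio) is
 * true and effective_prio() returns the boosted p->prio unchanged.
 */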
void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);
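
/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * caller of set_user_nice() is a kthread that demotes itself to background
 * priority right after starting; "example_bg_thread" is a made-up name.
 *
 *	static int example_bg_thread(void *unused)
 *	{
 *		set_user_nice(current, 19);
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 */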
/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}
#ifdef __ARCH_WANT_SYS_NICE
/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}
#endif /* __ARCH_WANT_SYS_NICE */
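
/*
 * Userspace view (illustrative only): the nice(2) wrapper ends up in the
 * syscall above. A minimal sketch, assuming a Linux/glibc environment:
 *
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// Ask for a 5-step lower priority; the kernel clamps the
 *		// result to the [-20, 19] nice range.
 *		if (nice(5) == -1)
 *			perror("nice");
 *		printf("now running at nice %d\n", nice(0));
 *		return 0;
 *	}
 */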
/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]  0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]  [98 ... 0]     [1 ... 99]
 * deadline                      -101  -1             0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}
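
/*
 * Worked example (added for illustration): a SCHED_NORMAL task at nice 0
 * has kernel prio 120, so task_prio() returns 120 - MAX_RT_PRIO = 20. A
 * SCHED_FIFO task with rt_priority 50 has kernel prio 99 - 50 = 49, so
 * task_prio() returns -51, matching the fifo/rr row of the table above.
 */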
/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

	if (rq->ttwu_pending)
		return 0;

	return 1;
}
/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}
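
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * a quiet CPU out of a mask could combine the helpers above roughly like
 * this; "pick_quiet_cpu" is a made-up name used only for this example.
 *
 *	static int pick_quiet_cpu(const struct cpumask *mask)
 *	{
 *		int cpu;
 *
 *		for_each_cpu(cpu, mask) {
 *			if (available_idle_cpu(cpu))
 *				return cpu;
 *		}
 *		return -1;	// no idle CPU in the mask right now
 *	}
 */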
/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}
	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		__setparam_fair(p, attr);

	/* rt-policy tasks do not have a timerslack */
	if (rt_or_dl_task_policy(p)) {
		p->timer_slack_ns = 0;
	} else if (p->timer_slack_ns == 0) {
		/* when switching back to non-rt policy, restore timerslack */
		p->timer_slack_ns = p->default_timer_slack_ns;
	}

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}
/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;

	guard(rcu)();
	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}
	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	sched_uclamp_enable();

	return 0;
}
static bool uclamp_reset(const struct sched_attr *attr, enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;
		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT by default have a 100% boost value that could be modified
		 * at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);
	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;
/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
	 */
	if (task_has_idle_policy(p) && !idle_policy(policy)) {
		if (!is_nice_reduction(p, task_nice(p)))
			goto req_priv;
	}

	/* Can't change other user's priorities: */
	if (!check_same_owner(p))
		goto req_priv;

	/* Normal users shall not reset the sched_reset_on_fork flag: */
	if (p->sched_reset_on_fork && !reset_on_fork)
		goto req_priv;

	return 0;

req_priv:
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}
	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	if (user) {
		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
		if (retval)
			return retval;

		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}
	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);
	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}
	retval = scx_check_setscheduler(p, policy);
	if (retval)
		goto unlock;

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) &&
		    (attr->sched_nice != task_nice(p) ||
		     (attr->sched_runtime != p->se.slice)))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:
	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_group_sched_enabled() &&
				rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif /* CONFIG_RT_GROUP_SCHED */
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
	}
	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}
	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}
	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Avoid rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}
	return __sched_setscheduler(p, &attr, check, true);
}

/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
int sched_setscheduler(struct task_struct *p, int policy, const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy, const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}
/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still; it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);
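
/*
 * Illustrative sketch (not part of the original file): a driver that needs
 * a latency-sensitive kthread is expected to call sched_set_fifo() rather
 * than pick an explicit FIFO priority itself; "example_irq_thread" is a
 * made-up name for this example.
 *
 *	static int example_irq_thread(void *data)
 *	{
 *		sched_set_fifo(current);
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 */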
/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);
	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}
/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	int retval;

	if (unlikely(!uattr || pid < 0 || flags))
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
		get_params(p, &attr);

	return sched_setattr(p, &attr);
}
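
/*
 * Userspace view (illustrative only): older glibc has no sched_setattr()
 * wrapper, so callers typically go through syscall(2) with a locally
 * defined struct sched_attr matching the layout documented in
 * sched_setattr(2); recent glibc versions ship their own wrapper and
 * struct, in which case the local definition below must be dropped.
 *
 *	#define _GNU_SOURCE
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/sched.h>
 *
 *	struct sched_attr {
 *		uint32_t size;
 *		uint32_t sched_policy;
 *		uint64_t sched_flags;
 *		int32_t  sched_nice;
 *		uint32_t sched_priority;
 *		uint64_t sched_runtime;
 *		uint64_t sched_deadline;
 *		uint64_t sched_period;
 *	};
 *
 *	int main(void)
 *	{
 *		struct sched_attr attr = {
 *			.size		= sizeof(attr),
 *			.sched_policy	= SCHED_FIFO,
 *			.sched_priority	= 10,
 *		};
 *
 *		// pid 0 means the calling thread
 *		if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *			perror("sched_setattr");
 *		return 0;
 *	}
 */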
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (!retval) {
		retval = p->policy;
		if (p->sched_reset_on_fork)
			retval |= SCHED_RESET_ON_FORK;
	}
	return retval;
}
/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (unlikely(!param || pid < 0))
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		if (task_has_rt_policy(p))
			lp.sched_priority = p->rt_priority;
	}

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}
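
/*
 * Userspace view (illustrative only): the two syscalls above are usually
 * used together to read back a thread's policy and RT priority. A minimal
 * sketch, assuming a Linux/glibc environment:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp;
 *		int policy = sched_getscheduler(0);	// 0 == calling thread
 *
 *		if (policy == -1 || sched_getparam(0, &sp) == -1) {
 *			perror("sched_get*");
 *			return 1;
 *		}
 *		printf("policy=%d rt_priority=%d\n", policy, sp.sched_priority);
 *		return 0;
 *	}
 */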
/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

#ifdef CONFIG_UCLAMP_TASK
	/*
	 * This could race with another potential updater, but this is fine
	 * because it'll correctly read the old or the new value. We don't need
	 * to guarantee who wins the race as long as it doesn't return garbage.
	 */
	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
}
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * The special/sugov task isn't part of regular bandwidth/admission
	 * control so let userspace change affinities.
	 */
	if (dl_entity_is_special(&p->dl))
		return 0;

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	guard(rcu)();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		return -EBUSY;

	return 0;
}
int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;

	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);

		/*
		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
		 * will restore the previous user_cpus_ptr value.
		 *
		 * In the unlikely event a previous user_cpus_ptr exists,
		 * we need to further restrict the mask to what is allowed
		 * by that old user_cpus_ptr.
		 */
		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
			bool empty = !cpumask_and(new_mask, new_mask,
						  ctx->user_mask);
/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;
/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}
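
/*
 * Userspace view (illustrative only): glibc wraps these two syscalls as
 * sched_setaffinity()/sched_getaffinity() operating on cpu_set_t. A
 * minimal sketch pinning the caller to CPU 0:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);
 *		if (sched_setaffinity(0, sizeof(set), &set))
 *			perror("sched_setaffinity");
 *
 *		if (sched_getaffinity(0, sizeof(set), &set) == 0)
 *			printf("allowed on CPU0: %d\n", CPU_ISSET(0, &set));
 *		return 0;
 *	}
 */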
/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}
/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);
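
/*
 * Illustrative sketch (not part of the original file) of the preferred
 * pattern the comment above points to: sleep on a waitqueue instead of
 * spinning on yield(). "example_wq" and "example_done" are made-up names.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(example_wq);
 *	static bool example_done;
 *
 *	static void example_waiter(void)
 *	{
 *		wait_event(example_wq, example_done);
 *	}
 *
 *	static void example_producer(void)
 *	{
 *		example_done = true;
 *		wake_up(&example_wq);
 *	}
 */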
/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	int yielded = 0;
	scoped_guard (irq) {
		rq = this_rq();

again:
		p_rq = task_rq(p);
		/*
		 * If we're the only runnable task on the rq and target rq also
		 * has only one task, there's absolutely no point in yielding.
		 */
		if (rq->nr_running == 1 && p_rq->nr_running == 1)
			return -ESRCH;

		guard(double_rq_lock)(rq, p_rq);
		if (task_rq(p) != p_rq)
			goto again;

		if (!curr->sched_class->yield_to_task)
			return 0;

		if (curr->sched_class != p->sched_class)
			return 0;

		if (task_on_cpu(p_rq, p) || !task_is_running(p))
			return 0;

		yielded = curr->sched_class->yield_to_task(rq, p);
		if (yielded) {
			schedstat_inc(rq->yld_count);
			/*
			 * Make p's CPU reschedule; pick_next_entity
			 * takes care of fairness.
			 */
			if (preempt && rq != p_rq)
				resched_curr(p_rq);
		}
	}

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
		break;
	}
	return ret;
}
/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
	}
	return ret;
}
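
/*
 * Userspace view (illustrative only): portable code queries these two
 * syscalls instead of hard-coding the 1..99 FIFO range. A minimal sketch:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int lo = sched_get_priority_min(SCHED_FIFO);
 *		int hi = sched_get_priority_max(SCHED_FIFO);
 *		struct sched_param sp = { .sched_priority = (lo + hi) / 2 };
 *
 *		if (sched_setscheduler(0, SCHED_FIFO, &sp))
 *			perror("sched_setscheduler");
 *		printf("SCHED_FIFO priority range: %d..%d\n", lo, hi);
 *		return 0;
 *	}
 */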
/**
 * sys_sched_rr_get_interval - return the default time-slice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the time-slice value.
 *
 * this syscall writes the default time-slice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}