tg = sched_create_group(&root_task_group); if (IS_ERR(tg)) goto out_free;
kref_init(&ag->kref);
init_rwsem(&ag->lock);
ag->id = atomic_inc_return(&autogroup_seq_nr);
ag->tg = tg; #ifdef CONFIG_RT_GROUP_SCHED /* * Autogroup RT tasks are redirected to the root task group * so we don't have to move tasks around upon policy change, * or flail around trying to allocate bandwidth on the fly. * A bandwidth exception in __sched_setscheduler() allows * the policy change to proceed.
*/
free_rt_sched_group(tg);
tg->rt_se = root_task_group.rt_se;
tg->rt_rq = root_task_group.rt_rq; #endif/* CONFIG_RT_GROUP_SCHED */
tg->autogroup = ag;
out_free:
kfree(ag);
out_fail: if (printk_ratelimit()) {
printk(KERN_WARNING "autogroup_create: %s failure.\n",
ag ? "sched_create_group()" : "kzalloc()");
}
return autogroup_kref_get(&autogroup_default);
}
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{ if (tg != &root_task_group) returnfalse; /* * If we race with autogroup_move_group() the caller can use the old * value of signal->autogroup but in this case sched_move_task() will * be called again before autogroup_kref_put(). * * However, there is no way sched_autogroup_exit_task() could tell us * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
*/ if (p->flags & PF_EXITING) returnfalse;
returntrue;
}
void sched_autogroup_exit_task(struct task_struct *p)
{
	/*
	 * We are going to call exit_notify() and autogroup_move_group() can't
	 * see this thread after that: we can no longer use signal->autogroup.
	 * See the PF_EXITING check in task_wants_autogroup().
	 */
	sched_move_task(p, true);
}
p->signal->autogroup = autogroup_kref_get(ag); /* * We can't avoid sched_move_task() after we changed signal->autogroup, * this process can already run with task_group() == prev->tg or we can * race with cgroup code which can read autogroup = prev under rq->lock. * In the latter case for_each_thread() can not miss a migrating thread, * cpu_cgroup_attach() must not be possible after cgroup_exit() and it * can't be removed from thread list, we hold ->siglock. * * If an exiting thread was already removed from thread list we rely on * sched_autogroup_exit_task().
*/
for_each_thread(p, t)
sched_move_task(t, true);
/* Allocates GFP_KERNEL, cannot be called under any spinlock: */
void sched_autogroup_create_attach(struct task_struct *p)
{
	struct autogroup *ag;

	ag = autogroup_create();
	autogroup_move_group(p, ag);

	/* Drop extra reference added by autogroup_create(): */
	autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);
/* Cannot be called under siglock. Currently has no users: */
void sched_autogroup_detach(struct task_struct *p)
{
	/* Moving back to the default autogroup undoes any prior attach. */
	autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);
int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{ staticunsignedlong next = INITIAL_JIFFIES; struct autogroup *ag; unsignedlong shares; int err, idx;
if (nice < MIN_NICE || nice > MAX_NICE) return -EINVAL;
err = security_task_setnice(current, nice); if (err) return err;
if (nice < 0 && !can_nice(current, nice)) return -EPERM;
/* This is a heavy operation, taking global locks.. */ if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next)) return -EAGAIN;
next = HZ / 10 + jiffies;
ag = autogroup_task_get(p);
/*
 * NOTE(review): unrelated German website boilerplate (a content disclaimer:
 * "the information on this website was compiled to the best of our knowledge;
 * neither completeness, correctness, nor quality is guaranteed; the syntax
 * highlighting and measurement are still experimental") was pasted here,
 * replacing the remainder of proc_sched_autogroup_set_nice(). The function
 * above is truncated after autogroup_task_get() and must be restored from
 * kernel/sched/autogroup.c.
 */