/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical with the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched/core.c changes.
 */
/* * find_process_by_pid - find a process with a matching PID value. * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so * cloned here.
*/ staticinlinestruct task_struct *find_process_by_pid(pid_t pid)
{ return pid ? find_task_by_vpid(pid) : current;
}
/* * check the target process has a UID that matches the current process's
*/ staticbool check_same_owner(struct task_struct *p)
{ conststruct cred *cred = current_cred(), *pcred; bool match;
/* * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsignedint len, unsignedlong __user *user_mask_ptr)
{
cpumask_var_t cpus_allowed, new_mask, effective_mask; struct thread_info *ti; struct task_struct *p; int retval;
if (len < sizeof(new_mask)) return -EINVAL;
if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) return -EFAULT;
cpus_read_lock();
rcu_read_lock();
p = find_process_by_pid(pid); if (!p) {
rcu_read_unlock();
cpus_read_unlock(); return -ESRCH;
}
/* Prevent p going away */
get_task_struct(p);
rcu_read_unlock();
retval = security_task_setscheduler(p); if (retval) goto out_unlock;
/* Record new user-specified CPU set for future reference */
cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
again: /* Compute new global allowed CPU set if necessary */
ti = task_thread_info(p); if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
retval = set_cpus_allowed_ptr(p, effective_mask);
} else {
cpumask_copy(effective_mask, new_mask);
clear_ti_thread_flag(ti, TIF_FPUBOUND);
retval = set_cpus_allowed_ptr(p, new_mask);
}
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed); if (!cpumask_subset(effective_mask, cpus_allowed)) { /* * We must have raced with a concurrent cpuset * update. Just reset the cpus_allowed to the * cpuset's cpus_allowed
*/
cpumask_copy(new_mask, cpus_allowed); goto again;
}
}
out_unlock:
free_cpumask_var(effective_mask);
out_free_new_mask:
free_cpumask_var(new_mask);
out_free_cpus_allowed:
free_cpumask_var(cpus_allowed);
out_put_task:
put_task_struct(p);
cpus_read_unlock(); return retval;
}
/* * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsignedint len, unsignedlong __user *user_mask_ptr)
{ unsignedint real_len;
cpumask_t allowed, mask; int retval; struct task_struct *p;
real_len = sizeof(mask); if (len < real_len) return -EINVAL;
cpus_read_lock();
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid); if (!p) goto out_unlock;
retval = security_task_getscheduler(p); if (retval) goto out_unlock;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.