/* Pick the namespace offset for @clockid; clocks without one pass through. */
switch (clockid) {
case CLOCK_MONOTONIC:
	offset = timespec64_to_ktime(ns_offsets->monotonic);
	break;
case CLOCK_BOOTTIME:
case CLOCK_BOOTTIME_ALARM:
	offset = timespec64_to_ktime(ns_offsets->boottime);
	break;
default:
	return tim;
}

/*
 * Check that @tim value is in [offset, KTIME_MAX + offset]
 * and subtract offset.
 */
if (tim < offset) {
	/*
	 * User can specify @tim *absolute* value - if it's lesser than
	 * the time namespace's offset - it's already expired.
	 */
	tim = 0;
} else {
	tim = ktime_sub(tim, offset);
	/* Clamp on overflow so the host-side timer value stays valid. */
	if (unlikely(tim > KTIME_MAX))
		tim = KTIME_MAX;
}
/** * clone_time_ns - Clone a time namespace * @user_ns: User namespace which owns a new namespace. * @old_ns: Namespace to clone * * Clone @old_ns and set the clone refcount to 1 * * Return: The new namespace or ERR_PTR.
*/ staticstruct time_namespace *clone_time_ns(struct user_namespace *user_ns, struct time_namespace *old_ns)
{ struct time_namespace *ns; struct ucounts *ucounts; int err;
err = -ENOSPC;
ucounts = inc_time_namespaces(user_ns); if (!ucounts) goto fail;
/** * copy_time_ns - Create timens_for_children from @old_ns * @flags: Cloning flags * @user_ns: User namespace which owns a new namespace. * @old_ns: Namespace to clone * * If CLONE_NEWTIME specified in @flags, creates a new timens_for_children; * adds a refcounter to @old_ns otherwise. * * Return: timens_for_children namespace or ERR_PTR.
*/ struct time_namespace *copy_time_ns(unsignedlong flags, struct user_namespace *user_ns, struct time_namespace *old_ns)
{ if (!(flags & CLONE_NEWTIME)) return get_time_ns(old_ns);
/*
 * A time namespace VVAR page has the same layout as the VVAR page which
 * contains the system wide VDSO data.
 *
 * For a normal task the VVAR pages are installed in the normal ordering:
 *     VVAR
 *     PVCLOCK
 *     HVCLOCK
 *     TIMENS <- Not really required
 *
 * Now for a timens task the pages are installed in the following order:
 *     TIMENS
 *     PVCLOCK
 *     HVCLOCK
 *     VVAR
 *
 * The check for vdso_clock->clock_mode is in the unlikely path of
 * the seq begin magic. So for the non-timens case most of the time
 * 'seq' is even, so the branch is not taken.
 *
 * If 'seq' is odd, i.e. a concurrent update is in progress, the extra check
 * for vdso_clock->clock_mode is a non-issue. The task is spin waiting for the
 * update to finish and for 'seq' to become even anyway.
 *
 * Timens page has vdso_clock->clock_mode set to VDSO_CLOCKMODE_TIMENS which
 * enforces the time namespace handling path.
 */
static void timens_setup_vdso_clock_data(struct vdso_clock *vc,
					 struct time_namespace *ns)
{
	struct timens_offset *offset = vc->offset;
	struct timens_offset monotonic = offset_from_ts(ns->offsets.monotonic);
	struct timens_offset boottime = offset_from_ts(ns->offsets.boottime);
/*
 * VM_PFNMAP | VM_IO protect .fault() handler from being called
 * through interfaces like /proc/$pid/mem or
 * process_vm_{readv,writev}() as long as there's no .access()
 * in special_mapping_vmops().
 * For more details check_vma_flags() and __access_remote_vm()
 */
	/* Reaching here means the protection above was bypassed - complain. */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
/*
 * Protects possibly multiple offsets writers racing each other
 * and tasks entering the namespace.
 */
static DEFINE_MUTEX(offset_lock);
/* Install the namespace offsets into every clocksource base. */
for (i = 0; i < CS_BASES; i++)
	timens_setup_vdso_clock_data(&vc[i], ns);

/*
 * NOTE(review): auxiliary clock data appears to get the same per-namespace
 * setup when CONFIG_POSIX_AUX_CLOCKS is enabled - confirm against the
 * enclosing function upstream (header is outside this chunk).
 */
if (IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS)) {
	for (i = 0; i < ARRAY_SIZE(vdata->aux_clock_data); i++)
		timens_setup_vdso_clock_data(&vdata->aux_clock_data[i], ns);
}
/*
 * Set clock offsets for @p's timens_for_children namespace.  All requested
 * offsets are validated first, then applied under offset_lock, so the
 * update does not take partial effect on a validation failure.
 */
int proc_timens_set_offset(struct file *file, struct task_struct *p,
			   struct proc_timens_offset *offsets, int noffsets)
{
	struct ns_common *ns;
	struct time_namespace *time_ns;
	struct timespec64 tp;
	int i, err;

	ns = timens_for_children_get(p);
	if (!ns)
		return -ESRCH;
	time_ns = to_time_ns(ns);

	/* Writing offsets requires CAP_SYS_TIME in the owning user namespace. */
	if (!file_ns_capable(file, time_ns->user_ns, CAP_SYS_TIME)) {
		put_time_ns(time_ns);
		return -EPERM;
	}

	/* Pass 1: validate every offset before touching the namespace. */
	for (i = 0; i < noffsets; i++) {
		struct proc_timens_offset *off = &offsets[i];

		/* Sample the current host time for the targeted clock. */
		switch (off->clockid) {
		case CLOCK_MONOTONIC:
			ktime_get_ts64(&tp);
			break;
		case CLOCK_BOOTTIME:
			ktime_get_boottime_ts64(&tp);
			break;
		default:
			err = -EINVAL;
			goto out;
		}

		err = -ERANGE;

		if (off->val.tv_sec > KTIME_SEC_MAX ||
		    off->val.tv_sec < -KTIME_SEC_MAX)
			goto out;

		tp = timespec64_add(tp, off->val);
		/*
		 * KTIME_SEC_MAX is divided by 2 to be sure that KTIME_MAX is
		 * still unreachable.
		 */
		if (tp.tv_sec < 0 || tp.tv_sec > KTIME_SEC_MAX / 2)
			goto out;
	}

	mutex_lock(&offset_lock);
	/*
	 * NOTE(review): frozen_offsets presumably gets set once a task has
	 * entered the namespace, making the offsets immutable - confirm
	 * against the setter (not visible in this chunk).
	 */
	if (time_ns->frozen_offsets) {
		err = -EACCES;
		goto out_unlock;
	}

	err = 0;
	/* Don't report errors after this line */
	/* Pass 2: apply the already-validated offsets. */
	for (i = 0; i < noffsets; i++) {
		struct proc_timens_offset *off = &offsets[i];
		struct timespec64 *offset = NULL;

		switch (off->clockid) {
		case CLOCK_MONOTONIC:
			offset = &time_ns->offsets.monotonic;
			break;
		case CLOCK_BOOTTIME:
			offset = &time_ns->offsets.boottime;
			break;
		}
/*
 * NOTE(review): the text below is a German website disclaimer that leaked
 * into this file during extraction ("The information on this website was
 * compiled to the best of our knowledge; no guarantee of completeness,
 * correctness or quality is given.  Note: the syntax highlighting and the
 * measurement are still experimental.").  It is not source code; kept as a
 * comment so the file remains parseable.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */