/* * sys_alloc_thread_area: get a yet unused TLS descriptor index.
*/ staticint get_free_idx(void)
{ struct thread_struct *t = ¤t->thread; int idx;
for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) if (desc_empty(&t->tls_array[idx])) return idx + GDT_ENTRY_TLS_MIN; return -ESRCH;
}
staticbool tls_desc_okay(conststruct user_desc *info)
{ /* * For historical reasons (i.e. no one ever documented how any * of the segmentation APIs work), user programs can and do * assume that a struct user_desc that's all zeros except for * entry_number means "no segment at all". This never actually * worked. In fact, up to Linux 3.19, a struct user_desc like * this would create a 16-bit read-write segment with base and * limit both equal to zero. * * That was close enough to "no segment at all" until we * hardened this function to disallow 16-bit TLS segments. Fix * it up by interpreting these zeroed segments the way that they * were almost certainly intended to be interpreted. * * The correct way to ask for "no segment at all" is to specify * a user_desc that satisfies LDT_empty. To keep everything * working, we accept both. * * Note that there's a similar kludge in modify_ldt -- look at * the distinction between modes 1 and 0x11.
*/ if (LDT_empty(info) || LDT_zero(info)) returntrue;
/* * espfix is required for 16-bit data segments, but espfix * only works for LDT segments.
*/ if (!info->seg_32bit) returnfalse;
/* Only allow data segments in the TLS array. */ if (info->contents > 1) returnfalse;
/* * Non-present segments with DPL 3 present an interesting attack * surface. The kernel should handle such segments correctly, * but TLS is very difficult to protect in a sandbox, so prevent * such segments from being created. * * If userspace needs to remove a TLS entry, it can still delete * it outright.
*/ if (info->seg_not_present) returnfalse;
returntrue;
}
staticvoid set_tls_desc(struct task_struct *p, int idx, conststruct user_desc *info, int n)
{ struct thread_struct *t = &p->thread; struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN]; int cpu;
/* * We must not get preempted while modifying the TLS.
*/
cpu = get_cpu();
while (n-- > 0) { if (LDT_empty(info) || LDT_zero(info))
memset(desc, 0, sizeof(*desc)); else
fill_ldt(desc, info);
++info;
++desc;
}
if (t == ¤t->thread)
load_TLS(t, cpu);
put_cpu();
}
/*
 * Set a given TLS descriptor:
 *
 * @p:            task whose TLS array is modified
 * @idx:          GDT slot, or -1 to use info.entry_number (and, if that is
 *                also -1 and @can_allocate is set, to allocate a free slot)
 * @u_info:       userspace descriptor; entry_number is written back when a
 *                slot is allocated
 * @can_allocate: allow automatic slot allocation for idx == -1
 *
 * Returns 0 on success or a negative errno (-EFAULT, -EINVAL, -ESRCH).
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;
	unsigned short __maybe_unused sel, modified_sel;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	if (!tls_desc_okay(&info))
		return -EINVAL;

	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	set_tls_desc(p, idx, &info, 1);

	/*
	 * If DS, ES, FS, or GS points to the modified segment, forcibly
	 * refresh it.  Only needed on x86_64 because x86_32 reloads them
	 * on return to user mode.
	 */
	modified_sel = (idx << 3) | 3;	/* GDT selector with RPL 3 */

	if (p == current) {
#ifdef CONFIG_X86_64
		savesegment(ds, sel);
		if (sel == modified_sel)
			loadsegment(ds, sel);

		savesegment(es, sel);
		if (sel == modified_sel)
			loadsegment(es, sel);

		savesegment(fs, sel);
		if (sel == modified_sel)
			loadsegment(fs, sel);
#endif

		savesegment(gs, sel);
		if (sel == modified_sel)
			load_gs_index(sel);
	} else {
		/*
		 * A remote task's cached FS/GS bases must track the new
		 * descriptor base if its selectors point at this slot.
		 */
#ifdef CONFIG_X86_64
		if (p->thread.fsindex == modified_sel)
			p->thread.fsbase = info.base_addr;

		if (p->thread.gsindex == modified_sel)
			p->thread.gsbase = info.base_addr;
#endif
	}

	return 0;
}
/*
 * NOTE(review): the German text below is extraction residue from a web
 * page (a site disclaimer), not part of the kernel source.  It is kept
 * only as a comment, translated to English, so the file remains valid C:
 * "The information on this web page has been compiled carefully and to
 * the best of our knowledge.  However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */