#define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024)
/* Define a reasonable max cap */
#define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024)

/*
 * File descriptor number for the pidfd for the thread-group leader of
 * the coredumping task installed into the usermode helper's file
 * descriptor table.
 */
#define COREDUMP_PIDFD_NUMBER 3
if (!expand_corename(cn, cn->size + need - free + 1)) goto again;
return -ENOMEM;
}
/*
 * printf-style append into the core name buffer.
 * Returns 0 on success or the error from cn_vprintf() (e.g. -ENOMEM).
 */
static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}
/*
 * Like cn_printf(), but sanitize the appended component so it cannot be
 * used for path traversal: "." and ".." are defanged, empty components are
 * rejected, and '/' characters are replaced with '!'.
 */
static __printf(2, 3) int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
		    (cn->used - cur == 2 && cn->corename[cur] == '.' &&
		     cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	/* Replace path separators so the component stays a single name. */
	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}
/* * coredump_parse will inspect the pattern parameter, and output a name * into corename, which must have space for at least CORENAME_MAX_SIZE * bytes plus one byte for the zero terminator.
*/ staticbool coredump_parse(struct core_name *cn, struct coredump_params *cprm,
size_t **argv, int *argc)
{ conststruct cred *cred = current_cred(); constchar *pat_ptr = core_pattern; bool was_space = false; int pid_in_pattern = 0; int err = 0;
switch (cn->core_type) { case COREDUMP_PIPE: { int argvs = sizeof(core_pattern) / 2;
(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL); if (!(*argv)) returnfalse;
(*argv)[(*argc)++] = 0;
++pat_ptr; if (!(*pat_ptr)) returnfalse; break;
} case COREDUMP_SOCK: { /* skip the @ */
pat_ptr++; if (!(*pat_ptr)) returnfalse; if (*pat_ptr == '@') {
pat_ptr++; if (!(*pat_ptr)) returnfalse;
cn->core_type = COREDUMP_SOCK_REQ;
}
err = cn_printf(cn, "%s", pat_ptr); if (err) returnfalse;
/* Require absolute paths. */ if (cn->corename[0] != '/') returnfalse;
/* * Ensure we can uses spaces to indicate additional * parameters in the future.
*/ if (strchr(cn->corename, ' ')) {
coredump_report_failure("Coredump socket may not %s contain spaces", cn->corename); returnfalse;
}
/* Must not contain ".." in the path. */ if (name_contains_dotdot(cn->corename)) {
coredump_report_failure("Coredump socket may not %s contain '..' spaces", cn->corename); returnfalse;
}
if (strlen(cn->corename) >= UNIX_PATH_MAX) {
coredump_report_failure("Coredump socket path %s too long", cn->corename); returnfalse;
}
/* * Currently no need to parse any other options. * Relevant information can be retrieved from the peer * pidfd retrievable via SO_PEERPIDFD by the receiver or * via /proc/<pid>, using the SO_PEERPIDFD to guard * against pid recycling when opening /proc/<pid>.
*/ returntrue;
} case COREDUMP_FILE: break; default:
WARN_ON_ONCE(true); returnfalse;
}
/* Repeat as long as we have more pattern to process and more output
space */ while (*pat_ptr) { /* * Split on spaces before doing template expansion so that * %e and %E don't get split if they have spaces in them
*/ if (cn->core_type == COREDUMP_PIPE) { if (isspace(*pat_ptr)) { if (cn->used != 0)
was_space = true;
pat_ptr++; continue;
} elseif (was_space) {
was_space = false;
err = cn_printf(cn, "%c", '\0'); if (err) returnfalse;
(*argv)[(*argc)++] = cn->used;
}
} if (*pat_ptr != '%') {
err = cn_printf(cn, "%c", *pat_ptr++);
} else { switch (*++pat_ptr) { /* single % at the end, drop that */ case 0: goto out; /* Double percent, output one percent */ case'%':
err = cn_printf(cn, "%c", '%'); break; /* pid */ case'p':
pid_in_pattern = 1;
err = cn_printf(cn, "%d",
task_tgid_vnr(current)); break; /* global pid */ case'P':
err = cn_printf(cn, "%d",
task_tgid_nr(current)); break; case'i':
err = cn_printf(cn, "%d",
task_pid_vnr(current)); break; case'I':
err = cn_printf(cn, "%d",
task_pid_nr(current)); break; /* uid */ case'u':
err = cn_printf(cn, "%u",
from_kuid(&init_user_ns,
cred->uid)); break; /* gid */ case'g':
err = cn_printf(cn, "%u",
from_kgid(&init_user_ns,
cred->gid)); break; case'd':
err = cn_printf(cn, "%d",
__get_dumpable(cprm->mm_flags)); break; /* signal that caused the coredump */ case's':
err = cn_printf(cn, "%d",
cprm->siginfo->si_signo); break; /* UNIX time of coredump */ case't': {
time64_t time;
time = ktime_get_real_seconds();
err = cn_printf(cn, "%lld", time); break;
} /* hostname */ case'h':
down_read(&uts_sem);
err = cn_esc_printf(cn, "%s",
utsname()->nodename);
up_read(&uts_sem); break; /* executable, could be changed by prctl PR_SET_NAME etc */ case'e':
err = cn_esc_printf(cn, "%s", current->comm); break; /* file name of executable */ case'f':
err = cn_print_exe_file(cn, true); break; case'E':
err = cn_print_exe_file(cn, false); break; /* core limit size */ case'c':
err = cn_printf(cn, "%lu",
rlimit(RLIMIT_CORE)); break; /* CPU the task ran on */ case'C':
err = cn_printf(cn, "%d", cprm->cpu); break; /* pidfd number */ case'F': { /* * Installing a pidfd only makes sense if * we actually spawn a usermode helper.
*/ if (cn->core_type != COREDUMP_PIPE) break;
/* * Note that we'll install a pidfd for the * thread-group leader. We know that task * linkage hasn't been removed yet and even if * this @current isn't the actual thread-group * leader we know that the thread-group leader * cannot be reaped until @current has exited.
*/
cprm->pid = task_tgid(current);
err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER); break;
} default: break;
}
++pat_ptr;
}
if (err) returnfalse;
}
out: /* Backward compatibility with core_uses_pid: * * If core_pattern does not include a %p (as is the default) * and core_uses_pid is set, then .%pid will be appended to
* the filename. Do not do this for piped commands. */ if (cn->core_type == COREDUMP_FILE && !pid_in_pattern && core_uses_pid) return cn_printf(cn, ".%d", task_tgid_vnr(current)) == 0;
returntrue;
}
staticint zap_process(struct signal_struct *signal, int exit_code)
{ struct task_struct *t; int nr = 0;
wait_for_completion_state(&core_state->startup,
TASK_UNINTERRUPTIBLE|TASK_FREEZABLE); /* * Wait for all the threads to become inactive, so that * all the thread context (extended register state, like * fpu etc) gets copied to the memory.
*/
ptr = core_state->dumper.next; while (ptr != NULL) {
wait_task_inactive(ptr->task, TASK_ANY);
ptr = ptr->next;
}
}
spin_lock_irq(¤t->sighand->siglock); if (core_dumped && !__fatal_signal_pending(current))
current->signal->group_exit_code |= 0x80;
next = current->signal->core_state->dumper.next;
current->signal->core_state = NULL;
spin_unlock_irq(¤t->sighand->siglock);
while ((curr = next) != NULL) {
next = curr->next;
task = curr->task; /* * see coredump_task_exit(), curr->task must not see * ->task == NULL before we read ->next.
*/
smp_mb();
curr->task = NULL;
wake_up_process(task);
}
}
static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}
/* * We actually want wait_event_freezable() but then we need * to clear TIF_SIGPENDING and improve dump_interrupted().
*/
wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);
/* * umh_coredump_setup * helper function to customize the process used * to collect the core in userspace. Specifically * it sets up a pipe and installs it as fd 0 (stdin) * for the process. Returns 0 on success, or * PTR_ERR on failure. * Note that it also sets the core limit to 1. This * is a special value that we use to trap recursive * core dumps
*/ staticint umh_coredump_setup(struct subprocess_info *info, struct cred *new)
{ struct file *files[2]; struct coredump_params *cp = (struct coredump_params *)info->data; int err;
if (cp->pid) { struct file *pidfs_file __free(fput) = NULL;
pidfs_file = pidfs_alloc_file(cp->pid, 0); if (IS_ERR(pidfs_file)) return PTR_ERR(pidfs_file);
pidfs_coredump(cp);
/* * Usermode helpers are childen of either * system_unbound_wq or of kthreadd. So we know that * we're starting off with a clean file descriptor * table. So we should always be able to use * COREDUMP_PIDFD_NUMBER as our file descriptor value.
*/
err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0); if (err < 0) return err;
}
err = create_pipe_files(files, 0); if (err) return err;
/* * It is possible that the userspace process which is supposed * to handle the coredump and is listening on the AF_UNIX socket * coredumps. Userspace should just mark itself non dumpable.
*/
file = sock_alloc_file(socket, 0, NULL); if (IS_ERR(file)) returnfalse;
/* * Set the thread-group leader pid which is used for the peer * credentials during connect() below. Then immediately register * it in pidfs...
*/
cprm->pid = task_tgid(current);
retval = pidfs_register_pid(cprm->pid); if (retval) returnfalse;
/* * ... and set the coredump information so userspace has it * available after connect()...
*/
pidfs_coredump(cprm);
/* * We use a simple read to wait for the coredump processing to * finish. Either the socket is closed or we get sent unexpected * data. In both cases, we're done.
*/
n = __kernel_read(file, &(char){ 0 }, 1, NULL); if (n > 0)
coredump_report_failure("Coredump socket had unexpected data"); elseif (n < 0)
coredump_report_failure("Coredump socket failed");
}
if (cprm->limit < binfmt->min_coredump) returnfalse;
if (coredump_force_suid_safe(cprm) && cn->corename[0] != '/') {
coredump_report_failure("this process can only dump core to a fully qualified path, skipping core dump"); returnfalse;
}
/* * Unlink the file if it exists unless this is a SUID * binary - in that case, we're running around with root * privs and don't want to unlink another user's coredump.
*/ if (!coredump_force_suid_safe(cprm)) { /* * If it doesn't exist, that's fine. If there's some * other problem, we'll catch it at the filp_open().
*/
do_unlinkat(AT_FDCWD, getname_kernel(cn->corename));
}
/* * There is a race between unlinking and creating the * file, but if that causes an EEXIST here, that's * fine - another process raced with us while creating * the corefile, and the other process won. To userspace, * what matters is that at least one of the two processes * writes its coredump successfully, not which one.
*/ if (coredump_force_suid_safe(cprm)) { /* * Using user namespaces, normal user tasks can change * their current->fs->root to point to arbitrary * directories. Since the intention of the "only dump * with a fully qualified path" rule is to control where * coredumps may be placed using root privileges, * current->fs->root must not be used. Instead, use the * root directory of init_task.
*/ struct path root;
inode = file_inode(file); if (inode->i_nlink > 1) returnfalse; if (d_unhashed(file->f_path.dentry)) returnfalse; /* * AK: actually i see no reason to not allow this for named * pipes etc, but keep the previous behaviour for now.
*/ if (!S_ISREG(inode->i_mode)) returnfalse; /* * Don't dump core if the filesystem changed owner or mode * of the file during file creation. This is an issue when * a process dumps core while its cwd is e.g. on a vfat * filesystem.
*/
idmap = file_mnt_idmap(file); if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid())) {
coredump_report_failure("Core dump to %s aborted: cannot preserve file owner", cn->corename); returnfalse;
} if ((inode->i_mode & 0677) != 0600) {
coredump_report_failure("Core dump to %s aborted: cannot preserve file permissions", cn->corename); returnfalse;
} if (!(file->f_mode & FMODE_CAN_WRITE)) returnfalse; if (do_truncate(idmap, file->f_path.dentry, 0, 0, file)) returnfalse;
if (cprm->limit == 1) { /* See umh_coredump_setup() which sets RLIMIT_CORE = 1. * * Normally core limits are irrelevant to pipes, since * we're not writing to the file system, but we use * cprm.limit of 1 here as a special value, this is a * consistent way to catch recursive crashes. * We can still crash if the core_pattern binary sets * RLIM_CORE = !1, but it runs as root, and can do * lots of stupid things. * * Note that we use task_tgid_vnr here to grab the pid * of the process group leader. That way we get the * right pid if a thread in a multi-threaded * core_pattern process dies.
*/
coredump_report_failure("RLIMIT_CORE is set to 1, aborting core"); returnfalse;
}
cprm->limit = RLIM_INFINITY;
if (call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC)) {
coredump_report_failure("|%s pipe failed", cn->corename); returnfalse;
}
/* * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would * have this set to NULL.
*/ if (!cprm->file) {
coredump_report_failure("Core dump to |%s disabled", cn->corename); returnfalse;
}
file_start_write(cprm->file);
cn->core_dumped = binfmt->core_dump(cprm); /* * Ensures that file size is big enough to contain the current * file postion. This prevents gdb from complaining about * a truncated file if the last "write" to the file was * dump_skip.
*/ if (cprm->to_skip) {
cprm->to_skip--;
dump_emit(cprm, "", 1);
}
file_end_write(cprm->file);
free_vma_snapshot(cprm); returntrue;
}
staticinlinebool coredump_skip(conststruct coredump_params *cprm, conststruct linux_binfmt *binfmt)
{ if (!binfmt) returntrue; if (!binfmt->core_dump) returntrue; if (!__get_dumpable(cprm->mm_flags)) returntrue; returnfalse;
}
void vfs_coredump(const kernel_siginfo_t *siginfo)
{ struct cred *cred __free(put_cred) = NULL;
size_t *argv __free(kfree) = NULL; struct core_state core_state; struct core_name cn; struct mm_struct *mm = current->mm; struct linux_binfmt *binfmt = mm->binfmt; conststruct cred *old_cred; int argc = 0; struct coredump_params cprm = {
.siginfo = siginfo,
.limit = rlimit(RLIMIT_CORE), /* * We must use the same mm->flags while dumping core to avoid * inconsistency of bit flags, since this flag is not protected * by any locks.
*/
.mm_flags = mm->flags,
.vma_meta = NULL,
.cpu = raw_smp_processor_id(),
};
audit_core_dumps(siginfo->si_signo);
if (coredump_skip(&cprm, binfmt)) return;
cred = prepare_creds(); if (!cred) return; /* * We cannot trust fsuid as being the "true" uid of the process * nor do we know its entire history. We only know it was tainted * so we dump it as root in mode 2, and only into a controlled * environment (pipe handler or fully qualified path).
*/ if (coredump_force_suid_safe(&cprm))
cred->fsuid = GLOBAL_ROOT_UID;
if (coredump_wait(siginfo->si_signo, &core_state) < 0) return;
switch (cn.core_type) { case COREDUMP_FILE: if (!coredump_file(&cn, &cprm, binfmt)) goto close_fail; break; case COREDUMP_PIPE: if (!coredump_pipe(&cn, &cprm, argv, argc)) goto close_fail; break; case COREDUMP_SOCK_REQ:
fallthrough; case COREDUMP_SOCK: if (!coredump_socket(&cn, &cprm)) goto close_fail; break; default:
WARN_ON_ONCE(true); goto close_fail;
}
/* Don't even generate the coredump. */ if (cn.mask & COREDUMP_REJECT) goto close_fail;
/* get us an unshared descriptor table; almost always a no-op */ /* The cell spufs coredump code reads the file descriptor tables */ if (unshare_files()) goto close_fail;
if ((cn.mask & COREDUMP_KERNEL) && !coredump_write(&cn, &cprm, binfmt)) goto close_fail;
coredump_sock_shutdown(cprm.file);
/* Let the parent know that a coredump was generated. */ if (cn.mask & COREDUMP_USERSPACE)
cn.core_dumped = true;
/* * When core_pipe_limit is set we wait for the coredump server * or usermodehelper to finish before exiting so it can e.g., * inspect /proc/<pid>.
*/ if (cn.mask & COREDUMP_WAIT) { switch (cn.core_type) { case COREDUMP_PIPE:
wait_for_dump_helpers(cprm.file); break; case COREDUMP_SOCK_REQ:
fallthrough; case COREDUMP_SOCK:
coredump_sock_wait(cprm.file); break; default: break;
}
}
/* * Core dumping helper functions. These are the only things you should * do on a core-file: use only these functions to write out all the * necessary info.
*/ staticint __dump_emit(struct coredump_params *cprm, constvoid *addr, int nr)
{ struct file *file = cprm->file;
loff_t pos = file->f_pos;
ssize_t n;
if (cprm->written + nr > cprm->limit) return 0; if (dump_interrupted()) return 0;
n = __kernel_write(file, addr, nr, &pos); if (n != nr) return 0;
file->f_pos = pos;
cprm->written += n;
cprm->pos += n;
if (cprm->to_skip) { if (!__dump_skip(cprm, cprm->to_skip)) return 0;
cprm->to_skip = 0;
} if (cprm->written + PAGE_SIZE > cprm->limit) return 0; if (dump_interrupted()) return 0;
pos = file->f_pos;
bvec_set_page(&bvec, page, PAGE_SIZE, 0);
iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
n = __kernel_write_iter(cprm->file, &iter, &pos); if (n != PAGE_SIZE) return 0;
file->f_pos = pos;
cprm->written += PAGE_SIZE;
cprm->pos += PAGE_SIZE;
return 1;
}
/* * If we might get machine checks from kernel accesses during the * core dump, let's get those errors early rather than during the * IO. This is not performance-critical enough to warrant having * all the machine check logic in the iovec paths.
*/ #ifdef copy_mc_to_kernel
if (!locked) { if (mmap_read_lock_killable(current->mm)) goto out;
locked = 1;
}
/* * To avoid having to allocate page tables for virtual address * ranges that have never been used yet, and also to make it * easy to generate sparse core files, use a helper that returns * NULL when encountering an empty page table entry that would * otherwise have been filled with the zero page.
*/
page = get_dump_page(addr, &locked); if (page) { if (locked) {
mmap_read_unlock(current->mm);
locked = 0;
} int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
put_page(page); if (stop) goto out;
} else {
dump_skip(cprm, PAGE_SIZE);
}
if (dump_interrupted()) goto out;
if (!need_resched()) continue; if (locked) {
mmap_read_unlock(current->mm);
locked = 0;
}
cond_resched();
}
ret = 1;
out: if (locked)
mmap_read_unlock(current->mm);
dump_page_free(dump_page); return ret;
} #endif
/*
 * Queue padding so the next emitted data lands on an @align-byte boundary.
 * @align must be a power of two; returns 0 otherwise, 1 on success.
 * The padding is deferred via cprm->to_skip rather than written eagerly.
 */
int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);
	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);
/* * Coredump socket must be located in the initial mount * namespace. Don't give the impression that anything else is * supported right now.
*/ if (current->nsproxy->mnt_ns != init_task.nsproxy->mnt_ns) returnfalse;
/* Must be an absolute path... */ if (core_pattern[1] != '/') { /* ... or the socket request protocol... */ if (core_pattern[1] != '@') returnfalse; /* ... and if so must be an absolute path. */ if (core_pattern[2] != '/') returnfalse;
p = &core_pattern[2];
} else {
p = &core_pattern[1];
}
/* The path obviously cannot exceed UNIX_PATH_MAX. */ if (strlen(p) >= UNIX_PATH_MAX) returnfalse;
/* Must not contain ".." in the path. */ if (name_contains_dotdot(core_pattern)) returnfalse;
returntrue;
}
staticint proc_dostring_coredump(conststruct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos)
{ int error;
ssize_t retval; char old_core_pattern[CORENAME_MAX_SIZE];
if (!write) return proc_dostring(table, write, buffer, lenp, ppos);
/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}
#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1
/* * Decide how much of @vma's contents should be included in a core dump.
*/ staticunsignedlong vma_dump_size(struct vm_area_struct *vma, unsignedlong mm_flags)
{ #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
/* always dump the vdso and vsyscall sections */ if (always_dump_vma(vma)) goto whole;
if (vma->vm_flags & VM_DONTDUMP) return 0;
/* support for DAX */ if (vma_is_dax(vma)) { if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED)) goto whole; if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE)) goto whole; return 0;
}
/* Hugetlb memory check */ if (is_vm_hugetlb_page(vma)) { if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED)) goto whole; if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE)) goto whole; return 0;
}
/* Do not dump I/O mapped devices or special mappings */ if (vma->vm_flags & VM_IO) return 0;
/* By default, dump shared memory if mapped from an anonymous file. */ if (vma->vm_flags & VM_SHARED) { if (file_inode(vma->vm_file)->i_nlink == 0 ?
FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED)) goto whole; return 0;
}
/* Dump segments that have been written to. */ if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE)) goto whole; if (vma->vm_file == NULL) return 0;
if (FILTER(MAPPED_PRIVATE)) goto whole;
/* * If this is the beginning of an executable file mapping, * dump the first page to aid in determining what was mapped here.
*/ if (FILTER(ELF_HEADERS) &&
vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) { if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0) return PAGE_SIZE;
/* * ELF libraries aren't always executable. * We'll want to check whether the mapping starts with the ELF * magic, but not now - we're holding the mmap lock, * so copy_from_user() doesn't work here. * Use a placeholder instead, and fix it up later in * dump_vma_snapshot().
*/ return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
}
#undef FILTER
return 0;
whole: return vma->vm_end - vma->vm_start;
}
/* * Helper function for iterating across a vma list. It ensures that the caller * will visit `gate_vma' prior to terminating the search.
*/ staticstruct vm_area_struct *coredump_next_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct *gate_vma)
{ if (gate_vma && (vma == gate_vma)) return NULL;
vma = vma_next(vmi); if (vma) return vma; return gate_vma;
}
/*
 * Release the VMA snapshot taken by dump_vma_snapshot(): drop the file
 * references held per-VMA and free the metadata array. Safe to call when
 * no snapshot was taken (vma_meta == NULL).
 */
static void free_vma_snapshot(struct coredump_params *cprm)
{
	if (cprm->vma_meta) {
		int i;

		for (i = 0; i < cprm->vma_count; i++) {
			struct file *file = cprm->vma_meta[i].file;

			if (file)
				fput(file);
		}
		kvfree(cprm->vma_meta);
		cprm->vma_meta = NULL;
	}
}
if (vma_meta_lhs->dump_size < vma_meta_rhs->dump_size) return -1; if (vma_meta_lhs->dump_size > vma_meta_rhs->dump_size) return 1; return 0;
}
/* * Under the mmap_lock, take a snapshot of relevant information about the task's * VMAs.
*/ staticbool dump_vma_snapshot(struct coredump_params *cprm)
{ struct vm_area_struct *gate_vma, *vma = NULL; struct mm_struct *mm = current->mm;
VMA_ITERATOR(vmi, mm, 0); int i = 0;
/* * Once the stack expansion code is fixed to not change VMA bounds * under mmap_lock in read mode, this can be changed to take the * mmap_lock in read mode.
*/ if (mmap_write_lock_killable(mm)) returnfalse;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.