/* Per-seq_file iterator state for the BPF task iterator. */
struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	/* Iteration cursor; compared against common->pid_visiting when
	 * resuming (see the task_seq_get_next() fragment below).
	 */
	u32 tid;
};
/* NOTE(review): fragment of a task-iterator "get next" routine; the
 * enclosing function signature is not visible in this chunk, and the
 * braces below do not balance — lines between the first-time branch and
 * the resume branch appear to have been elided. Code kept byte-identical.
 */
if (!*tid) {
	/* The first time, the iterator calls this function. */
	pid = find_pid_ns(common->pid, common->ns);
	/* get_pid_task() takes a reference on the returned task. */
	task = get_pid_task(pid, PIDTYPE_TGID);
	if (!task)
		return NULL;
/* If the control returns to user space and comes back to the
 * kernel again, *tid and common->pid_visiting should be the
 * same for task_seq_start() to pick up the correct task.
 */
if (*tid == common->pid_visiting) {
	pid = find_pid_ns(common->pid_visiting, common->ns);
	task = get_pid_task(pid, PIDTYPE_PID);
	return task;
}
task = find_task_by_pid_ns(common->pid_visiting, common->ns);
if (!task)
	return NULL;
retry:
task = __next_thread(task);
if (!task)
	return NULL;
next_tid = __task_pid_nr_ns(task, PIDTYPE_PID, common->ns);
/* A zero id means the task has no pid in this namespace; skip it. */
if (!next_tid)
	goto retry;
/* Optionally skip threads sharing the leader's files table. */
if (skip_if_dup_files && task->files == task->group_leader->files)
	goto retry;
/* Per-seq_file iterator state for the BPF task-file iterator. */
struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;	/* task whose files are being walked */
	u32 tid;			/* iteration cursor over tasks */
	u32 fd;				/* iteration cursor over fds of @task */
};
/* NOTE(review): fragment of the task-file iterator's "get next" routine;
 * the enclosing function signature and the loop back to "again:" are not
 * visible in this chunk. Code kept byte-identical.
 *
 * If this function returns a non-NULL file object,
 * it held a reference to the task/file.
 * Otherwise, it does not hold any reference.
 */
again:
if (info->task) {
	/* Resume with the task/fd saved from the previous call. */
	curr_task = info->task;
	curr_fd = info->fd;
} else {
	curr_task = task_seq_get_next(&info->common, &info->tid, true);
	if (!curr_task) {
		info->task = NULL;
		return NULL;
	}
	/* set info->task */
	info->task = curr_task;
	/* Same tid as last time: continue from the saved fd; otherwise
	 * start scanning this task's fds from 0.
	 */
	if (saved_tid == info->tid)
		curr_fd = info->fd;
	else
		curr_fd = 0;
}
f = fget_task_next(curr_task, &curr_fd);
if (f) {
	/* set info->fd */
	info->fd = curr_fd;
	return f;
}
/* the current task is done, go to the next task */
put_task_struct(curr_task);
/* Per-seq_file iterator state for the BPF task-VMA iterator. */
struct bpf_iter_seq_task_vma_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;	/* task whose vmas are being walked */
	struct mm_struct *mm;		/* mm of @task; holds an mm_users ref */
	struct vm_area_struct *vma;	/* vma last returned to the caller */
	u32 tid;			/* iteration cursor over tasks */
	/* Range of the vma being processed when mmap_lock was dropped;
	 * used by find_vma() to resume after relocking.
	 * Fix: was "unsignedlong" (missing space) — not a valid C type.
	 */
	unsigned long prev_vm_start;
	unsigned long prev_vm_end;
};
/*
 * Strategy for locating the next vma to visit in the task-VMA iterator.
 * Values are made explicit for clarity; they match the implicit ones.
 */
enum bpf_task_vma_iter_find_op {
	task_vma_iter_first_vma = 0,	/* use find_vma() with addr 0 */
	task_vma_iter_next_vma = 1,	/* use vma_next() with curr_vma */
	task_vma_iter_find_vma = 2,	/* use find_vma() to find next vma */
};
/* NOTE(review): fragment of the task-VMA iterator's "get next" routine;
 * the enclosing function signature and the "finish:" label are not
 * visible in this chunk. The lock/refcount ordering below is delicate,
 * so the code is kept byte-identical.
 *
 * If this function returns a non-NULL vma, it holds a reference to
 * the task_struct, holds a refcount on mm->mm_users, and holds
 * read lock on vma->mm->mmap_lock.
 * If this function returns NULL, it does not hold any reference or
 * lock.
 */
if (info->task) {
	/* Resume with the task/vma/mm saved from the previous call. */
	curr_task = info->task;
	curr_vma = info->vma;
	curr_mm = info->mm;
	/* In case of lock contention, drop mmap_lock to unblock
	 * the writer.
	 *
	 * After relock, call find(mm, prev_vm_end - 1) to find
	 * new vma to process.
	 *
	 * +------+------+-----------+
	 * | VMA1 | VMA2 |   VMA3    |
	 * +------+------+-----------+
	 * |      |      |           |
	 * 4k     8k     16k        400k
	 *
	 * For example, curr_vma == VMA2. Before unlock, we set
	 *
	 *    prev_vm_start = 8k
	 *    prev_vm_end   = 16k
	 *
	 * There are a few cases:
	 *
	 * 1) VMA2 is freed, but VMA3 exists.
	 *
	 *    find_vma() will return VMA3, just process VMA3.
	 *
	 * 2) VMA2 still exists.
	 *
	 *    find_vma() will return VMA2, process VMA2->next.
	 *
	 * 3) no more vma in this mm.
	 *
	 *    Process the next task.
	 *
	 * 4) find_vma() returns a different vma, VMA2'.
	 *
	 *    4.1) If VMA2 covers same range as VMA2', skip VMA2',
	 *         because we already covered the range;
	 *    4.2) VMA2 and VMA2' covers different ranges, process
	 *         VMA2'.
	 */
	if (mmap_lock_is_contended(curr_mm)) {
		info->prev_vm_start = curr_vma->vm_start;
		info->prev_vm_end = curr_vma->vm_end;
		op = task_vma_iter_find_vma;
		mmap_read_unlock(curr_mm);
		if (mmap_read_lock_killable(curr_mm)) {
			/* Interrupted by a fatal signal: drop the mm ref
			 * and bail out.
			 */
			mmput(curr_mm);
			goto finish;
		}
	} else {
		op = task_vma_iter_next_vma;
	}
} else {
again:
	curr_task = task_seq_get_next(&info->common, &info->tid, true);
	if (!curr_task) {
		info->tid++;
		goto finish;
	}
	if (saved_tid != info->tid) {
		/* new task, process the first vma */
		op = task_vma_iter_first_vma;
	} else {
		/* Found the same tid, which means the user space
		 * finished data in previous buffer and read more.
		 * We dropped mmap_lock before returning to user
		 * space, so it is necessary to use find_vma() to
		 * find the next vma to process.
		 */
		op = task_vma_iter_find_vma;
	}
	/* Kernel threads have no mm; skip to the next task. */
	curr_mm = get_task_mm(curr_task);
	if (!curr_mm)
		goto next_task;
	if (mmap_read_lock_killable(curr_mm)) {
		mmput(curr_mm);
		goto finish;
	}
}
switch (op) {
case task_vma_iter_first_vma:
	curr_vma = find_vma(curr_mm, 0);
	break;
case task_vma_iter_next_vma:
	curr_vma = find_vma(curr_mm, curr_vma->vm_end);
	break;
case task_vma_iter_find_vma:
	/* We dropped mmap_lock so it is necessary to use find_vma
	 * to find the next vma. This is similar to the mechanism
	 * in show_smaps_rollup().
	 */
	curr_vma = find_vma(curr_mm, info->prev_vm_end - 1);
	/* case 1) and 4.2) above just use curr_vma */
	/* check for case 2) or case 4.1) above */
	if (curr_vma &&
	    curr_vma->vm_start == info->prev_vm_start &&
	    curr_vma->vm_end == info->prev_vm_end)
		curr_vma = find_vma(curr_mm, curr_vma->vm_end);
	break;
}
if (!curr_vma) {
	/* case 3) above, or case 2) 4.1) with vma->next == NULL */
	mmap_read_unlock(curr_mm);
	mmput(curr_mm);
	goto next_task;
}
/* Save the cursor so the next call can resume from here. */
info->task = curr_task;
info->vma = curr_vma;
info->mm = curr_mm;
return curr_vma;
next_task:
/* Single-task iteration: nothing more to visit once this tid is done. */
if (info->common.type == BPF_TASK_ITER_TID)
	goto finish;
/* NOTE(review): fragment of the task-VMA iterator's seq stop path; the
 * enclosing function signature is not visible in this chunk. Code kept
 * byte-identical.
 */
if (!v) {
	/* Iteration ended normally; flush via the show callback. */
	(void)__task_vma_seq_show(seq, true);
} else {
	/* info->vma has not been seen by the BPF program. If the
	 * user space reads more, task_vma_seq_get_next should
	 * return this vma again. Set prev_vm_start to ~0UL,
	 * so that we don't skip the vma returned by the next
	 * find_vma() (case task_vma_iter_find_vma in
	 * task_vma_seq_get_next()).
	 */
	info->prev_vm_start = ~0UL;
	info->prev_vm_end = info->vma->vm_end;
	/* Drop the lock and references held by get_next(). */
	mmap_read_unlock(info->mm);
	mmput(info->mm);
	info->mm = NULL;
	put_task_struct(info->task);
	info->task = NULL;
}
}
/* NOTE(review): fragment of a kfunc constructor; the enclosing function
 * signature is not visible in this chunk. Code kept byte-identical.
 *
 * is_iter_reg_valid_uninit guarantees that kit hasn't been initialized
 * before, so non-NULL kit->data doesn't point to previously
 * bpf_mem_alloc'd bpf_iter_task_vma_kern_data
 */
kit->data = bpf_mem_alloc(&bpf_global_ma,
			  sizeof(struct bpf_iter_task_vma_kern_data));
if (!kit->data)
	return -ENOMEM;
/* Scope selectors accepted as @flags by the open-coded task iterator. */
enum {
	BPF_TASK_ITER_ALL_PROCS = 0,	/* all processes in the system */
	BPF_TASK_ITER_ALL_THREADS,	/* all threads in the system */
	BPF_TASK_ITER_PROC_THREADS,	/* all threads of a specific process */
};
/* NOTE(review): fragment validating the iterator @flags; the enclosing
 * function signature is not visible in this chunk. Code kept
 * byte-identical.
 */
switch (flags) {
case BPF_TASK_ITER_ALL_THREADS:
case BPF_TASK_ITER_ALL_PROCS:
	/* System-wide modes need no target task. */
	break;
case BPF_TASK_ITER_PROC_THREADS:
	/* Per-process mode requires a target task. */
	if (!task__nullable)
		return -EINVAL;
	break;
default:
	return -EINVAL;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.