/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
staticvoid exe_add_tsk(struct taskstats *stats, struct task_struct *tsk)
{ /* No idea if I'm allowed to access that here, now. */ struct file *exe_file = get_task_exe_file(tsk);
if (exe_file) { /* Following cp_new_stat64() in stat.c . */
stats->ac_exe_dev =
huge_encode_dev(exe_file->f_inode->i_sb->s_dev);
stats->ac_exe_inode = exe_file->f_inode->i_ino;
fput(exe_file);
} else {
stats->ac_exe_dev = 0;
stats->ac_exe_inode = 0;
}
}
/*
 * fill_stats - populate @stats with per-task accounting data for @tsk.
 *
 * Zeroes the structure first, then lets each accounting subsystem fill
 * in its portion via a call of the form:
 *
 *	per-task-foo(stats, tsk);
 */
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));

	delayacct_add_tsk(stats, tsk);

	/* basic accounting fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* extended accounting fields */
	xacct_add_tsk(stats, tsk);

	/* executable info (device + inode of the task's exe file) */
	exe_add_tsk(stats, tsk);
}
/* * Add additional stats from live tasks except zombie thread group * leaders who are already counted with the dead tasks
*/
rcu_read_lock();
first = find_task_by_vpid(tgid);
if (!first || !lock_task_sighand(first, &flags)) goto out;
if (first->signal->stats)
memcpy(stats, first->signal->stats, sizeof(*stats)); else
memset(stats, 0, sizeof(*stats));
start_time = ktime_get_ns();
for_each_thread(first, tsk) { if (tsk->exit_state) continue; /* * Accounting subsystem can call its functions here to * fill in relevant parts of struct taskstsats as follows * * per-task-foo(stats, tsk);
*/
delayacct_add_tsk(stats, tsk);
/* calculate task elapsed time in nsec */
delta = start_time - tsk->start_time; /* Convert to micro seconds */
do_div(delta, NSEC_PER_USEC);
stats->ac_etime += delta;
spin_lock_irqsave(&tsk->sighand->siglock, flags); if (!tsk->signal->stats) goto ret;
/* * Each accounting subsystem calls its functions here to * accumalate its per-task stats for tsk, into the per-tgid structure * * per-task-foo(tsk->signal->stats, tsk);
*/
delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
spin_unlock_irqrestore(&tsk->sighand->siglock, flags); return;
}
staticint add_del_listener(pid_t pid, conststruct cpumask *mask, int isadd)
{ struct listener_list *listeners; struct listener *s, *tmp, *s2; unsignedint cpu; int ret = 0;
if (!cpumask_subset(mask, cpu_possible_mask)) return -EINVAL;
if (current_user_ns() != &init_user_ns) return -EINVAL;
if (task_active_pid_ns(current) != &init_pid_ns) return -EINVAL;
if (isadd == REGISTER) {
for_each_cpu(cpu, mask) {
s = kmalloc_node(sizeof(struct listener),
GFP_KERNEL, cpu_to_node(cpu)); if (!s) {
ret = -ENOMEM; goto cleanup;
}
s->pid = pid;
s->valid = 1;
/*
 * parse - extract a cpumask from a TASKSTATS_CMD_ATTR_*_CPUMASK
 * netlink attribute.
 *
 * Returns 1 when @na is absent, 0 on a successfully parsed cpulist,
 * or a negative errno (-E2BIG, -EINVAL, -ENOMEM, or the error from
 * cpulist_parse()).
 */
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *buf;
	int len, ret;

	if (na == NULL)
		return 1;

	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* nla_strscpy() guarantees NUL termination within len bytes */
	nla_strscpy(buf, na, len);
	ret = cpulist_parse(buf, mask);
	kfree(buf);
	return ret;
}
staticstruct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{ struct nlattr *na, *ret; int aggr;
/* Pairs with smp_store_release() below. */
stats = smp_load_acquire(&sig->stats); if (stats || thread_group_empty(tsk)) return stats;
/* No problem if kmem_cache_zalloc() fails */
stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
spin_lock_irq(&tsk->sighand->siglock);
stats = sig->stats; if (!stats) { /* * Pairs with smp_store_release() above and order the * kmem_cache_zalloc().
*/
smp_store_release(&sig->stats, stats_new);
stats = stats_new;
stats_new = NULL;
}
spin_unlock_irq(&tsk->sighand->siglock);
if (stats_new)
kmem_cache_free(taskstats_cache, stats_new);
return stats;
}
/* Send pid data out on exit */ void taskstats_exit(struct task_struct *tsk, int group_dead)
{ int rc; struct listener_list *listeners; struct taskstats *stats; struct sk_buff *rep_skb;
size_t size; int is_thread_group;
if (!family_registered) return;
/* * Size includes space for nested attributes
*/
size = taskstats_packet_size();
is_thread_group = !!taskstats_tgid_alloc(tsk); if (is_thread_group) { /* PID + STATS + TGID + STATS */
size = 2 * size; /* fill the tsk->signal->stats structure */
fill_tgid_exit(tsk);
}
listeners = raw_cpu_ptr(&listener_array); if (list_empty(&listeners->list)) return;
/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);
¤ Dauer der Verarbeitung: 0.3 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.