/*
 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 * is the tgid last observed corresponding to pid=i.
 */
static int *tgid_map;

/* The maximum valid index into tgid_map. */
static size_t tgid_map_max;
#define SAVED_CMDLINES_DEFAULT 128 #define NO_CMDLINE_MAP UINT_MAX /* * Preemption must be disabled before acquiring trace_cmdline_lock. * The various trace_arrays' max_lock must be acquired in a context * where interrupt is disabled.
*/ static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; struct saved_cmdlines_buffer { unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; unsigned *map_cmdline_to_pid; unsigned cmdline_num; int cmdline_idx; char saved_cmdlines[];
}; staticstruct saved_cmdlines_buffer *savedcmd;
/*
 * Holds the size of a cmdline and pid element: one TASK_COMM_LEN comm slot
 * plus one entry of the map_cmdline_to_pid reverse-map array.
 */
#define SAVED_CMDLINE_MAP_ELEMENT_SIZE(s) \
	(TASK_COMM_LEN + sizeof((s)->map_cmdline_to_pid[0]))
staticstruct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsignedint val)
{ struct saved_cmdlines_buffer *s; struct page *page; int orig_size, size; int order;
/* Figure out how much is needed to hold the given number of cmdlines */
orig_size = sizeof(*s) + val * SAVED_CMDLINE_MAP_ELEMENT_SIZE(s);
order = get_order(orig_size);
size = 1 << (order + PAGE_SHIFT);
page = alloc_pages(GFP_KERNEL, order); if (!page) return NULL;
s = page_address(page);
kmemleak_alloc(s, size, 1, GFP_KERNEL);
memset(s, 0, sizeof(*s));
/* Round up to actual allocation */
val = (size - sizeof(*s)) / SAVED_CMDLINE_MAP_ELEMENT_SIZE(s);
s->cmdline_num = val;
/* Place map_cmdline_to_pid array right after saved_cmdlines */
s->map_cmdline_to_pid = (unsigned *)&s->saved_cmdlines[val * TASK_COMM_LEN];
/*
 * Create the boot-time saved-cmdlines buffer with the default capacity.
 * Returns 0 on success, -ENOMEM if the buffer could not be allocated.
 */
int trace_create_savedcmd(void)
{
	savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
	if (!savedcmd)
		return -ENOMEM;

	return 0;
}
int trace_save_cmdline(struct task_struct *tsk)
{ unsigned tpid, idx;
/* treat recording of idle task as a success */ if (!tsk->pid) return 1;
tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
/* * It's not the end of the world if we don't get * the lock, but we also don't want to spin * nor do we want to disable interrupts, * so if we miss here, then better luck next time. * * This is called within the scheduler and wake up, so interrupts * had better been disabled and run queue lock been held.
*/
lockdep_assert_preemption_disabled(); if (!arch_spin_trylock(&trace_cmdline_lock)) return 0;
/*
 * Return a pointer into tgid_map for @pid, or NULL if the map is not
 * allocated or @pid is out of range. Negative pids convert to huge
 * unsigned values in the comparison below and are therefore rejected.
 */
static int *trace_find_tgid_ptr(int pid)
{
	/*
	 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
	 * if we observe a non-NULL tgid_map then we also observe the correct
	 * tgid_map_max.
	 */
	int *map = smp_load_acquire(&tgid_map);

	if (unlikely(!map || pid > tgid_map_max))
		return NULL;

	return &map[pid];
}
/* Look up the last tgid observed for @pid; 0 means "unknown". */
int trace_find_tgid(int pid)
{
	int *entry = trace_find_tgid_ptr(pid);

	if (!entry)
		return 0;

	return *entry;
}
/*
 * Record tsk->tgid in the pid->tgid map.
 *
 * Returns 1 on success (the idle task counts as success without being
 * recorded), 0 if the map is unallocated or the pid is out of range.
 */
static int trace_save_tgid(struct task_struct *tsk)
{
	int *ptr;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	ptr = trace_find_tgid_ptr(tsk->pid);
	if (!ptr)
		return 0;

	*ptr = tsk->tgid;
	return 1;
}
/*
 * Decide whether taskinfo recording should be skipped: either no valid
 * record flag was requested, or this CPU has nothing pending to save.
 */
static bool tracing_record_taskinfo_skip(int flags)
{
	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
		return true;
	if (!__this_cpu_read(trace_taskinfo_save))
		return true;
	return false;
}
/**
 * tracing_record_taskinfo - record the task info of a task
 *
 * @task:  task to record
 * @flags: TRACE_RECORD_CMDLINE for recording comm
 *         TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
	bool done = true;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	if (flags & TRACE_RECORD_CMDLINE)
		done = trace_save_cmdline(task);
	if (flags & TRACE_RECORD_TGID)
		done = trace_save_tgid(task) && done;

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}
/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev:  previous task during sched_switch
 * @next:  next task during sched_switch
 * @flags: TRACE_RECORD_CMDLINE for recording comm
 *         TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags)
{
	bool done = true;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	if (flags & TRACE_RECORD_CMDLINE) {
		done = trace_save_cmdline(prev);
		done = trace_save_cmdline(next) && done;
	}
	if (flags & TRACE_RECORD_TGID) {
		done = trace_save_tgid(prev) && done;
		done = trace_save_tgid(next) && done;
	}

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}
/* Convenience wrapper: record only the comm of @task. */
void tracing_record_cmdline(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}
/* * Pairs with smp_load_acquire() in * trace_find_tgid_ptr() to ensure that if it observes * the tgid_map we just allocated then it also observes * the corresponding tgid_map_max value.
*/
smp_store_release(&tgid_map, map); return 0;
}
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.1Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.