staticinlineunion lower_chunk *get_lower_chunk(struct trace_pid_list *pid_list)
{ union lower_chunk *chunk;
lockdep_assert_held(&pid_list->lock);
if (!pid_list->lower_list) return NULL;
chunk = pid_list->lower_list;
pid_list->lower_list = chunk->next;
pid_list->free_lower_chunks--;
WARN_ON_ONCE(pid_list->free_lower_chunks < 0);
chunk->next = NULL; /* * If a refill needs to happen, it can not happen here * as the scheduler run queue locks are held.
*/ if (pid_list->free_lower_chunks <= CHUNK_REALLOC)
irq_work_queue(&pid_list->refill_irqwork);
return chunk;
}
staticinlineunion upper_chunk *get_upper_chunk(struct trace_pid_list *pid_list)
{ union upper_chunk *chunk;
lockdep_assert_held(&pid_list->lock);
if (!pid_list->upper_list) return NULL;
chunk = pid_list->upper_list;
pid_list->upper_list = chunk->next;
pid_list->free_upper_chunks--;
WARN_ON_ONCE(pid_list->free_upper_chunks < 0);
chunk->next = NULL; /* * If a refill needs to happen, it can not happen here * as the scheduler run queue locks are held.
*/ if (pid_list->free_upper_chunks <= CHUNK_REALLOC)
irq_work_queue(&pid_list->refill_irqwork);
return chunk;
}
/*
 * put_lower_chunk - return a lower chunk to the pid_list free pool.
 * @pid_list: the list owning the free pool.
 * @chunk: the now-unused lower chunk to recycle.
 *
 * Must be called with pid_list->lock held (asserted below).
 *
 * NOTE(review): the body of this function appears truncated in this
 * extract -- only the lockdep assertion is visible.  Presumably the
 * chunk is pushed back onto pid_list->lower_list and the free counter
 * incremented; confirm against the upstream source.
 */
staticinlinevoid put_lower_chunk(struct trace_pid_list *pid_list, union lower_chunk *chunk)
{
lockdep_assert_held(&pid_list->lock);
staticinlinebool upper_empty(union upper_chunk *chunk)
{ /* * If chunk->data has no lower chunks, it will be the same * as a zeroed bitmask.
*/ return bitmap_empty((unsignedlong *)chunk->data, BITS_PER_TYPE(chunk->data));
}
/*
 * pid_split - split a pid into the upper1/upper2/lower indexes used to
 * address the two-level chunk tree.
 * @pid: the pid to split.
 * @upper1: set to the index into pid_list->upper[].
 * @upper2: set to the index into the upper chunk's data[].
 * @lower: set to the bit index within the lower chunk.
 *
 * Returns 0 on success, -1 if @pid is out of range.
 *
 * NOTE(review): this function appears truncated in this extract -- the
 * actual index computation and the "return 0" success path are missing.
 * Only the compile-time range check and the bad-pid guard are visible.
 */
staticinlineint pid_split(unsignedint pid, unsignedint *upper1, unsignedint *upper2, unsignedint *lower)
{ /* MAX_PID should cover all pids */
BUILD_BUG_ON(MAX_PID < PID_MAX_LIMIT);
/* In case a bad pid is passed in, then fail */ if (unlikely(pid >= MAX_PID)) return -1;
/** * trace_pid_list_is_set - test if the pid is set in the list * @pid_list: The pid list to test * @pid: The pid to see if set in the list. * * Tests if @pid is set in the @pid_list. This is usually called * from the scheduler when a task is scheduled. Its pid is checked * if it should be traced or not. * * Return true if the pid is in the list, false otherwise.
*/ bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsignedint pid)
{ union upper_chunk *upper_chunk; union lower_chunk *lower_chunk; unsignedlong flags; unsignedint upper1; unsignedint upper2; unsignedint lower; bool ret = false;
if (!pid_list) returnfalse;
if (pid_split(pid, &upper1, &upper2, &lower) < 0) returnfalse;
raw_spin_lock_irqsave(&pid_list->lock, flags);
upper_chunk = pid_list->upper[upper1]; if (upper_chunk) {
lower_chunk = upper_chunk->data[upper2]; if (lower_chunk)
ret = test_bit(lower, lower_chunk->data);
}
raw_spin_unlock_irqrestore(&pid_list->lock, flags);
return ret;
}
/** * trace_pid_list_set - add a pid to the list * @pid_list: The pid list to add the @pid to. * @pid: The pid to add. * * Adds @pid to @pid_list. This is usually done explicitly by a user * adding a task to be traced, or indirectly by the fork function * when children should be traced and a task's pid is in the list. * * Return 0 on success, negative otherwise.
*/ int trace_pid_list_set(struct trace_pid_list *pid_list, unsignedint pid)
{ union upper_chunk *upper_chunk; union lower_chunk *lower_chunk; unsignedlong flags; unsignedint upper1; unsignedint upper2; unsignedint lower; int ret;
if (!pid_list) return -ENODEV;
if (pid_split(pid, &upper1, &upper2, &lower) < 0) return -EINVAL;
raw_spin_lock_irqsave(&pid_list->lock, flags);
upper_chunk = pid_list->upper[upper1]; if (!upper_chunk) {
upper_chunk = get_upper_chunk(pid_list); if (!upper_chunk) {
ret = -ENOMEM; goto out;
}
pid_list->upper[upper1] = upper_chunk;
}
lower_chunk = upper_chunk->data[upper2]; if (!lower_chunk) {
lower_chunk = get_lower_chunk(pid_list); if (!lower_chunk) {
ret = -ENOMEM; goto out;
}
upper_chunk->data[upper2] = lower_chunk;
}
set_bit(lower, lower_chunk->data);
ret = 0;
out:
raw_spin_unlock_irqrestore(&pid_list->lock, flags); return ret;
}
/** * trace_pid_list_clear - remove a pid from the list * @pid_list: The pid list to remove the @pid from. * @pid: The pid to remove. * * Removes @pid from @pid_list. This is usually done explicitly by a user * removing tasks from tracing, or indirectly by the exit function * when a task that is set to be traced exits. * * Return 0 on success, negative otherwise.
*/ int trace_pid_list_clear(struct trace_pid_list *pid_list, unsignedint pid)
{ union upper_chunk *upper_chunk; union lower_chunk *lower_chunk; unsignedlong flags; unsignedint upper1; unsignedint upper2; unsignedint lower;
if (!pid_list) return -ENODEV;
if (pid_split(pid, &upper1, &upper2, &lower) < 0) return -EINVAL;
raw_spin_lock_irqsave(&pid_list->lock, flags);
upper_chunk = pid_list->upper[upper1]; if (!upper_chunk) goto out;
lower_chunk = upper_chunk->data[upper2]; if (!lower_chunk) goto out;
clear_bit(lower, lower_chunk->data);
/* if there's no more bits set, add it to the free list */ if (find_first_bit(lower_chunk->data, LOWER_MAX) >= LOWER_MAX) {
put_lower_chunk(pid_list, lower_chunk);
upper_chunk->data[upper2] = NULL; if (upper_empty(upper_chunk)) {
put_upper_chunk(pid_list, upper_chunk);
pid_list->upper[upper1] = NULL;
}
}
out:
raw_spin_unlock_irqrestore(&pid_list->lock, flags); return 0;
}
/** * trace_pid_list_next - return the next pid in the list * @pid_list: The pid list to examine. * @pid: The pid to start from * @next: The pointer to place the pid that is set starting from @pid. * * Looks for the next consecutive pid that is in @pid_list starting * at the pid specified by @pid. If one is set (including @pid), then * that pid is placed into @next. * * Return 0 when a pid is found, -1 if there are no more pids included.
 *
 * NOTE(review): this function appears truncated in this extract -- the
 * locked search over the upper/lower chunk tree and the store into
 * *next are missing; only the argument validation is visible.
 * Confirm the full body against the upstream source.
*/ int trace_pid_list_next(struct trace_pid_list *pid_list, unsignedint pid, unsignedint *next)
{ union upper_chunk *upper_chunk; union lower_chunk *lower_chunk; unsignedlong flags; unsignedint upper1; unsignedint upper2; unsignedint lower;
if (!pid_list) return -ENODEV;
if (pid_split(pid, &upper1, &upper2, &lower) < 0) return -EINVAL;
/**
 * trace_pid_list_first - return the first pid in the list
 * @pid_list: The pid list to examine.
 * @pid: The pointer to place the first pid found that is set.
 *
 * Looks for the first pid that is set in @pid_list, and places it
 * into @pid if found.
 *
 * Return 0 when a pid is found, -1 if there are no pids set.
 */
int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid)
{
	/* The first set pid is just the next set pid at or after zero */
	return trace_pid_list_next(pid_list, 0, pid);
}
/*
 * pid_list_refill_irq - irq_work handler that refills the chunk free pools.
 * @iwork: embedded irq_work, used to recover the owning trace_pid_list.
 *
 * Queued by get_lower_chunk()/get_upper_chunk() when the free pools run
 * low, because allocation can not be done while scheduler run queue
 * locks are held.
 *
 * NOTE(review): this function is badly truncated in this extract.  The
 * "again:" label targeted by the goto below, the code that computes
 * upper_count/lower_count, and the allocation loop that links newly
 * allocated chunks via upper_next/lower_next are all missing -- as
 * written here, upper_count and lower_count are read uninitialized.
 * Restore from the upstream source before relying on this body.
 */
staticvoid pid_list_refill_irq(struct irq_work *iwork)
{ struct trace_pid_list *pid_list = container_of(iwork, struct trace_pid_list,
refill_irqwork); union upper_chunk *upper = NULL; union lower_chunk *lower = NULL; union upper_chunk **upper_next = &upper; union lower_chunk **lower_next = &lower; int upper_count; int lower_count; int ucnt = 0; int lcnt = 0;
/* * On success of allocating all the chunks, both counters * will be less than zero. If they are not, then an allocation * failed, and we should not try again.
*/ if (upper_count >= 0 || lower_count >= 0) return; /* * When the locks were released, free chunks could have * been used and allocation needs to be done again. Might as * well allocate it now.
*/ goto again;
}
/** * trace_pid_list_alloc - create a new pid_list * * Allocates a new pid_list to store pids into. * * Returns the pid_list on success, NULL otherwise.
 *
 * NOTE(review): this function appears truncated in this extract -- the
 * lock/irq_work initialization, the initial chunk-pool fill, and the
 * return statement are missing; only the allocation is visible.
*/ struct trace_pid_list *trace_pid_list_alloc(void)
{ struct trace_pid_list *pid_list; int i;
/* According to linux/threads.h, pids can be no bigger than 30 bits */
WARN_ON_ONCE(init_pid_ns.pid_max > (1 << 30));
pid_list = kzalloc(sizeof(*pid_list), GFP_KERNEL); if (!pid_list) return NULL;
/** * trace_pid_list_free - Frees an allocated pid_list. * @pid_list: The pid list to free. * * Frees the memory for a pid_list that was allocated.
 *
 * NOTE(review): this function appears truncated in this extract -- the
 * body of the lower_list drain loop, the upper_list drain, the walk of
 * pid_list->upper[] freeing in-use chunks, and the final kfree of the
 * pid_list itself are missing.  Confirm against the upstream source.
*/ void trace_pid_list_free(struct trace_pid_list *pid_list)
{ union upper_chunk *upper; union lower_chunk *lower; int i, j;
if (!pid_list) return;
/* Make sure no refill work is still in flight before tearing down */
irq_work_sync(&pid_list->refill_irqwork);
while (pid_list->lower_list) { union lower_chunk *chunk;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.