/* * Fills up the given @cpu/@tsk with breakpoints, only leaving @skip slots free. * * Returns true if this can be called again, continuing at @id.
*/ staticbool fill_bp_slots(struct kunit *test, int *id, int cpu, struct task_struct *tsk, intskip)
{ for (int i = 0; i < get_test_bp_slots() - skip; ++i)
fill_one_bp_slot(test, id, cpu, tsk);
/* Test that breakpoint slots are accounted per CPU, independently. */
static void test_many_cpus(struct kunit *test)
{
	int idx = 0;
	int cpu;

	/* Test that CPUs are independent. */
	for_each_online_cpu(cpu) {
		bool do_continue = fill_bp_slots(test, &idx, cpu, NULL, 0);

		/* This CPU is now full; one more registration must fail. */
		TEST_EXPECT_NOSPC(register_test_bp(cpu, NULL, idx));
		/* Stop once fill_bp_slots() says no capacity for another round. */
		if (!do_continue)
			break;
	}
}
/* Test slot accounting for one task targeted on all CPUs (cpu == -1). */
static void test_one_task_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, -1, current, 0);
	/* All slots are consumed: neither task- nor CPU-target may register. */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Remove one and adding back CPU-target should work. */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}
/* Test that two distinct tasks' breakpoint slots are accounted independently. */
static void test_two_tasks_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	/* Test that tasks are independent. */
	fill_bp_slots(test, &idx, -1, current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	/* Both tasks are full: no further registration may succeed. */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Remove one from first task and adding back CPU-target should not work. */
	unregister_test_bp(&test_bps[0]);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}
/* Test slot accounting for one task pinned to a single CPU. */
static void test_one_task_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/*
	 * Remove one and adding back CPU-target should work; this case is
	 * special vs. above because the task's constraints are CPU-dependent.
	 */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}
/*
 * Test mixing task-on-CPU, task-wide, and CPU-target breakpoints for one task.
 *
 * NOTE(review): the original chunk used tsk_on_cpu_idx and cpu_idx without any
 * visible declaration or initialization, and its setup section plus closing
 * brace were missing. The setup below is a reconstruction inferred from how
 * those indices are consumed later — confirm against the complete file.
 */
static void test_one_task_mixed(struct kunit *test)
{
	int tsk_on_cpu_idx, cpu_idx;
	int idx = 0;

	/* One task breakpoint pinned to get_test_cpu(0); fill the rest task-wide. */
	tsk_on_cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_bp_slots(test, &idx, -1, current, 1);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));

	/* We should still be able to use up another CPU's slots. */
	cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(1), NULL);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));

	/* Transitioning back to task target on all CPUs. */
	unregister_test_bp(&test_bps[tsk_on_cpu_idx]);
	/* Still have a CPU target breakpoint in get_test_cpu(1). */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	/* Remove it and try again. */
	unregister_test_bp(&test_bps[cpu_idx]);
	fill_one_bp_slot(test, &idx, -1, current);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.