/* Simple cross correlation to help find phase shift. Not a performant impl.
 *
 * Slides the first `signal_length` samples of `f` across `g` and returns the
 * dot product at each offset: result[i] = sum_j f[j] * g[i + j], for
 * i in [0, f.size() - signal_length].
 *
 * @param f             reference signal; only its first `signal_length`
 *                      samples are used
 * @param g             signal to search; must have at least
 *                      f.size() - 1 + signal_length accessible samples
 *                      (`.at()` throws std::out_of_range otherwise)
 * @param signal_length window length in samples
 * @return one correlation value per window offset; empty when
 *         `signal_length` exceeds f.size() (the unguarded subtraction would
 *         underflow size_t and request an enormous allocation)
 */
std::vector<double>
cross_correlate(const std::vector<double> & f, const std::vector<double> & g,
                size_t signal_length)
{
  if (signal_length > f.size()) {
    return {};
  }
  /* the length we sweep our window through to find the cross correlation */
  size_t sweep_length = f.size() - signal_length + 1;
  std::vector<double> correlation;
  correlation.reserve(sweep_length);
  for (size_t i = 0; i < sweep_length; i++) {
    double accumulator = 0.0;
    for (size_t j = 0; j < signal_length; j++) {
      /* .at() keeps the access bounds-checked if g is shorter than needed */
      accumulator += f.at(j) * g.at(i + j);
    }
    correlation.push_back(accumulator);
  }
  return correlation;
}
/* best effort discovery of phase shift between output and (looped) input.
 * Returns the window offset at which the cross correlation peaks; ties
 * resolve to the earliest offset. Throws std::out_of_range if the
 * correlation result is empty. */
size_t
find_phase(std::vector<double> & output_frames,
           std::vector<double> & input_frames, size_t signal_length)
{
  std::vector<double> correlation =
      cross_correlate(output_frames, input_frames, signal_length);
  /* linear scan for the strongest correlation */
  size_t best_offset = 0;
  double best_value = correlation.at(0);
  for (size_t offset = 1; offset < correlation.size(); offset++) {
    double candidate = correlation.at(offset);
    if (candidate > best_value) {
      best_value = candidate;
      best_offset = offset;
    }
  }
  return best_offset;
}
/* Scale all frames by the peak magnitude so the result lies in [-1, 1].
 *
 * Fixes in this revision:
 *  - `constdouble` was a broken token (missing space); now `const double`.
 *  - bare `abs` can bind to the C integer overload and truncate doubles;
 *    `fabs` always operates on doubles.
 *  - empty input previously dereferenced max_element's end iterator
 *    (undefined behavior); now returns an empty vector.
 *
 * @param frames samples to normalize (not modified)
 * @return normalized copy of `frames`; NOTE(review): an all-zero buffer
 *         (peak == 0.0) still divides by zero and yields NaN — callers are
 *         expected to pass a buffer containing the test tone; confirm.
 */
std::vector<double>
normalize_frames(std::vector<double> & frames)
{
  if (frames.empty()) {
    return {};
  }
  double max = fabs(
      *std::max_element(frames.begin(), frames.end(),
                        [](double a, double b) { return fabs(a) < fabs(b); }));
  std::vector<double> normalized_frames;
  normalized_frames.reserve(frames.size());
  for (const double frame : frames) {
    normalized_frames.push_back(frame / max);
  }
  return normalized_frames;
}
/* heuristic comparison of aligned output and input signals, gets flaky if
* TONE_FREQUENCY is too high */ void
compare_signals(std::vector<double> & output_frames,
std::vector<double> & input_frames)
{
ASSERT_EQ(output_frames.size(), input_frames.size())
<< "#Output frames != #input frames";
size_t num_frames = output_frames.size();
std::vector<double> normalized_output_frames =
normalize_frames(output_frames);
std::vector<double> normalized_input_frames = normalize_frames(input_frames);
/* calculate mean absolute errors */ /* mean absolute errors between output and input */ double io_mas = 0.0; /* mean absolute errors between output and silence */ double output_silence_mas = 0.0; /* mean absolute errors between input and silence */ double input_silence_mas = 0.0; for (size_t i = 0; i < num_frames; i++) {
io_mas +=
abs(normalized_output_frames.at(i) - normalized_input_frames.at(i));
output_silence_mas += abs(normalized_output_frames.at(i));
input_silence_mas += abs(normalized_input_frames.at(i));
}
io_mas /= num_frames;
output_silence_mas /= num_frames;
input_silence_mas /= num_frames;
ASSERT_LT(io_mas, output_silence_mas)
<< "Error between output and input should be less than output and " "silence!";
ASSERT_LT(io_mas, input_silence_mas)
<< "Error between output and input should be less than output and " "silence!";
/* make sure extrema are in (roughly) correct location */ /* number of maxima + minama expected in the frames*/ constlong NUM_EXTREMA =
2 * TONE_FREQUENCY * NUM_FRAMES_TO_OUTPUT / SAMPLE_FREQUENCY; /* expected index of first maxima */ constlong FIRST_MAXIMUM_INDEX = SAMPLE_FREQUENCY / TONE_FREQUENCY / 4; /* Threshold we expect all maxima and minima to be above or below. Ideally the extrema would be 1 or -1, but particularly at the start of loopback
the values seen can be significantly lower. */ constdouble THRESHOLD = 0.5;
for (size_t i = 0; i < NUM_EXTREMA; i++) { bool is_maximum = i % 2 == 0; /* expected offset to current extreme: i * stide between extrema */
size_t offset = i * SAMPLE_FREQUENCY / TONE_FREQUENCY / 2; if (is_maximum) {
ASSERT_GT(normalized_output_frames.at(FIRST_MAXIMUM_INDEX + offset),
THRESHOLD)
<< "Output frames have unexpected missing maximum!";
ASSERT_GT(normalized_input_frames.at(FIRST_MAXIMUM_INDEX + offset),
THRESHOLD)
<< "Input frames have unexpected missing maximum!";
} else {
ASSERT_LT(normalized_output_frames.at(FIRST_MAXIMUM_INDEX + offset),
-THRESHOLD)
<< "Output frames have unexpected missing minimum!";
ASSERT_LT(normalized_input_frames.at(FIRST_MAXIMUM_INDEX + offset),
-THRESHOLD)
<< "Input frames have unexpected missing minimum!";
}
}
}
/* NOTE(review): this span is the tail of a duplex data callback whose
   signature sits above this excerpt; `u`, `ob`, `ib`, `nframes`, and the
   template parameter T are declared there. */
std::lock_guard<std::mutex> lock(u->user_state_mutex);
/* generate our test tone on the fly */
for (int i = 0; i < nframes; i++) {
  double tone = 0.0;
  if (u->position + i < NUM_FRAMES_TO_OUTPUT) {
    /* generate sine wave */
    tone =
        sin(2 * M_PI * (i + u->position) * TONE_FREQUENCY / SAMPLE_FREQUENCY);
    tone *= OUTPUT_AMPLITUDE;
  }
  ob[i] = ConvertSampleToOutput<T>(tone);
  /* keep a copy of what was played for later comparison */
  u->output_frames.push_back(tone);
  /* store any looped back output, may be silence */
  u->input_frames.push_back(ConvertSampleFromOutput(ib[i]));
}
/* advance the stream position so the next callback continues the tone */
u->position += nframes;
return nframes;
}
template <typename T> long
data_cb_loop_input_only(cubeb_stream * stream, void * user, constvoid * inputbuffer, void * outputbuffer, long nframes)
{ struct user_state_loopback * u = (struct user_state_loopback *)user;
T * ib = (T *)inputbuffer;
if (outputbuffer != NULL) { // Can't assert as it needs to return, so expect to fail instead
EXPECT_EQ(outputbuffer, (void *)NULL)
<< "outputbuffer should be null in input only callback"; return CUBEB_ERROR;
}
std::lock_guard<std::mutex> lock(u->user_state_mutex); for (int i = 0; i < nframes; i++) {
u->input_frames.push_back(ConvertSampleFromOutput(ib[i]));
}
return nframes;
}
template <typename T> long
data_cb_playback(cubeb_stream * stream, void * user, constvoid * inputbuffer, void * outputbuffer, long nframes)
{ struct user_state_loopback * u = (struct user_state_loopback *)user;
T * ob = (T *)outputbuffer;
std::lock_guard<std::mutex> lock(u->user_state_mutex); /* generate our test tone on the fly */ for (int i = 0; i < nframes; i++) { double tone = 0.0; if (u->position + i < NUM_FRAMES_TO_OUTPUT) { /* generate sine wave */
tone =
sin(2 * M_PI * (i + u->position) * TONE_FREQUENCY / SAMPLE_FREQUENCY);
tone *= OUTPUT_AMPLITUDE;
}
ob[i] = ConvertSampleToOutput<T>(tone);
u->output_frames.push_back(tone);
}
/* NOTE(review): fragment of a test body; `user_data`, `phase`, and
   NUM_FRAMES_TO_OUTPUT come from the enclosing scope outside this excerpt. */
/* access after stop should not happen, but lock just in case and to appease
 * sanitization tools */
std::lock_guard<std::mutex> lock(user_data->user_state_mutex);
std::vector<double> & output_frames = user_data->output_frames;
std::vector<double> & input_frames = user_data->input_frames;
ASSERT_EQ(output_frames.size(), input_frames.size())
    << "#Output frames != #input frames";
/* extract vectors of just the relevant signal from output and input */
auto output_frames_signal_start = output_frames.begin();
auto output_frames_signal_end = output_frames.begin() + NUM_FRAMES_TO_OUTPUT;
std::vector<double> trimmed_output_frames(output_frames_signal_start,
                                          output_frames_signal_end);
/* skip `phase` input frames so input lines up with the start of the output
   signal — presumably computed by find_phase; confirm against the caller */
auto input_frames_signal_start = input_frames.begin() + phase;
auto input_frames_signal_end =
    input_frames.begin() + phase + NUM_FRAMES_TO_OUTPUT;
std::vector<double> trimmed_input_frames(input_frames_signal_start,
                                         input_frames_signal_end);
/* NOTE(review): fragment of a test body; `user_data`, `phase`, and
   NUM_FRAMES_TO_OUTPUT come from the enclosing scope outside this excerpt.
   Unlike the duplex variant, output may legitimately have fewer frames than
   input here, hence ASSERT_LE rather than ASSERT_EQ. */
/* access after stop should not happen, but lock just in case and to appease
 * sanitization tools */
std::lock_guard<std::mutex> lock(user_data->user_state_mutex);
std::vector<double> & output_frames = user_data->output_frames;
std::vector<double> & input_frames = user_data->input_frames;
ASSERT_LE(output_frames.size(), input_frames.size())
    << "#Output frames should be less or equal to #input frames";
/* extract vectors of just the relevant signal from output and input */
auto output_frames_signal_start = output_frames.begin();
auto output_frames_signal_end = output_frames.begin() + NUM_FRAMES_TO_OUTPUT;
std::vector<double> trimmed_output_frames(output_frames_signal_start,
                                          output_frames_signal_end);
/* skip `phase` input frames so input lines up with the start of the output
   signal — presumably computed by find_phase; confirm against the caller */
auto input_frames_signal_start = input_frames.begin() + phase;
auto input_frames_signal_end =
    input_frames.begin() + phase + NUM_FRAMES_TO_OUTPUT;
std::vector<double> trimmed_input_frames(input_frames_signal_start,
                                         input_frames_signal_end);
/* NOTE(review): tail fragment of a test body; `user_data` comes from the
   enclosing scope outside this excerpt. Verifies that a stream that should
   be silent captured only (near-)zero frames.
   Fixes in this revision: `EPISILON` typo -> EPSILON, made const, and bare
   `abs` (can bind to the integer overload and truncate doubles) -> fabs. */
/* access after stop should not happen, but lock just in case and to appease
 * sanitization tools */
std::lock_guard<std::mutex> lock(user_data->user_state_mutex);
std::vector<double> & input_frames = user_data->input_frames;
/* expect to have at least ~50ms of frames */
ASSERT_GE(input_frames.size(), SAMPLE_FREQUENCY / 20);
/* frames should be 0.0, but use epsilon to avoid possible issues with impls
   that may use ~0.0 silence values. */
const double EPSILON = 0.0001;
for (double frame : input_frames) {
  ASSERT_LT(fabs(frame), EPSILON);
}
}
/* NOTE(review): fragment of a test body; `r`, `ctx`, and `collection` are
   declared earlier, outside this excerpt. */
r = cubeb_enumerate_devices(ctx, CUBEB_DEVICE_TYPE_OUTPUT, &collection);
if (r == CUBEB_ERROR_NOT_SUPPORTED) {
  /* enumeration is optional for backends; skip rather than fail */
  fprintf(stderr, "Device enumeration not supported"
                  " for this backend, skipping this test.\n");
  return;
}
ASSERT_EQ(r, CUBEB_OK) << "Error enumerating devices " << r;
/* get first preferred output device id */
std::string device_id;
for (size_t i = 0; i < collection.count; i++) {
  if (collection.device[i].preferred) {
    device_id = collection.device[i].device_id;
    break;
  }
}
/* release the collection before any early return below */
cubeb_device_collection_destroy(ctx, &collection);
if (device_id.empty()) {
  fprintf(stderr, "Could not find preferred device, aborting test.\n");
  return;
}
/* NOTE(review): fragment of a test body (appears to duplicate an earlier
   span in this excerpt); `user_data`, `phase`, and NUM_FRAMES_TO_OUTPUT come
   from the enclosing scope outside this excerpt. */
/* access after stop should not happen, but lock just in case and to appease
 * sanitization tools */
std::lock_guard<std::mutex> lock(user_data->user_state_mutex);
std::vector<double> & output_frames = user_data->output_frames;
std::vector<double> & input_frames = user_data->input_frames;
ASSERT_LE(output_frames.size(), input_frames.size())
    << "#Output frames should be less or equal to #input frames";
/* extract vectors of just the relevant signal from output and input */
auto output_frames_signal_start = output_frames.begin();
auto output_frames_signal_end = output_frames.begin() + NUM_FRAMES_TO_OUTPUT;
std::vector<double> trimmed_output_frames(output_frames_signal_start,
                                          output_frames_signal_end);
/* skip `phase` input frames so input lines up with the start of the output
   signal — presumably computed by find_phase; confirm against the caller */
auto input_frames_signal_start = input_frames.begin() + phase;
auto input_frames_signal_end =
    input_frames.begin() + phase + NUM_FRAMES_TO_OUTPUT;
std::vector<double> trimmed_input_frames(input_frames_signal_start,
                                         input_frames_signal_end);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.