/* * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/video_quality_test.h"
namespace {
// RTP header extension IDs used when registering extensions for the tests.
enum : int {  // The first valid value is 1.
  kAbsSendTimeExtensionId = 1,
  // Two IDs for the generic frame descriptor so both versions can coexist.
  kGenericFrameDescriptorExtensionId00,
  kGenericFrameDescriptorExtensionId01,
  kTransportSequenceNumberExtensionId,
  kVideoContentTypeExtensionId,
  kVideoTimingExtensionId,
};
// Sync-group label used to lip-sync the audio and video streams.
constexpr char kSyncGroup[] = "av_sync";
// Opus bitrate bounds (bps) used for the audio stream in A/V tests.
constexpr int kOpusMinBitrateBps = 6000;
constexpr int kOpusBitrateFbBps = 32000;
// Number of frames sent before tear-down when running in quick-test mode.
constexpr int kFramesSentInQuickTest = 1;
// Base SSRCs for thumbnail media and RTX streams; each thumbnail stream gets
// a consecutive SSRC starting from these values.
constexpr uint32_t kThumbnailSendSsrcStart = 0xE0000;
constexpr uint32_t kThumbnailRtxSsrcStart = 0xF0000;
// This wrapper provides two features needed by the video quality tests:
//  1. Invoke VideoAnalyzer callbacks before and after encoding each frame.
//  2. Write the encoded frames to file, one file per simulcast layer.
class QualityTestVideoEncoder : public VideoEncoder,
                                private EncodedImageCallback {
 public:
  // `encoder` is the wrapped real encoder, `files` provides one output file
  // per simulcast layer, and `overshoot_factor` scales the configured bitrate
  // to simulate an encoder overshooting its target (1.0 = no overshoot).
  QualityTestVideoEncoder(std::unique_ptr<VideoEncoder> encoder,
                          VideoAnalyzer* analyzer,
                          std::vector<FileWrapper> files,
                          double overshoot_factor)
      : encoder_(std::move(encoder)),
        overshoot_factor_(overshoot_factor),
        analyzer_(analyzer) {
    // Wrap each output file in an IVF writer, capped at 100 MB per layer so
    // a long-running test cannot grow files without bound.
    for (FileWrapper& file : files) {
      writers_.push_back(
          IvfFileWriter::Wrap(std::move(file), /* byte_limit= */ 100000000));
    }
  }
// Simulating encoder overshooting target bitrate, by configuring actual // encoder too high. Take care not to adjust past limits of config, // otherwise encoders may crash on DCHECK.
VideoBitrateAllocation overshot_allocation; for (size_t si = 0; si < kMaxSpatialLayers; ++si) { const uint32_t spatial_layer_bitrate_bps =
parameters.bitrate.GetSpatialLayerSum(si); if (spatial_layer_bitrate_bps == 0) { continue;
}
double overshoot_factor = 1.0; // Match format to either of the streams in dual-stream mode in order to get // the overshoot factor. This is not very robust but we can't know for sure // which stream this encoder is meant for, from within the factory. if (format ==
SdpVideoFormat(params_.video[0].codec, params_.video[0].sdp_params)) {
overshoot_factor = params_.video[0].encoder_overshoot_factor;
} elseif (format == SdpVideoFormat(params_.video[1].codec,
params_.video[1].sdp_params)) {
overshoot_factor = params_.video[1].encoder_overshoot_factor;
} if (overshoot_factor == 0.0) { // If params were zero-initialized, set to 1.0 instead.
overshoot_factor = 1.0;
}
  // Register header extensions that are used by transport to identify
  // extensions when parsing incoming packets.
  RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri,
                                    kTransportSequenceNumberExtensionId));
  RegisterRtpExtension(
      RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId));
  RegisterRtpExtension(RtpExtension(RtpExtension::kGenericFrameDescriptorUri00,
                                    kGenericFrameDescriptorExtensionId00));
  RegisterRtpExtension(RtpExtension(RtpExtension::kDependencyDescriptorUri,
                                    kRtpExtensionDependencyDescriptor));
  RegisterRtpExtension(RtpExtension(RtpExtension::kVideoContentTypeUri,
                                    kVideoContentTypeExtensionId));
  RegisterRtpExtension(
      RtpExtension(RtpExtension::kVideoTimingUri, kVideoTimingExtensionId));
}
std::string VideoQualityTest::GenerateGraphTitle() const {
rtc::StringBuilder ss;
ss << params_.video[0].codec;
ss << " (" << params_.video[0].target_bitrate_bps / 1000 << "kbps";
ss << ", " << params_.video[0].fps << " FPS"; if (params_.screenshare[0].scroll_duration)
ss << ", " << params_.screenshare[0].scroll_duration << "s scroll"; if (params_.ss[0].streams.size() > 1)
ss << ", Stream #" << params_.ss[0].selected_stream; if (params_.ss[0].num_spatial_layers > 1)
ss << ", Layer #" << params_.ss[0].selected_sl;
ss << ")"; return ss.Release();
}
// Validates `params_` and `injection_components_`, filling in defaults where
// unset, and crashes (RTC_CHECK) on inconsistent configurations. Either a
// built-in network config or injected sender+receiver networks must be used,
// never a mix of both.
void VideoQualityTest::CheckParamsAndInjectionComponents() {
  if (injection_components_ == nullptr) {
    injection_components_ = std::make_unique<InjectionComponents>();
  }
  if (!params_.config && injection_components_->sender_network == nullptr &&
      injection_components_->receiver_network == nullptr) {
    params_.config = BuiltInNetworkBehaviorConfig();
  }
  // Exactly one of {built-in config, injected networks} must be present.
  RTC_CHECK(
      (params_.config && injection_components_->sender_network == nullptr &&
       injection_components_->receiver_network == nullptr) ||
      (!params_.config && injection_components_->sender_network != nullptr &&
       injection_components_->receiver_network != nullptr));
  for (size_t video_idx = 0; video_idx < num_video_streams_; ++video_idx) {
    // Iterate over primary and secondary video streams.
    if (!params_.video[video_idx].enabled)
      return;
    // Add a default stream if none specified.
    if (params_.ss[video_idx].streams.empty())
      params_.ss[video_idx].streams.push_back(
          VideoQualityTest::DefaultVideoStream(params_, video_idx));
    if (params_.ss[video_idx].num_spatial_layers == 0)
      params_.ss[video_idx].num_spatial_layers = 1;

    if (params_.config) {
      if (params_.config->loss_percent != 0 ||
          params_.config->queue_length_packets != 0) {
        // Since LayerFilteringTransport changes the sequence numbers, we can't
        // use that feature with packet loss, since the NACK request would end
        // up retransmitting the wrong packets.
        RTC_CHECK(params_.ss[video_idx].selected_sl == -1 ||
                  params_.ss[video_idx].selected_sl ==
                      params_.ss[video_idx].num_spatial_layers - 1);
        RTC_CHECK(params_.video[video_idx].selected_tl == -1 ||
                  params_.video[video_idx].selected_tl ==
                      params_.video[video_idx].num_temporal_layers - 1);
      }
    }

    // TODO(ivica): Should max_bitrate_bps == -1 represent inf max bitrate, as
    // it does in some parts of the code?
    RTC_CHECK_GE(params_.video[video_idx].max_bitrate_bps,
                 params_.video[video_idx].target_bitrate_bps);
    RTC_CHECK_GE(params_.video[video_idx].target_bitrate_bps,
                 params_.video[video_idx].min_bitrate_bps);
    int selected_stream = params_.ss[video_idx].selected_stream;
    if (params_.video[video_idx].selected_tl > -1) {
      RTC_CHECK_LT(selected_stream, params_.ss[video_idx].streams.size())
          << "Can not use --selected_tl when --selected_stream is all streams";
      int stream_tl = params_.ss[video_idx]
                          .streams[selected_stream]
                          .num_temporal_layers.value_or(1);
      RTC_CHECK_LT(params_.video[video_idx].selected_tl, stream_tl);
    }
    RTC_CHECK_LE(params_.ss[video_idx].selected_stream,
                 params_.ss[video_idx].streams.size());
    // Per-stream bitrate ordering: min <= target <= max.
    for (const VideoStream& stream : params_.ss[video_idx].streams) {
      RTC_CHECK_GE(stream.min_bitrate_bps, 0);
      RTC_CHECK_GE(stream.target_bitrate_bps, stream.min_bitrate_bps);
      RTC_CHECK_GE(stream.max_bitrate_bps, stream.target_bitrate_bps);
    }
    // TODO(ivica): Should we check if the sum of all streams/layers is equal
    // to the total bitrate? We anyway have to update them in the case bitrate
    // estimator changes the total bitrates.
    RTC_CHECK_GE(params_.ss[video_idx].num_spatial_layers, 1);
    RTC_CHECK_LE(params_.ss[video_idx].selected_sl,
                 params_.ss[video_idx].num_spatial_layers);
    RTC_CHECK(
        params_.ss[video_idx].spatial_layers.empty() ||
        params_.ss[video_idx].spatial_layers.size() ==
            static_cast<size_t>(params_.ss[video_idx].num_spatial_layers));
    if (params_.video[video_idx].codec == "VP8") {
      RTC_CHECK_EQ(params_.ss[video_idx].num_spatial_layers, 1);
    } else if (params_.video[video_idx].codec == "VP9") {
      RTC_CHECK_EQ(params_.ss[video_idx].streams.size(), 1);
    }
    RTC_CHECK_GE(params_.call.num_thumbnails, 0);
    // Thumbnails are only supported with the fixed VP8 3-stream/3-TL setup.
    if (params_.call.num_thumbnails > 0) {
      RTC_CHECK_EQ(params_.ss[video_idx].num_spatial_layers, 1);
      RTC_CHECK_EQ(params_.ss[video_idx].streams.size(), 3);
      RTC_CHECK_EQ(params_.video[video_idx].num_temporal_layers, 3);
      RTC_CHECK_EQ(params_.video[video_idx].codec, "VP8");
    }
    // Dual streams with FEC not supported in tests yet.
    RTC_CHECK(!params_.video[video_idx].flexfec || num_video_streams_ == 1);
    RTC_CHECK(!params_.video[video_idx].ulpfec || num_video_streams_ == 1);
  }
}
// Static.
// Parses comma-separated integers, where some elements may be empty. Empty
// values are replaced with -1.
// E.g. "10,-20,,30,40" --> {10, -20, -1, 30, 40}
// E.g. ",,10,,20," --> {-1, -1, 10, -1, 20, -1}
std::vector<int> VideoQualityTest::ParseCSV(const std::string& str) {
  std::vector<int> result;
  if (str.empty())
    return result;

  const char* p = str.c_str();
  int value = -1;  // Carries -1 into the result for each empty field.
  int pos;
  while (*p) {
    if (*p == ',') {
      result.push_back(value);
      value = -1;
      ++p;
      continue;
    }
    // %n records how many characters the number consumed so we can advance.
    RTC_CHECK_EQ(sscanf(p, "%d%n", &value, &pos), 1)
        << "Unexpected non-number value.";
    p += pos;
  }
  // Push the final field (the value after the last comma, or -1 if empty).
  result.push_back(value);
  return result;
}
if (params_.screenshare[video_idx].enabled) { // Fill out codec settings.
video_encoder_configs_[video_idx].content_type =
VideoEncoderConfig::ContentType::kScreen;
video_encoder_configs_[video_idx].legacy_conference_mode = true;
degradation_preference_ = DegradationPreference::MAINTAIN_RESOLUTION; if (params_.video[video_idx].codec == "VP8") {
VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
vp8_settings.denoisingOn = false;
vp8_settings.numberOfTemporalLayers = static_cast<unsignedchar>(
params_.video[video_idx].num_temporal_layers);
video_encoder_configs_[video_idx].encoder_specific_settings =
rtc::make_ref_counted<
VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
} elseif (params_.video[video_idx].codec == "VP9") {
VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
vp9_settings.denoisingOn = false;
vp9_settings.automaticResizeOn = false;
vp9_settings.numberOfTemporalLayers = static_cast<unsignedchar>(
params_.video[video_idx].num_temporal_layers);
vp9_settings.numberOfSpatialLayers = static_cast<unsignedchar>(
params_.ss[video_idx].num_spatial_layers);
vp9_settings.interLayerPred = params_.ss[video_idx].inter_layer_pred; // High FPS vp9 screenshare requires flexible mode. if (params_.ss[video_idx].num_spatial_layers > 1) {
vp9_settings.flexibleMode = true;
}
video_encoder_configs_[video_idx].encoder_specific_settings =
rtc::make_ref_counted<
VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
}
} elseif (params_.ss[video_idx].num_spatial_layers > 1) { // If SVC mode without screenshare, still need to set codec specifics.
RTC_CHECK(params_.video[video_idx].codec == "VP9");
VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
vp9_settings.numberOfTemporalLayers = static_cast<unsignedchar>(
params_.video[video_idx].num_temporal_layers);
vp9_settings.numberOfSpatialLayers = static_cast<unsignedchar>(params_.ss[video_idx].num_spatial_layers);
vp9_settings.interLayerPred = params_.ss[video_idx].inter_layer_pred;
vp9_settings.automaticResizeOn = false;
video_encoder_configs_[video_idx].encoder_specific_settings =
rtc::make_ref_counted<VideoEncoderConfig::Vp9EncoderSpecificSettings>(
vp9_settings);
RTC_DCHECK_EQ(video_encoder_configs_[video_idx].simulcast_layers.size(),
1); // Min bitrate will be enforced by spatial layer config instead.
video_encoder_configs_[video_idx].simulcast_layers[0].min_bitrate_bps = 0;
} elseif (params_.video[video_idx].automatic_scaling) { if (params_.video[video_idx].codec == "VP8") {
VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
vp8_settings.automaticResizeOn = true;
video_encoder_configs_[video_idx].encoder_specific_settings =
rtc::make_ref_counted<
VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
} elseif (params_.video[video_idx].codec == "VP9") {
VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); // Only enable quality scaler for single spatial layer.
vp9_settings.automaticResizeOn =
params_.ss[video_idx].num_spatial_layers == 1;
video_encoder_configs_[video_idx].encoder_specific_settings =
rtc::make_ref_counted<
VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
} elseif (params_.video[video_idx].codec == "H264") { // Quality scaling is always on for H.264.
} elseif (params_.video[video_idx].codec == cricket::kAv1CodecName) { // TODO(bugs.webrtc.org/11404): Propagate the flag to // aom_codec_enc_cfg_t::rc_resize_mode in Av1 encoder wrapper. // Until then do nothing, specially do not crash.
} else {
RTC_DCHECK_NOTREACHED()
<< "Automatic scaling not supported for codec "
<< params_.video[video_idx].codec << ", stream " << video_idx;
}
} else { // Default mode. Single SL, no automatic_scaling, if (params_.video[video_idx].codec == "VP8") {
VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings();
vp8_settings.automaticResizeOn = false;
video_encoder_configs_[video_idx].encoder_specific_settings =
rtc::make_ref_counted<
VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings);
} elseif (params_.video[video_idx].codec == "VP9") {
VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
vp9_settings.automaticResizeOn = false;
video_encoder_configs_[video_idx].encoder_specific_settings =
rtc::make_ref_counted<
VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
} elseif (params_.video[video_idx].codec == "H264") {
video_encoder_configs_[video_idx].encoder_specific_settings = nullptr;
}
}
total_streams_used += num_video_substreams;
}
  // FEC supported only for single video stream mode yet.
  if (params_.video[0].flexfec) {
    if (decode_all_receive_streams) {
      // Protect every send SSRC when all receive streams are decoded.
      SetSendFecConfig(GetVideoSendConfig()->rtp.ssrcs);
    } else {
      // Otherwise protect only the SSRC of the selected stream.
      SetSendFecConfig(
          {test::VideoTestConstants::kVideoSendSsrcs[params_.ss[0]
                                                         .selected_stream]});
    }
  // NOTE(review): fragment of RunWithAnalyzer; parts of the function body
  // (including the task-queue lambda opened before this chunk) are not
  // visible here.
  params_ = params;
  // TODO(ivica): Merge with RunWithRenderer and use a flag / argument to
  // differentiate between the analyzer and the renderer case.
  CheckParamsAndInjectionComponents();

  if (!params_.analyzer.graph_data_output_filename.empty()) {
    graph_data_output_file =
        fopen(params_.analyzer.graph_data_output_filename.c_str(), "w");
    RTC_CHECK(graph_data_output_file)
        << "Can't open the file " << params_.analyzer.graph_data_output_filename
        << "!";
  }

  // Teardown: close the graph-data file (if opened above) and release the
  // transports and calls before the analyzer goes away.
  if (graph_data_output_file)
    fclose(graph_data_output_file);

  send_transport.reset();
  recv_transport.reset();
  DestroyCalls();
});
analyzer_ = nullptr;
}
// Creates the audio device module (ADM) used by the test: the Core Audio ADM
// on Windows (which requires external COM initialization), otherwise the
// platform-default legacy ADM.
rtc::scoped_refptr<AudioDeviceModule> VideoQualityTest::CreateAudioDevice() {
#ifdef WEBRTC_WIN
  RTC_LOG(LS_INFO) << "Using latest version of ADM on Windows";
  // We must initialize the COM library on a thread before calling any of
  // the library functions. All COM functions in the ADM will return
  // CO_E_NOTINITIALIZED otherwise. The legacy ADM for Windows used internal
  // COM initialization but the new ADM requires COM to be initialized
  // externally.
  com_initializer_ =
      std::make_unique<ScopedCOMInitializer>(ScopedCOMInitializer::kMTA);
  RTC_CHECK(com_initializer_->Succeeded());
  RTC_CHECK(webrtc_win::core_audio_utility::IsSupported());
  RTC_CHECK(webrtc_win::core_audio_utility::IsMMCSSSupported());
  return CreateWindowsCoreAudioAudioDeviceModule(task_queue_factory_.get());
#else
  // Use legacy factory method on all platforms except Windows.
  return AudioDeviceModule::Create(AudioDeviceModule::kPlatformDefaultAudio,
                                   task_queue_factory_.get());
#endif
}
// Creates the audio device module and a shared AudioState, and installs them
// on both the send and receive call configs. With `use_real_adm` the default
// system audio devices are used; otherwise a fake ADM producing pulsed noise
// is created.
void VideoQualityTest::InitializeAudioDevice(CallConfig* send_call_config,
                                             CallConfig* recv_call_config,
                                             bool use_real_adm) {
  rtc::scoped_refptr<AudioDeviceModule> adm;
  if (!use_real_adm) {
    // By default, create a test ADM which fakes audio.
    adm = TestAudioDeviceModule::Create(
        task_queue_factory_.get(),
        TestAudioDeviceModule::CreatePulsedNoiseCapturer(32000, 48000),
        TestAudioDeviceModule::CreateDiscardRenderer(48000), 1.f);
  } else {
    // Run test with real ADM (using default audio devices) if user has
    // explicitly set the --audio and --use_real_adm command-line flags.
    adm = CreateAudioDevice();
  }
  RTC_CHECK(adm);

  AudioState::Config state_config;
  state_config.audio_mixer = AudioMixerImpl::Create();
  state_config.audio_processing =
      BuiltinAudioProcessingBuilder().Build(send_call_config->env);
  state_config.audio_device_module = adm;

  // Both call configs share identically-configured audio state.
  send_call_config->audio_state = AudioState::Create(state_config);
  recv_call_config->audio_state = AudioState::Create(state_config);

  if (use_real_adm) {
    // The real ADM requires extra initialization: setting default devices,
    // setting up number of channels etc. Helper class also calls
    // AudioDeviceModule::Init().
    webrtc::adm_helpers::Init(adm.get());
  } else {
    adm->Init();
  }

  // Always initialize the ADM before injecting a valid audio transport.
  RTC_CHECK(adm->RegisterAudioCallback(
                send_call_config->audio_state->audio_transport()) == 0);
}
  // NOTE(review): fragment of a Run* method; the enclosing declaration and
  // the definitions of send_transport/recv_transport are outside this chunk.
  // TODO(ivica): Remove bitrate_config and use the default CallConfig(), to
  // match the full stack tests.
  CallConfig send_call_config = SendCallConfig();
  send_call_config.bitrate_config = params_.call.call_bitrate_config;
  CallConfig recv_call_config = RecvCallConfig();

  if (params_.audio.enabled)
    InitializeAudioDevice(&send_call_config, &recv_call_config,
                          params_.audio.use_real_adm);

  // TODO(minyue): consider if this is a good transport even for audio only
  // calls.
  send_transport = CreateSendTransport();
  recv_transport = CreateReceiveTransport();

  // TODO(ivica): Use two calls to be able to merge with RunWithAnalyzer or at
  // least share as much code as possible. That way this test would also match
  // the full stack tests better.
  // Cross-wire the transports: sender's packets go to the receiver call and
  // vice versa.
  send_transport->SetReceiver(receiver_call_->Receiver());
  recv_transport->SetReceiver(sender_call_->Receiver());
// NOTE(review): the trailing German web disclaimer below is extraction
// residue, not part of this source file; preserved here as a comment so the
// translation unit stays well-formed.
// Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
// zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
// noch Qualität der bereitgestellten Informationen zugesichert.
// Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
// experimentell.