/* * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
namespace {
// Minimum number of packets kept in the send-side packet history (for
// retransmissions via NACK/RTX).
static const int kMinSendSidePacketHistorySize = 600;
// We don't do MTU discovery, so assume that we have the standard ethernet MTU.
static const size_t kPathMTU = 1500;

using webrtc_internal_rtp_video_sender::RtpStreamSender;
if (absl::StartsWith(trials.Lookup("WebRTC-DisableUlpFecExperiment"), "Enabled")) {
RTC_LOG(LS_INFO) << "Experiment to disable sending ULPFEC is enabled.";
should_disable_red_and_ulpfec = true;
}
// If enabled, FlexFEC takes priority over RED+ULPFEC. if (flexfec_enabled) { if (IsUlpfecEnabled()) {
RTC_LOG(LS_INFO)
<< "Both FlexFEC and ULPFEC are configured. Disabling ULPFEC.";
}
should_disable_red_and_ulpfec = true;
}
// Payload types without picture ID cannot determine that a stream is complete // without retransmitting FEC, so using ULPFEC + NACK for H.264 (for instance) // is a waste of bandwidth since FEC packets still have to be transmitted. // Note that this is not the case with FlexFEC. if (nack_enabled && IsUlpfecEnabled() &&
!PayloadTypeSupportsSkippingFecPackets(rtp_config.payload_name, trials)) {
RTC_LOG(LS_WARNING)
<< "Transmitting payload type without picture ID using " "NACK+ULPFEC is a waste of bandwidth since ULPFEC packets " "also have to be retransmitted. Disabling ULPFEC.";
should_disable_red_and_ulpfec = true;
}
// Verify payload types. if (IsUlpfecEnabled() ^ IsRedEnabled()) {
RTC_LOG(LS_WARNING)
<< "Only RED or only ULPFEC enabled, but not both. Disabling both.";
should_disable_red_and_ulpfec = true;
}
return should_disable_red_and_ulpfec;
}
// TODO(brandtr): Update this function when we support multistream protection.
std::unique_ptr<VideoFecGenerator> MaybeCreateFecGenerator( const Environment& env, const RtpConfig& rtp, const std::map<uint32_t, RtpState>& suspended_ssrcs, int simulcast_index) { // If flexfec is configured that takes priority. if (rtp.flexfec.payload_type >= 0) {
RTC_DCHECK_GE(rtp.flexfec.payload_type, 0);
RTC_DCHECK_LE(rtp.flexfec.payload_type, 127); if (rtp.flexfec.ssrc == 0) {
RTC_LOG(LS_WARNING) << "FlexFEC is enabled, but no FlexFEC SSRC given. " "Therefore disabling FlexFEC."; return nullptr;
} if (rtp.flexfec.protected_media_ssrcs.empty()) {
RTC_LOG(LS_WARNING)
<< "FlexFEC is enabled, but no protected media SSRC given. " "Therefore disabling FlexFEC."; return nullptr;
}
if (rtp.flexfec.protected_media_ssrcs.size() > 1) {
RTC_LOG(LS_WARNING)
<< "The supplied FlexfecConfig contained multiple protected " "media streams, but our implementation currently only " "supports protecting a single media stream. " "To avoid confusion, disabling FlexFEC completely."; return nullptr;
}
if (absl::c_find(rtp.flexfec.protected_media_ssrcs,
rtp.ssrcs[simulcast_index]) ==
rtp.flexfec.protected_media_ssrcs.end()) { // Media SSRC not among flexfec protected SSRCs. return nullptr;
}
const RtpState* rtp_state = nullptr; auto it = suspended_ssrcs.find(rtp.flexfec.ssrc); if (it != suspended_ssrcs.end()) {
rtp_state = &it->second;
}
RTC_DCHECK_EQ(1U, rtp.flexfec.protected_media_ssrcs.size()); return std::make_unique<FlexfecSender>(
env, rtp.flexfec.payload_type, rtp.flexfec.ssrc,
rtp.flexfec.protected_media_ssrcs[0], rtp.mid, rtp.extensions,
RTPSender::FecExtensionSizes(), rtp_state);
} elseif (rtp.ulpfec.red_payload_type >= 0 &&
rtp.ulpfec.ulpfec_payload_type >= 0 &&
!ShouldDisableRedAndUlpfec(/*flexfec_enabled=*/false, rtp,
env.field_trials())) { // Flexfec not configured, but ulpfec is and is not disabled. return std::make_unique<UlpfecGenerator>(env, rtp.ulpfec.red_payload_type,
rtp.ulpfec.ulpfec_payload_type);
}
  // NOTE(review): fragment of a per-stream construction loop; the enclosing
  // function's signature and the declaration of `configuration` are not
  // visible in this chunk.
  // Some streams could have been disabled, but the rids are still there.
  // This will occur when simulcast has been disabled for a codec (e.g. VP9)
  RTC_DCHECK(rtp_config.rids.empty() ||
             rtp_config.rids.size() >= rtp_config.ssrcs.size());
  // One sender configuration per configured media SSRC.
  for (size_t i = 0; i < rtp_config.ssrcs.size(); ++i) {
    RTPSenderVideo::Config video_config;
    configuration.local_media_ssrc = rtp_config.ssrcs[i];
// Returns true when some coded video sequence can be decoded starting with // this frame without requiring any previous frames. // e.g. it is the same as a key frame when spatial scalability is not used. // When spatial scalability is used, then it is true for layer frames of // a key frame without inter-layer dependencies. bool IsFirstFrameOfACodedVideoSequence( const EncodedImage& encoded_image, const CodecSpecificInfo* codec_specific_info) { if (encoded_image._frameType != VideoFrameType::kVideoFrameKey) { returnfalse;
}
if (codec_specific_info != nullptr) { if (codec_specific_info->generic_frame_info.has_value()) { // This function is used before // `codec_specific_info->generic_frame_info->frame_diffs` are calculated, // so need to use a more complicated way to check for presence of the // dependencies. return absl::c_none_of(
codec_specific_info->generic_frame_info->encoder_buffers,
[](const CodecBufferUsage& buffer) { return buffer.referenced; });
}
if (codec_specific_info->codecType == VideoCodecType::kVideoCodecVP8 ||
codec_specific_info->codecType == VideoCodecType::kVideoCodecH264 ||
codec_specific_info->codecType == VideoCodecType::kVideoCodecGeneric) { // These codecs do not support intra picture dependencies, so a frame // marked as a key frame should be a key frame. returntrue;
}
}
// Without depenedencies described in generic format do an educated guess. // It might be wrong for VP9 with spatial layer 0 skipped or higher spatial // layer not depending on the spatial layer 0. This corner case is unimportant // for current usage of this helper function.
// Use <= to accept both 0 (i.e. the first) and nullopt (i.e. the only). return encoded_image.SpatialIndex() <= 0;
}
} // namespace
// Constructs the sender: creates one RtpStreamSender per configured SSRC,
// restores previously suspended RTP/payload state, registers header
// extensions, and wires up FEC protection.
RtpVideoSender::RtpVideoSender(
    const Environment& env,
    absl::Nonnull<TaskQueueBase*> transport_queue,
    const std::map<uint32_t, RtpState>& suspended_ssrcs,
    const std::map<uint32_t, RtpPayloadState>& states,
    const RtpConfig& rtp_config,
    int rtcp_report_interval_ms,
    Transport* send_transport,
    const RtpSenderObservers& observers,
    RtpTransportControllerSendInterface* transport,
    RateLimiter* retransmission_limiter,
    std::unique_ptr<FecController> fec_controller,
    FrameEncryptorInterface* frame_encryptor,
    const CryptoOptions& crypto_options,
    rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
    : env_(env),
      use_frame_rate_for_overhead_(absl::StartsWith(
          env.field_trials().Lookup("WebRTC-Video-UseFrameRateForOverhead"),
          "Enabled")),
      has_packet_feedback_(TransportSeqNumExtensionConfigured(rtp_config)),
      transport_queue_(*transport_queue),
      active_(false),
      fec_controller_(std::move(fec_controller)),
      fec_allowed_(true),
      rtp_streams_(CreateRtpStreamSenders(env,
                                          rtp_config,
                                          observers,
                                          rtcp_report_interval_ms,
                                          send_transport,
                                          transport,
                                          suspended_ssrcs,
                                          retransmission_limiter,
                                          frame_encryptor,
                                          crypto_options,
                                          std::move(frame_transformer))),
      rtp_config_(rtp_config),
      codec_type_(GetVideoCodecType(rtp_config)),
      transport_(transport),
      independent_frame_ids_(
          !env.field_trials().IsDisabled(
              "WebRTC-Video-SimulcastIndependentFrameIds") &&
          env.field_trials().IsDisabled("WebRTC-GenericDescriptorAuth")),
      transport_overhead_bytes_per_packet_(0),
      encoder_target_rate_bps_(0),
      frame_counts_(rtp_config.ssrcs.size()),
      frame_count_observer_(observers.frame_count_observer),
      safety_(PendingTaskSafetyFlag::CreateAttachedToTaskQueue(
          /*alive=*/true,
          transport_queue)) {
  transport_checker_.Detach();
  RTC_DCHECK_EQ(rtp_config_.ssrcs.size(), rtp_streams_.size());
  if (has_packet_feedback_)
    transport_->IncludeOverheadInPacedSender();
  // SSRCs are assumed to be sorted in the same order as `rtp_modules`.
  for (uint32_t ssrc : rtp_config_.ssrcs) {
    // Restore state if it previously existed.
    const RtpPayloadState* state = nullptr;
    auto it = states.find(ssrc);
    if (it != states.end()) {
      state = &it->second;
      // Keep the shared frame id monotonic across re-creations.
      shared_frame_id_ = std::max(shared_frame_id_, state->shared_frame_id);
    }
    params_.push_back(RtpPayloadParams(ssrc, state, env.field_trials()));
  }

  // RTP/RTCP initialization. Register each configured header extension on
  // every stream module.
  for (size_t i = 0; i < rtp_config_.extensions.size(); ++i) {
    const std::string& extension = rtp_config_.extensions[i].uri;
    int id = rtp_config_.extensions[i].id;
    RTC_DCHECK(RtpExtension::IsSupportedForVideo(extension));
    for (const RtpStreamSender& stream : rtp_streams_) {
      stream.rtp_rtcp->RegisterRtpHeaderExtension(extension, id);
    }
  }

  ConfigureSsrcs(suspended_ssrcs);

  if (!rtp_config_.mid.empty()) {
    for (const RtpStreamSender& stream : rtp_streams_) {
      stream.rtp_rtcp->SetMid(rtp_config_.mid);
    }
  }

  bool fec_enabled = false;
  for (const RtpStreamSender& stream : rtp_streams_) {
    // Simulcast has one module for each layer. Set the CNAME on all modules.
    stream.rtp_rtcp->SetCNAME(rtp_config_.c_name.c_str());
    stream.rtp_rtcp->SetMaxRtpPacketSize(rtp_config_.max_packet_size);
    stream.rtp_rtcp->RegisterSendPayloadFrequency(rtp_config_.payload_type,
                                                  kVideoPayloadTypeFrequency);
    if (stream.fec_generator != nullptr) {
      fec_enabled = true;
    }
  }
  // Currently, both ULPFEC and FlexFEC use the same FEC rate calculation logic,
  // so enable that logic if either of those FEC schemes are enabled.
  fec_controller_->SetProtectionMethod(fec_enabled, NackEnabled());
  fec_controller_->SetProtectionCallback(this);

  // Construction happens on the worker thread (see Call::CreateVideoSendStream)
  // but subsequent calls to the RTP state will happen on one of two threads:
  // * The pacer thread for actually sending packets.
  // * The transport thread when tearing down and querying GetRtpState().
  // Detach thread checkers.
  for (const RtpStreamSender& stream : rtp_streams_) {
    stream.rtp_rtcp->OnPacketSendingThreadSwitched();
  }
}
  // NOTE(review): fragment — the enclosing function's signature (presumably a
  // helper taking a `sending` flag and an `rtp_module` reference) is not
  // visible in this chunk; verify against the full file.
  // Sends a kRtcpByeCode when going from true to false.
  rtp_module.SetSendingStatus(sending);
  rtp_module.SetSendingMediaStatus(sending);
  if (sending) {
    // Route this module's packets through the transport controller.
    transport_->RegisterSendingRtpStream(rtp_module);
  } else {
    transport_->DeRegisterSendingRtpStream(rtp_module);
  }
}
// RTCPSender has it's own copy of the timestamp offset, added in // RTCPSender::BuildSR, hence we must not add the in the offset for this call. // TODO(nisse): Delete RTCPSender:timestamp_offset_, and see if we can confine // knowledge of the offset to a single place. if (!rtp_streams_[simulcast_index].rtp_rtcp->OnSendingRtpFrame(
encoded_image.RtpTimestamp(), encoded_image.capture_time_ms_,
rtp_config_.payload_type,
encoded_image._frameType == VideoFrameType::kVideoFrameKey)) { // The payload router could be active but this module isn't sending. return Result(Result::ERROR_SEND_FAILED);
}
if (IsFirstFrameOfACodedVideoSequence(encoded_image, codec_specific_info)) { // In order to use the dependency descriptor RTP header extension: // - Pass along any `FrameDependencyStructure` templates produced by the // encoder adapter. // - If none were produced the `RtpPayloadParams::*ToGeneric` for the // particular codec have simulated a dependency structure, so provide a // minimal set of templates. // - Otherwise, don't pass along any templates at all which will disable // the generation of a dependency descriptor.
RTPSenderVideo& sender_video = *rtp_streams_[simulcast_index].sender_video; if (codec_specific_info && codec_specific_info->template_structure) {
sender_video.SetVideoStructure(&*codec_specific_info->template_structure);
} elseif (std::optional<FrameDependencyStructure> structure =
params_[simulcast_index].GenericStructure(
codec_specific_info)) {
sender_video.SetVideoStructure(&*structure);
} else {
sender_video.SetVideoStructure(nullptr);
}
}
std::optional<int64_t> frame_id; if (!independent_frame_ids_) {
frame_id = shared_frame_id_;
}
void RtpVideoSender::OnBitrateAllocationUpdated( const VideoBitrateAllocation& bitrate) {
RTC_DCHECK_RUN_ON(&transport_checker_);
MutexLock lock(&mutex_); if (IsActiveLocked()) { if (rtp_streams_.size() == 1) { // If spatial scalability is enabled, it is covered by a single stream.
rtp_streams_[0].rtp_rtcp->SetVideoBitrateAllocation(bitrate);
} else {
std::vector<std::optional<VideoBitrateAllocation>> layer_bitrates =
bitrate.GetSimulcastAllocations(); // Simulcast is in use, split the VideoBitrateAllocation into one struct // per rtp stream, moving over the temporal layer allocation. for (size_t i = 0; i < rtp_streams_.size(); ++i) { // The next spatial layer could be used if the current one is // inactive. if (layer_bitrates[i]) {
rtp_streams_[i].rtp_rtcp->SetVideoBitrateAllocation(
*layer_bitrates[i]);
} else { // Signal a 0 bitrate on a simulcast stream.
rtp_streams_[i].rtp_rtcp->SetVideoBitrateAllocation(
VideoBitrateAllocation());
}
}
}
}
}
void RtpVideoSender::OnVideoLayersAllocationUpdated( const VideoLayersAllocation& allocation) {
MutexLock lock(&mutex_); if (IsActiveLocked()) { for (size_t i = 0; i < rtp_streams_.size(); ++i) {
VideoLayersAllocation stream_allocation = allocation;
stream_allocation.rtp_stream_index = i;
rtp_streams_[i].sender_video->SetVideoLayersAllocation(
std::move(stream_allocation));
}
// Only send video frames on the rtp module if the encoder is configured // to send. This is to prevent stray frames to be sent after an encoder // has been reconfigured. // Reconfiguration of the RtpRtcp modules must happen on the transport queue // to avoid races with batch sending of packets.
std::vector<bool> sending(rtp_streams_.size(), false); for (const VideoLayersAllocation::SpatialLayer& layer :
allocation.active_spatial_layers) { if (layer.rtp_stream_index < static_cast<int>(sending.size())) {
sending[layer.rtp_stream_index] = true;
}
}
transport_queue_.PostTask(
SafeTask(safety_.flag(), [this, sending = std::move(sending)] {
RTC_DCHECK_RUN_ON(&transport_checker_);
RTC_CHECK_EQ(sending.size(), rtp_streams_.size()); for (size_t i = 0; i < sending.size(); ++i) {
SetModuleIsActive(sending[i], *rtp_streams_[i].rtp_rtcp);
}
}));
}
}
  // NOTE(review): fragment of an RTP-state accessor; the enclosing function's
  // signature and the declaration of the `rtp_states` map are not visible in
  // this chunk.
  for (size_t i = 0; i < rtp_config_.ssrcs.size(); ++i) {
    uint32_t ssrc = rtp_config_.ssrcs[i];
    RTC_DCHECK_EQ(ssrc, rtp_streams_[i].rtp_rtcp->SSRC());
    rtp_states[ssrc] = rtp_streams_[i].rtp_rtcp->GetRtpState();

    // Only happens during shutdown, when RTP module is already inactive,
    // so OK to call fec generator here.
    if (rtp_streams_[i].fec_generator) {
      std::optional<RtpState> fec_state =
          rtp_streams_[i].fec_generator->GetRtpState();
      if (fec_state) {
        // This inner `ssrc` deliberately shadows the media SSRC above: FEC
        // state is keyed on the FlexFEC SSRC, not the media SSRC.
        uint32_t ssrc = rtp_config_.flexfec.ssrc;
        rtp_states[ssrc] = *fec_state;
      }
    }
  }

  // RTX state, keyed on the RTX SSRCs (parallel to the media streams).
  for (size_t i = 0; i < rtp_config_.rtx.ssrcs.size(); ++i) {
    uint32_t ssrc = rtp_config_.rtx.ssrcs[i];
    rtp_states[ssrc] = rtp_streams_[i].rtp_rtcp->GetRtxState();
  }
  // NOTE(review): fragment of the bitrate-update path; the enclosing
  // function's signature and the declarations of `payload_bitrate_bps`,
  // `framerate`, `update`, `max_total_packet_size`, `overhead_bytes_per_packet`
  // and `packet_overhead` are not visible in this chunk.

  // Get the encoder target rate. It is the estimated network rate -
  // protection overhead.
  // TODO(srte): We should multiply with 255 here.
  encoder_target_rate_bps_ = fec_controller_->UpdateFecRates(
      payload_bitrate_bps, framerate,
      rtc::saturated_cast<uint8_t>(update.packet_loss_ratio * 256),
      loss_mask_vector_, update.round_trip_time.ms());
  if (!fec_allowed_) {
    encoder_target_rate_bps_ = payload_bitrate_bps;
    // fec_controller_->UpdateFecRates() was still called so as to allow
    // `fec_controller_` to update whatever internal state it might have,
    // since `fec_allowed_` may be toggled back on at any moment.
  }

  // Subtract post encode overhead from the encoder target. If target rate
  // is really low, cap the overhead at 50%. This also avoids the case where
  // `encoder_target_rate_bps_` is 0 due to encoder pause event while the
  // packetization rate is positive since packets are still flowing.
  uint32_t post_encode_overhead_bps = std::min(
      GetPostEncodeOverhead().bps<uint32_t>(), encoder_target_rate_bps_ / 2);
  encoder_target_rate_bps_ -= post_encode_overhead_bps;

  // The loss mask has been consumed by UpdateFecRates() above.
  loss_mask_vector_.clear();

  uint32_t encoder_overhead_rate_bps = 0;
  if (has_packet_feedback_) {
    // TODO(srte): The packet size should probably be the same as in the
    // CalculateOverheadRate call above (just max_total_packet_size), it doesn't
    // make sense to use different packet rates for different overhead
    // calculations.
    DataRate encoder_overhead_rate = CalculateOverheadRate(
        DataRate::BitsPerSec(encoder_target_rate_bps_),
        max_total_packet_size - DataSize::Bytes(overhead_bytes_per_packet),
        packet_overhead, Frequency::Hertz(framerate));
    encoder_overhead_rate_bps = std::min(
        encoder_overhead_rate.bps<uint32_t>(),
        update.target_bitrate.bps<uint32_t>() - encoder_target_rate_bps_);
  }
  const uint32_t media_rate = encoder_target_rate_bps_ +
                              encoder_overhead_rate_bps +
                              post_encode_overhead_bps;
  RTC_DCHECK_GE(update.target_bitrate, DataRate::BitsPerSec(media_rate));
  // `protection_bitrate_bps_` includes overhead.
  protection_bitrate_bps_ = update.target_bitrate.bps() - media_rate;
}
void RtpVideoSender::OnPacketFeedbackVector(
std::vector<StreamPacketInfo> packet_feedback_vector) { if (fec_controller_->UseLossVectorMask()) {
MutexLock lock(&mutex_); for (const StreamPacketInfo& packet : packet_feedback_vector) {
loss_mask_vector_.push_back(!packet.received);
}
}
// Map from SSRC to all acked packets for that RTP module.
std::map<uint32_t, std::vector<uint16_t>> acked_packets_per_ssrc; for (const StreamPacketInfo& packet : packet_feedback_vector) { if (packet.received && packet.ssrc) {
acked_packets_per_ssrc[*packet.ssrc].push_back(
packet.rtp_sequence_number);
}
}
// Map from SSRC to vector of RTP sequence numbers that are indicated as // lost by feedback, without being trailed by any received packets.
std::map<uint32_t, std::vector<uint16_t>> early_loss_detected_per_ssrc;
for (const StreamPacketInfo& packet : packet_feedback_vector) { // Only include new media packets, not retransmissions/padding/fec. if (!packet.received && packet.ssrc && !packet.is_retransmission) { // Last known lost packet, might not be detectable as lost by remote // jitter buffer.
early_loss_detected_per_ssrc[*packet.ssrc].push_back(
packet.rtp_sequence_number);
} else { // Packet received, so any loss prior to this is already detectable.
early_loss_detected_per_ssrc.erase(*packet.ssrc);
}
}
for (constauto& kv : early_loss_detected_per_ssrc) { const uint32_t ssrc = kv.first; auto it = ssrc_to_rtp_module_.find(ssrc);
RTC_CHECK(it != ssrc_to_rtp_module_.end());
RTPSender* rtp_sender = it->second->RtpSender(); for (uint16_t sequence_number : kv.second) {
rtp_sender->ReSendPacket(sequence_number);
}
}
for (constauto& kv : acked_packets_per_ssrc) { const uint32_t ssrc = kv.first; auto it = ssrc_to_rtp_module_.find(ssrc); if (it == ssrc_to_rtp_module_.end()) { // No media, likely FEC or padding. Ignore since there's no RTP history to // clean up anyway. continue;
}
rtc::ArrayView<const uint16_t> rtp_sequence_numbers(kv.second);
it->second->OnPacketsAcknowledged(rtp_sequence_numbers);
}
}
// NOTE(review): The following German-language website disclaimer is extraction
// residue unrelated to this source file; it is preserved as a comment so the
// translation unit remains compilable. Rough English translation: "The
// information on this website was compiled carefully and to the best of our
// knowledge. However, neither completeness, nor correctness, nor quality of
// the provided information is guaranteed. Note: the colored syntax rendering
// and the measurement are still experimental."
// Original text:
// Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
// zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
// noch Qualität der bereitgestellten Informationen zugesichert.
// Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
// experimentell.