/* * Copyright 2004 The WebRTC Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree.
*/
// Builds a session bound to a transport (`content_name` + `component`) with
// the given ICE credentials. Pooled sessions are allowed to be created with
// empty content name, component, ufrag and password.
PortAllocatorSession::PortAllocatorSession(absl::string_view content_name,
                                           int component,
                                           absl::string_view ice_ufrag,
                                           absl::string_view ice_pwd,
                                           uint32_t flags)
    : flags_(flags),
      generation_(0),
      content_name_(content_name),
      component_(component),
      ice_ufrag_(ice_ufrag),
      ice_pwd_(ice_pwd) {
  // Credentials must be supplied together: either both present or both empty.
  RTC_DCHECK(ice_ufrag.empty() == ice_pwd.empty());
}
// Default-constructs an allocator with library defaults; port range 0/0 means
// "unrestricted" until SetPortRange (elsewhere) narrows it.
PortAllocator::PortAllocator()
    : flags_(kDefaultPortAllocatorFlags),
      min_port_(0),
      max_port_(0),
      max_ipv6_networks_(kDefaultMaxIPv6Networks),
      step_delay_(kDefaultStepDelay),
      allow_tcp_listen_(true),
      candidate_filter_(CF_ALL),
      tiebreaker_(rtc::CreateRandomId64()) {
  // The allocator will be attached to a thread in Initialize.
  thread_checker_.Detach();
}
bool PortAllocator::SetConfiguration( const ServerAddresses& stun_servers, const std::vector<RelayServerConfig>& turn_servers, int candidate_pool_size,
webrtc::PortPrunePolicy turn_port_prune_policy,
webrtc::TurnCustomizer* turn_customizer, const std::optional<int>& stun_candidate_keepalive_interval) {
RTC_DCHECK_GE(candidate_pool_size, 0);
RTC_DCHECK_LE(candidate_pool_size, static_cast<int>(UINT16_MAX));
CheckRunOnValidThreadIfInitialized(); // A positive candidate pool size would lead to the creation of a pooled // allocator session and starting getting ports, which we should only do on // the network thread.
RTC_DCHECK(candidate_pool_size == 0 || thread_checker_.IsCurrent()); bool ice_servers_changed =
(stun_servers != stun_servers_ || turn_servers != turn_servers_);
stun_servers_ = stun_servers;
turn_servers_ = turn_servers;
turn_port_prune_policy_ = turn_port_prune_policy;
candidate_pool_size_ = candidate_pool_size;
// If ICE servers changed, throw away any existing pooled sessions and create // new ones. if (ice_servers_changed) {
pooled_sessions_.clear();
}
turn_customizer_ = turn_customizer;
// If `candidate_pool_size_` is less than the number of pooled sessions, get // rid of the extras. while (candidate_pool_size_ < static_cast<int>(pooled_sessions_.size())) {
pooled_sessions_.back().reset(nullptr);
pooled_sessions_.pop_back();
}
// `stun_candidate_keepalive_interval_` will be used in STUN port allocation // in future sessions. We also update the ready ports in the pooled sessions. // Ports in sessions that are taken and owned by P2PTransportChannel will be // updated there via IceConfig.
stun_candidate_keepalive_interval_ = stun_candidate_keepalive_interval; for (constauto& session : pooled_sessions_) {
session->SetStunKeepaliveIntervalForReadyPorts(
stun_candidate_keepalive_interval_);
}
// If `candidate_pool_size_` is greater than the number of pooled sessions, // create new sessions. while (static_cast<int>(pooled_sessions_.size()) < candidate_pool_size_) {
IceParameters iceCredentials =
IceCredentialsIterator::CreateRandomIceCredentials();
PortAllocatorSession* pooled_session =
CreateSessionInternal("", 0, iceCredentials.ufrag, iceCredentials.pwd);
pooled_session->set_pooled(true);
pooled_session->StartGettingPorts();
pooled_sessions_.push_back(
std::unique_ptr<PortAllocatorSession>(pooled_session));
} returntrue;
}
// Removes one session from the pool, applies the caller's ICE parameters and
// the current candidate filter to it, and transfers ownership to the caller.
// Returns nullptr when the pool is empty or no matching session exists.
std::unique_ptr<PortAllocatorSession> PortAllocator::TakePooledSession(
    absl::string_view content_name,
    int component,
    absl::string_view ice_ufrag,
    absl::string_view ice_pwd) {
  CheckRunOnValidThreadAndInitialized();
  RTC_DCHECK(!ice_ufrag.empty());
  RTC_DCHECK(!ice_pwd.empty());
  if (pooled_sessions_.empty()) {
    return nullptr;
  }
  IceParameters credentials(ice_ufrag, ice_pwd, false);
  // When restrict_ice_credentials_change_ is set, only a session created
  // with exactly these credentials may be taken; otherwise any pooled
  // session will do ("find any" is signalled by passing nullptr).
  const IceParameters* wanted =
      restrict_ice_credentials_change_ ? &credentials : nullptr;
  auto cit = FindPooledSession(wanted);
  if (cit == pooled_sessions_.end()) {
    return nullptr;
  }
  // FindPooledSession yields a const_iterator; convert it to a mutable
  // iterator so the element can be moved out and erased.
  auto it =
      pooled_sessions_.begin() + std::distance(pooled_sessions_.cbegin(), cit);
  std::unique_ptr<PortAllocatorSession> taken = std::move(*it);
  taken->SetIceParameters(content_name, component, ice_ufrag, ice_pwd);
  taken->set_pooled(false);
  // According to JSEP, a pooled session should filter candidates only
  // after it's taken out of the pool.
  taken->SetCandidateFilter(candidate_filter());
  pooled_sessions_.erase(it);
  return taken;
}
// Returns a non-owning pointer to a pooled session matching
// `ice_credentials` (any session when `ice_credentials` is null), or
// nullptr if the pool holds no match. The session stays in the pool.
const PortAllocatorSession* PortAllocator::GetPooledSession(
    const IceParameters* ice_credentials) const {
  CheckRunOnValidThreadAndInitialized();
  auto found = FindPooledSession(ice_credentials);
  return found == pooled_sessions_.end() ? nullptr : found->get();
}
// Scans the pool for a session whose ufrag/pwd equal `ice_credentials`;
// a null `ice_credentials` matches the first session. Returns end() when
// nothing matches.
std::vector<std::unique_ptr<PortAllocatorSession>>::const_iterator
PortAllocator::FindPooledSession(const IceParameters* ice_credentials) const {
  auto it = pooled_sessions_.begin();
  for (; it != pooled_sessions_.end(); ++it) {
    const bool matches =
        ice_credentials == nullptr ||
        ((*it)->ice_ufrag() == ice_credentials->ufrag &&
         (*it)->ice_pwd() == ice_credentials->pwd);
    if (matches) {
      break;
    }
  }
  return it;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.