WebRTC 音量统计（audio_level）的调用入口，位于 audio/audio_send_stream.cc：
// Assembles the sender-side stats snapshot exposed through getStats().
// Combines locally tracked counters, the last remote RTCP report, the
// capture-side audio level, and APM statistics into one Stats struct.
webrtc::AudioSendStream::Stats AudioSendStream::GetStats(
    bool has_remote_tracks) const {
  RTC_DCHECK(worker_thread_checker_.IsCurrent());
  webrtc::AudioSendStream::Stats stats;
  stats.local_ssrc = config_.rtp.ssrc;
  stats.target_bitrate_bps = channel_send_->GetBitrate();

  // Byte/packet counters come straight from the channel's RTCP bookkeeping.
  webrtc::CallSendStatistics sender_stats = channel_send_->GetRTCPStatistics();
  stats.payload_bytes_sent = sender_stats.payload_bytes_sent;
  stats.header_and_padding_bytes_sent =
      sender_stats.header_and_padding_bytes_sent;
  stats.retransmitted_bytes_sent = sender_stats.retransmitted_bytes_sent;
  stats.packets_sent = sender_stats.packetsSent;
  stats.retransmitted_packets_sent = sender_stats.retransmitted_packets_sent;
  // RTT is unknown until an RTCP report has been received; until then the
  // value is 0 (an error marker), so only strictly positive RTTs are kept.
  if (sender_stats.rttMs > 0)
    stats.rtt_ms = sender_stats.rttMs;

  if (config_.send_codec_spec) {
    const auto& codec = *config_.send_codec_spec;
    stats.codec_name = codec.format.name;
    stats.codec_payload_type = codec.payload_type;
    stats.sample_rate_hz = encoder_sample_rate_hz_;
    // Loss and jitter come from the most recent remote RTCP report block
    // that refers to our send SSRC; other blocks are skipped.
    for (const auto& report : channel_send_->GetRemoteRTCPReportBlocks()) {
      if (report.source_SSRC != stats.local_ssrc)
        continue;
      stats.packets_lost = report.cumulative_num_packets_lost;
      stats.fraction_lost = Q8ToFloat(report.fraction_lost);
      // Interarrival jitter is reported in RTP clock ticks; convert to
      // milliseconds, guarding the integer division for sub-kHz clocks.
      const int ticks_per_ms = codec.format.clockrate_hz / 1000;
      if (ticks_per_ms > 0)
        stats.jitter_ms = report.interarrival_jitter / ticks_per_ms;
      break;
    }
  }

  {
    // audio_level_ is updated on the capture path; lock around the reads.
    rtc::CritScope cs(&audio_level_lock_);
    stats.audio_level = audio_level_.LevelFullRange();
    stats.total_input_energy = audio_level_.TotalEnergy();
    stats.total_input_duration = audio_level_.TotalDuration();
  }

  stats.typing_noise_detected = audio_state()->typing_noise_detected();
  stats.ana_statistics = channel_send_->GetANAStatistics();
  AudioProcessing* apm = audio_state_->audio_processing();
  if (apm)
    stats.apm_statistics = apm->GetStatistics(has_remote_tracks);
  stats.report_block_datas = std::move(sender_stats.report_block_datas);
  return stats;
}
AudioLevel 计算逻辑（audio/audio_level.cc 中 class AudioLevel 的实现）：
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "audio/audio_level.h"
#include "api/audio/audio_frame.h"
#include "common_audio/signal_processing/include/signal_processing_library.h"
namespace webrtc {
namespace voe {
// Starts from silence: no peak observed, no frames counted, published level 0.
// total_energy_ / total_duration_ are not in this init list; presumably they
// have in-class initializers in the header (Reset() zeroes them) — TODO confirm.
AudioLevel::AudioLevel()
: abs_max_(0), count_(0), current_level_full_range_(0) {}
AudioLevel::~AudioLevel() {}
// Returns every statistic — the peak-tracking state *and* the cumulative
// energy/duration counters — to its initial (silent) state.
void AudioLevel::Reset() {
  rtc::CritScope cs(&crit_sect_);
  total_duration_ = 0.0;
  total_energy_ = 0.0;
  current_level_full_range_ = 0;
  count_ = 0;
  abs_max_ = 0;
}
// Returns the most recently published peak sample level (full int16 range,
// 0..32767). Guarded by crit_sect_ since ComputeLevel() runs on the
// capture thread while stats are read elsewhere.
int16_t AudioLevel::LevelFullRange() const {
rtc::CritScope cs(&crit_sect_);
return current_level_full_range_;
}
// Clears only the peak-tracking state. Unlike Reset(), the cumulative
// total_energy_ / total_duration_ counters are deliberately left intact.
void AudioLevel::ResetLevelFullRange() {
  rtc::CritScope cs(&crit_sect_);
  current_level_full_range_ = 0;
  count_ = 0;
  abs_max_ = 0;
}
// Returns the accumulated energy (squared normalized level * seconds) used
// for the "totalAudioEnergy" stat. Guarded by crit_sect_.
double AudioLevel::TotalEnergy() const {
rtc::CritScope cs(&crit_sect_);
return total_energy_;
}
// Returns the accumulated audio duration (in the units passed to
// ComputeLevel(), presumably seconds — confirm against callers) for the
// "totalSamplesDuration" stat. Guarded by crit_sect_.
double AudioLevel::TotalDuration() const {
rtc::CritScope cs(&crit_sect_);
return total_duration_;
}
void AudioLevel::ComputeLevel(const AudioFrame& audioFrame, double duration) {
// Check speech level (works for 2 channels as well)
int16_t abs_value =
audioFrame.muted()
? 0
: WebRtcSpl_MaxAbsValueW16(
audioFrame.data(),
audioFrame.samples_per_channel_ * audioFrame.num_channels_);
// Protect member access using a lock since this method is called on a
// dedicated audio thread in the RecordedDataIsAvailable() callback.
rtc::CritScope cs(&crit_sect_);
if (abs_value > abs_max_)
abs_max_ = abs_value;
// Update level approximately 9 times per second, assuming audio frame
// duration is approximately 10 ms. (The update frequency is every
// 11th (= |kUpdateFrequency+1|) call: 1000/(11*10)=9.09..., we should
// probably change this behavior, see https://crbug.com/webrtc/10784).
if (count_++ == kUpdateFrequency) {
current_level_full_range_ = abs_max_;
count_ = 0;
// Decay the absolute maximum (divide by 4)
abs_max_ >>= 2;
}
// See the description for "totalAudioEnergy" in the WebRTC stats spec
// (https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy)
// for an explanation of these formulas. In short, we need a value that can
// be used to compute RMS audio levels over different time intervals, by
// taking the difference between the results from two getStats calls. To do
// this, the value needs to be of units "squared sample value * time".
double additional_energy =
static_cast<double>(current_level_full_range_) / INT16_MAX;
additional_energy *= additional_energy;
total_energy_ += additional_energy * duration;
total_duration_ += duration;
}
} // namespace voe
} // namespace webrtc