Commit 24b257a1 authored by sergeyu's avatar sergeyu Committed by Commit bot

Add Audio support in Chromoting host when using WebRTC.

This change adds WebrtcAudioStream, which sends audio from the host to
the client. Audio is sent in a media stream separate from video.

BUG=638505

Review-Url: https://codereview.chromium.org/2392963003
Cr-Commit-Position: refs/heads/master@{#424090}
parent 27b6c856
......@@ -268,7 +268,7 @@ void ChromotingHost::OnIncomingSession(
protocol::SessionConfig::Protocol::WEBRTC) {
connection.reset(new protocol::WebrtcConnectionToClient(
base::WrapUnique(session), transport_context_,
video_encode_task_runner_));
video_encode_task_runner_, audio_task_runner_));
} else {
connection.reset(new protocol::IceConnectionToClient(
base::WrapUnique(session), transport_context_,
......
......@@ -206,6 +206,10 @@ static_library("protocol") {
"video_frame_pump.h",
"webrtc_audio_module.cc",
"webrtc_audio_module.h",
"webrtc_audio_source_adapter.cc",
"webrtc_audio_source_adapter.h",
"webrtc_audio_stream.cc",
"webrtc_audio_stream.h",
"webrtc_connection_to_client.cc",
"webrtc_connection_to_client.h",
"webrtc_connection_to_host.cc",
......@@ -247,6 +251,8 @@ static_library("test_support") {
testonly = true
sources = [
"fake_audio_source.cc",
"fake_audio_source.h",
"fake_authenticator.cc",
"fake_authenticator.h",
"fake_connection_to_client.cc",
......@@ -314,6 +320,7 @@ source_set("unit_tests") {
"v2_authenticator_unittest.cc",
"validating_authenticator_unittest.cc",
"video_frame_pump_unittest.cc",
"webrtc_audio_source_adapter_unittest.cc",
"webrtc_transport_unittest.cc",
]
......
......@@ -17,6 +17,7 @@
#include "remoting/proto/audio.pb.h"
#include "remoting/protocol/audio_source.h"
#include "remoting/protocol/audio_stub.h"
#include "remoting/protocol/fake_audio_source.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace remoting {
......@@ -33,24 +34,6 @@ std::unique_ptr<AudioPacket> MakeAudioPacket() {
} // namespace
class FakeAudioSource : public AudioSource {
public:
FakeAudioSource() {}
~FakeAudioSource() override {}
bool Start(const PacketCapturedCallback& callback) override {
callback_ = callback;
return true;
}
const PacketCapturedCallback& callback() { return callback_; }
private:
PacketCapturedCallback callback_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioSource);
};
class FakeAudioEncoder : public AudioEncoder {
public:
FakeAudioEncoder() {}
......
......@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef REMOTING_PROTOCOL_audio_stream_H_
#define REMOTING_PROTOCOL_audio_stream_H_
#ifndef REMOTING_PROTOCOL_AUDIO_STREAM_H_
#define REMOTING_PROTOCOL_AUDIO_STREAM_H_
namespace remoting {
namespace protocol {
......@@ -24,4 +24,4 @@ class AudioStream {
} // namespace protocol
} // namespace remoting
#endif // REMOTING_PROTOCOL_audio_stream_H_
#endif // REMOTING_PROTOCOL_AUDIO_STREAM_H_
......@@ -119,7 +119,7 @@ class ConnectionTest : public testing::Test,
host_connection_.reset(new WebrtcConnectionToClient(
base::WrapUnique(host_session_),
TransportContext::ForTests(protocol::TransportRole::SERVER),
message_loop_.task_runner()));
message_loop_.task_runner(), message_loop_.task_runner()));
client_connection_.reset(new WebrtcConnectionToHost());
} else {
......
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "remoting/protocol/fake_audio_source.h"
namespace remoting {
namespace protocol {
// Trivial AudioSource for tests: Start() just records the capture callback so
// a test can feed AudioPackets into the pipeline directly.
FakeAudioSource::FakeAudioSource() = default;

FakeAudioSource::~FakeAudioSource() = default;

bool FakeAudioSource::Start(const PacketCapturedCallback& callback) {
  callback_ = callback;
  // The fake source never fails to start.
  return true;
}
} // namespace protocol
} // namespace remoting
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef REMOTING_PROTOCOL_FAKE_AUDIO_SOURCE_H_
#define REMOTING_PROTOCOL_FAKE_AUDIO_SOURCE_H_
#include "base/callback.h"
#include "remoting/protocol/audio_source.h"
namespace remoting {
namespace protocol {
// Fake AudioSource for tests. Start() stores the supplied callback; a test
// retrieves it via callback() and runs it to simulate captured audio packets.
class FakeAudioSource : public AudioSource {
 public:
  FakeAudioSource();
  ~FakeAudioSource() override;

  // AudioSource interface.
  bool Start(const PacketCapturedCallback& callback) override;

  // Returns the callback passed to Start(), or a null callback if Start() has
  // not been called yet.
  const PacketCapturedCallback& callback() { return callback_; }

 private:
  PacketCapturedCallback callback_;

  DISALLOW_COPY_AND_ASSIGN(FakeAudioSource);
};
} // namespace protocol
} // namespace remoting
#endif // REMOTING_PROTOCOL_FAKE_AUDIO_SOURCE_H_
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "remoting/protocol/webrtc_audio_source_adapter.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "remoting/proto/audio.pb.h"
#include "remoting/protocol/audio_source.h"
namespace remoting {
namespace protocol {
// Audio format the adapter expects from the AudioSource: 16-bit stereo PCM
// (enforced by DCHECKs in Core::OnAudioPacket()). Use constexpr for
// consistency with kAudioFrameDuration below.
constexpr int kChannels = 2;
constexpr int kBytesPerSample = 2;

// Frame size expected by webrtc::AudioTrackSinkInterface.
constexpr base::TimeDelta kAudioFrameDuration =
    base::TimeDelta::FromMilliseconds(10);
// Core does the actual audio processing. It is created by the adapter on the
// caller's thread but, apart from AddSink()/RemoveSink(), is used and
// destroyed on the audio thread (see WebrtcAudioSourceAdapter's destructor).
class WebrtcAudioSourceAdapter::Core {
 public:
  Core();
  ~Core();

  // Starts pulling packets from |audio_source|. Must be called on the audio
  // thread (enforced by |thread_checker_|).
  void Start(std::unique_ptr<AudioSource> audio_source);

  // Pauses or resumes delivery of audio to the sinks. Must be called on the
  // audio thread.
  void Pause(bool pause);

  // Sink registration; may be called on any thread (guarded by
  // |audio_sinks_lock_|).
  void AddSink(webrtc::AudioTrackSinkInterface* sink);
  void RemoveSink(webrtc::AudioTrackSinkInterface* sink);

 private:
  // Invoked by |audio_source_| for every captured packet.
  void OnAudioPacket(std::unique_ptr<AudioPacket> packet);

  std::unique_ptr<AudioSource> audio_source_;
  // When true, incoming packets are dropped in OnAudioPacket().
  bool paused_ = false;
  // Sampling rate of the most recent packet; 0 until the first packet.
  int sampling_rate_ = 0;

  // webrtc::AudioTrackSinkInterface expects to get audio in 10ms frames (see
  // kAudioFrameDuration). AudioSource may generate AudioPackets for time
  // intervals that are not multiple of 10ms. In that case the left-over samples
  // are kept in |partial_frame_| until the next AudioPacket is captured by the
  // AudioSource.
  std::vector<uint8_t> partial_frame_;

  base::ObserverList<webrtc::AudioTrackSinkInterface> audio_sinks_;
  base::Lock audio_sinks_lock_;

  base::ThreadChecker thread_checker_;
};
WebrtcAudioSourceAdapter::Core::Core() {
  // Core is constructed on the adapter's thread but then used only on the
  // audio thread; detach so the first call there binds |thread_checker_|.
  thread_checker_.DetachFromThread();
}

WebrtcAudioSourceAdapter::Core::~Core() {}
void WebrtcAudioSourceAdapter::Core::Start(
    std::unique_ptr<AudioSource> audio_source) {
  DCHECK(thread_checker_.CalledOnValidThread());
  audio_source_ = std::move(audio_source);
  // Unretained() is safe: |audio_source_| is owned by this Core and destroyed
  // with it, so the capture callback cannot outlive |this|.
  audio_source_->Start(
      base::Bind(&Core::OnAudioPacket, base::Unretained(this)));
}

void WebrtcAudioSourceAdapter::Core::Pause(bool pause) {
  DCHECK(thread_checker_.CalledOnValidThread());
  paused_ = pause;
}
void WebrtcAudioSourceAdapter::Core::AddSink(
    webrtc::AudioTrackSinkInterface* sink) {
  // Can be called on any thread.
  base::AutoLock lock(audio_sinks_lock_);
  audio_sinks_.AddObserver(sink);
}

void WebrtcAudioSourceAdapter::Core::RemoveSink(
    webrtc::AudioTrackSinkInterface* sink) {
  // Can be called on any thread.
  base::AutoLock lock(audio_sinks_lock_);
  audio_sinks_.RemoveObserver(sink);
}
// Re-chunks the incoming packet into fixed 10ms frames and delivers them to
// all registered sinks. Samples that do not fill a whole frame are buffered
// in |partial_frame_| until the next packet arrives.
void WebrtcAudioSourceAdapter::Core::OnAudioPacket(
    std::unique_ptr<AudioPacket> packet) {
  DCHECK(thread_checker_.CalledOnValidThread());

  if (paused_)
    return;

  DCHECK_EQ(packet->channels(), kChannels);
  DCHECK_EQ(packet->bytes_per_sample(), kBytesPerSample);

  // A sampling-rate change invalidates any buffered left-over samples.
  if (sampling_rate_ != packet->sampling_rate()) {
    sampling_rate_ = packet->sampling_rate();
    partial_frame_.clear();
  }

  size_t samples_per_frame =
      kAudioFrameDuration * sampling_rate_ / base::TimeDelta::FromSeconds(1);
  size_t bytes_per_frame = kBytesPerSample * kChannels * samples_per_frame;

  const std::string& data = packet->data(0);

  size_t position = 0;

  // NOTE(review): sinks are notified while |audio_sinks_lock_| is held, so a
  // sink must not call AddSink()/RemoveSink() from inside OnData() — that
  // would deadlock. Confirm no current sink does this.
  base::AutoLock lock(audio_sinks_lock_);

  if (!partial_frame_.empty()) {
    // Top up the left-over frame from the previous packet with as many bytes
    // as are available (or needed).
    size_t bytes_to_append =
        std::min(bytes_per_frame - partial_frame_.size(), data.size());
    position += bytes_to_append;
    partial_frame_.insert(partial_frame_.end(), data.data(),
                          data.data() + bytes_to_append);
    if (partial_frame_.size() < bytes_per_frame) {
      // Still don't have full frame.
      return;
    }

    // Here |partial_frame_| always contains a full frame.
    DCHECK_EQ(partial_frame_.size(), bytes_per_frame);

    FOR_EACH_OBSERVER(webrtc::AudioTrackSinkInterface, audio_sinks_,
                      OnData(&partial_frame_.front(), kBytesPerSample * 8,
                             sampling_rate_, kChannels, samples_per_frame));
  }

  // Deliver all remaining whole frames directly from the packet's buffer.
  while (position + bytes_per_frame <= data.size()) {
    FOR_EACH_OBSERVER(webrtc::AudioTrackSinkInterface, audio_sinks_,
                      OnData(data.data() + position, kBytesPerSample * 8,
                             sampling_rate_, kChannels, samples_per_frame));
    position += bytes_per_frame;
  }

  // Keep the remainder (possibly empty) for the next packet.
  partial_frame_.assign(data.data() + position, data.data() + data.size());
}
WebrtcAudioSourceAdapter::WebrtcAudioSourceAdapter(
    scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner)
    : audio_task_runner_(audio_task_runner), core_(new Core()) {}

WebrtcAudioSourceAdapter::~WebrtcAudioSourceAdapter() {
  // |core_| runs on the audio thread, so it must also be destroyed there.
  audio_task_runner_->DeleteSoon(FROM_HERE, core_.release());
}
void WebrtcAudioSourceAdapter::Start(
    std::unique_ptr<AudioSource> audio_source) {
  // Unretained() is safe: |core_| is deleted via DeleteSoon() on
  // |audio_task_runner_| (see the destructor), so it outlives any task posted
  // here before destruction.
  audio_task_runner_->PostTask(
      FROM_HERE, base::Bind(&Core::Start, base::Unretained(core_.get()),
                            base::Passed(&audio_source)));
}

void WebrtcAudioSourceAdapter::Pause(bool pause) {
  audio_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&Core::Pause, base::Unretained(core_.get()), pause));
}
WebrtcAudioSourceAdapter::SourceState WebrtcAudioSourceAdapter::state() const {
  // The source is live for as long as the adapter exists.
  return kLive;
}

bool WebrtcAudioSourceAdapter::remote() const {
  return false;
}

// The reported state never changes, so audio observers never need to be
// notified and registration is a no-op.
void WebrtcAudioSourceAdapter::RegisterAudioObserver(AudioObserver* observer) {}

void WebrtcAudioSourceAdapter::UnregisterAudioObserver(
    AudioObserver* observer) {}

// Sink management is forwarded directly to |core_|; Core::AddSink() and
// Core::RemoveSink() are safe to call from any thread.
void WebrtcAudioSourceAdapter::AddSink(webrtc::AudioTrackSinkInterface* sink) {
  core_->AddSink(sink);
}

void WebrtcAudioSourceAdapter::RemoveSink(
    webrtc::AudioTrackSinkInterface* sink) {
  core_->RemoveSink(sink);
}

// webrtc::NotifierInterface implementation — no-ops for the same reason as
// the audio observers above.
void WebrtcAudioSourceAdapter::RegisterObserver(
    webrtc::ObserverInterface* observer) {}

void WebrtcAudioSourceAdapter::UnregisterObserver(
    webrtc::ObserverInterface* observer) {}
} // namespace protocol
} // namespace remoting
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef REMOTING_PROTOCOL_WEBRTC_AUDIO_SOURCE_ADAPTER_H_
#define REMOTING_PROTOCOL_WEBRTC_AUDIO_SOURCE_ADAPTER_H_
#include <memory>

#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/observer_list.h"
#include "base/single_thread_task_runner.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
namespace webrtc {
class AudioTrackSinkInterface;
} // namespace webrtc
namespace remoting {
class AudioPacket;
namespace protocol {
class AudioSource;
// Implements webrtc::AudioSourceInterface on top of a remoting AudioSource so
// captured audio can back a webrtc audio track. Incoming AudioPackets are
// re-chunked into the fixed-size frames webrtc sinks expect; that work is
// done by the private Core, which runs on |audio_task_runner_|.
class WebrtcAudioSourceAdapter : public webrtc::AudioSourceInterface {
 public:
  explicit WebrtcAudioSourceAdapter(
      scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner);
  ~WebrtcAudioSourceAdapter() override;

  // Starts capturing audio from |audio_source| (posted to
  // |audio_task_runner_|).
  void Start(std::unique_ptr<AudioSource> audio_source);

  // Pauses or resumes delivery of audio to the registered sinks.
  void Pause(bool pause);

  // webrtc::AudioSourceInterface implementation.
  SourceState state() const override;
  bool remote() const override;
  void RegisterAudioObserver(AudioObserver* observer) override;
  void UnregisterAudioObserver(AudioObserver* observer) override;
  void AddSink(webrtc::AudioTrackSinkInterface* sink) override;
  void RemoveSink(webrtc::AudioTrackSinkInterface* sink) override;

  // webrtc::NotifierInterface implementation.
  void RegisterObserver(webrtc::ObserverInterface* observer) override;
  void UnregisterObserver(webrtc::ObserverInterface* observer) override;

 private:
  class Core;

  scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner_;

  // Core running on |audio_task_runner_|.
  std::unique_ptr<Core> core_;

  DISALLOW_COPY_AND_ASSIGN(WebrtcAudioSourceAdapter);
};
} // namespace protocol
} // namespace remoting
#endif // REMOTING_PROTOCOL_WEBRTC_AUDIO_SOURCE_ADAPTER_H_
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "remoting/protocol/webrtc_audio_source_adapter.h"
#include <numeric>
#include <vector>
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "remoting/proto/audio.pb.h"
#include "remoting/protocol/fake_audio_source.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
#include "third_party/webrtc/base/refcount.h"
namespace remoting {
namespace protocol {
namespace {
// Audio format used by the test: 48kHz 16-bit stereo, delivered in 10ms
// frames (the frame size WebrtcAudioSourceAdapter emits).
const int kSampleRate = 48000;
const int kBytesPerSample = 2;
const int kChannels = 2;
constexpr base::TimeDelta kFrameDuration =
    base::TimeDelta::FromMilliseconds(10);

// Sink that verifies the format of every incoming frame and accumulates the
// received samples so the test can check them afterwards.
class FakeAudioSink : public webrtc::AudioTrackSinkInterface {
 public:
  FakeAudioSink() {}
  ~FakeAudioSink() override {}

  // webrtc::AudioTrackSinkInterface implementation.
  void OnData(const void* audio_data,
              int bits_per_sample,
              int sample_rate,
              size_t number_of_channels,
              size_t number_of_samples) override {
    // Every frame must arrive with the expected format and 10ms duration.
    EXPECT_EQ(kSampleRate, sample_rate);
    EXPECT_EQ(kBytesPerSample * 8, bits_per_sample);
    EXPECT_EQ(kChannels, static_cast<int>(number_of_channels));
    EXPECT_EQ(kSampleRate * kFrameDuration / base::TimeDelta::FromSeconds(1),
              static_cast<int>(number_of_samples));
    const int16_t* samples = reinterpret_cast<const int16_t*>(audio_data);
    samples_.insert(samples_.end(), samples,
                    samples + number_of_samples * kChannels);
  }

  // Returns all samples received so far, channel-interleaved.
  const std::vector<int16_t>& samples() { return samples_; }

 private:
  std::vector<int16_t> samples_;
};
} // namespace
// Test fixture: wires a FakeAudioSource into a WebrtcAudioSourceAdapter with
// a FakeAudioSink attached, running everything on the test's message loop.
class WebrtcAudioSourceAdapterTest : public testing::Test {
 public:
  void SetUp() override {
    audio_source_adapter_ = new rtc::RefCountedObject<WebrtcAudioSourceAdapter>(
        message_loop_.task_runner());
    // Ownership of |audio_source_| passes to the adapter; the raw pointer is
    // kept so the test can drive the capture callback directly.
    audio_source_ = new FakeAudioSource();
    audio_source_adapter_->Start(base::WrapUnique(audio_source_));
    audio_source_adapter_->AddSink(&sink_);
    // Let the posted Start() task run.
    base::RunLoop().RunUntilIdle();
  }

  void TearDown() override {
    audio_source_adapter_ = nullptr;
    // Let the adapter's Core be deleted on the (current) audio thread.
    base::RunLoop().RunUntilIdle();
  }

 protected:
  base::MessageLoop message_loop_;
  FakeAudioSource* audio_source_;
  scoped_refptr<WebrtcAudioSourceAdapter> audio_source_adapter_;
  FakeAudioSink sink_;
};
// Feeds the adapter packets whose durations are not multiples of the 10ms
// frame size and verifies the sink receives every sample exactly once, in
// order.
TEST_F(WebrtcAudioSourceAdapterTest, PartialFrames) {
  int16_t sample_value = 1;
  // Mix of exact, short and long packet durations. Their total (130ms) is a
  // multiple of 10ms, so no samples are left buffered at the end.
  std::vector<int> frame_sizes_ms = {10, 12, 18, 2, 5, 7, 55, 13, 8};
  for (int frame_size_ms : frame_sizes_ms) {
    int num_samples = frame_size_ms * kSampleRate / 1000;
    std::vector<int16_t> data(num_samples * kChannels);
    // Fill with a ramp: left channel carries +sample_value, right channel its
    // negation, making any reordering or loss detectable.
    for (int i = 0; i < num_samples; ++i) {
      data[i * kChannels] = sample_value;
      data[i * kChannels + 1] = -sample_value;
      ++sample_value;
    }
    std::unique_ptr<AudioPacket> packet(new AudioPacket());
    packet->add_data(reinterpret_cast<char*>(&(data[0])),
                     num_samples * kChannels * sizeof(int16_t));
    packet->set_encoding(AudioPacket::ENCODING_RAW);
    packet->set_sampling_rate(AudioPacket::SAMPLING_RATE_48000);
    packet->set_bytes_per_sample(AudioPacket::BYTES_PER_SAMPLE_2);
    packet->set_channels(AudioPacket::CHANNELS_STEREO);
    audio_source_->callback().Run(std::move(packet));
  }

  int total_length_ms =
      std::accumulate(frame_sizes_ms.begin(), frame_sizes_ms.end(), 0,
                      [](int sum, int x) { return sum + x; });
  const std::vector<int16_t>& received = sink_.samples();
  int total_samples = total_length_ms * kSampleRate / 1000;
  ASSERT_EQ(total_samples * kChannels, static_cast<int>(received.size()));

  // Verify the ramp arrived intact across all frame boundaries.
  sample_value = 1;
  for (int i = 0; i < total_samples; ++i) {
    ASSERT_EQ(sample_value, received[i * kChannels]) << i;
    ASSERT_EQ(-sample_value, received[i * kChannels + 1]);
    ++sample_value;
  }
}
} // namespace protocol
} // namespace remoting
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "remoting/protocol/webrtc_audio_stream.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "remoting/base/constants.h"
#include "remoting/protocol/audio_source.h"
#include "remoting/protocol/webrtc_audio_source_adapter.h"
#include "remoting/protocol/webrtc_transport.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
#include "third_party/webrtc/api/peerconnectioninterface.h"
#include "third_party/webrtc/base/refcount.h"
namespace remoting {
namespace protocol {
// Labels for the media stream and the audio track created in Start().
const char kAudioStreamLabel[] = "audio_stream";
const char kAudioTrackLabel[] = "system_audio";

WebrtcAudioStream::WebrtcAudioStream() {}

WebrtcAudioStream::~WebrtcAudioStream() {
  if (stream_) {
    // Detach the audio track(s) and remove the stream from the connection so
    // the peer stops receiving audio when this object goes away.
    for (const auto& track : stream_->GetAudioTracks()) {
      stream_->RemoveTrack(track.get());
    }
    peer_connection_->RemoveStream(stream_.get());
  }
}
// Creates an audio track backed by |audio_source| (running on
// |audio_task_runner|) and attaches it to the transport's PeerConnection
// inside a new local media stream.
void WebrtcAudioStream::Start(
    scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner,
    std::unique_ptr<AudioSource> audio_source,
    WebrtcTransport* webrtc_transport) {
  DCHECK(webrtc_transport);

  source_adapter_ =
      new rtc::RefCountedObject<WebrtcAudioSourceAdapter>(audio_task_runner);
  source_adapter_->Start(std::move(audio_source));

  scoped_refptr<webrtc::PeerConnectionFactoryInterface> peer_connection_factory(
      webrtc_transport->peer_connection_factory());
  peer_connection_ = webrtc_transport->peer_connection();

  rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track =
      peer_connection_factory->CreateAudioTrack(kAudioTrackLabel,
                                                source_adapter_.get());

  stream_ = peer_connection_factory->CreateLocalMediaStream(kAudioStreamLabel);

  // AddTrack() may fail only if there is another track with the same name,
  // which is impossible because it's a brand new stream.
  bool result = stream_->AddTrack(audio_track.get());
  DCHECK(result);

  // AddStream() may fail if there is another stream with the same name or when
  // the PeerConnection is closed, neither is expected.
  result = peer_connection_->AddStream(stream_.get());
  DCHECK(result);
}
void WebrtcAudioStream::Pause(bool pause) {
  // Forwarded to the source adapter, which drops frames while paused.
  source_adapter_->Pause(pause);
}
} // namespace protocol
} // namespace remoting
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef REMOTING_PROTOCOL_WEBRTC_AUDIO_STREAM_H_
#define REMOTING_PROTOCOL_WEBRTC_AUDIO_STREAM_H_
#include <memory>
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "remoting/protocol/audio_stream.h"
namespace base {
class SingleThreadTaskRunner;
}  // namespace base
namespace webrtc {
class MediaStreamInterface;
class PeerConnectionInterface;
} // namespace webrtc
namespace remoting {
namespace protocol {
class AudioSource;
class WebrtcAudioSourceAdapter;
class WebrtcTransport;
// AudioStream implementation that sends audio from an AudioSource over a
// WebRTC audio track. Start() attaches the track to the transport's
// PeerConnection; the destructor detaches it again.
class WebrtcAudioStream : public AudioStream {
 public:
  WebrtcAudioStream();
  ~WebrtcAudioStream() override;

  // Starts streaming from |audio_source|, whose pipeline runs on
  // |audio_task_runner|. |webrtc_transport| is used only during this call;
  // the stream keeps references to the factory's objects, not the transport.
  void Start(scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner,
             std::unique_ptr<AudioSource> audio_source,
             WebrtcTransport* webrtc_transport);

  // AudioStream interface.
  void Pause(bool pause) override;

 private:
  scoped_refptr<WebrtcAudioSourceAdapter> source_adapter_;

  // Set in Start(); used by the destructor to detach the stream.
  scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
  scoped_refptr<webrtc::MediaStreamInterface> stream_;

  DISALLOW_COPY_AND_ASSIGN(WebrtcAudioStream);
};
} // namespace protocol
} // namespace remoting
#endif // REMOTING_PROTOCOL_WEBRTC_AUDIO_STREAM_H_
......@@ -21,6 +21,7 @@
#include "remoting/protocol/input_stub.h"
#include "remoting/protocol/message_pipe.h"
#include "remoting/protocol/transport_context.h"
#include "remoting/protocol/webrtc_audio_stream.h"
#include "remoting/protocol/webrtc_transport.h"
#include "remoting/protocol/webrtc_video_stream.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
......@@ -37,13 +38,15 @@ namespace protocol {
WebrtcConnectionToClient::WebrtcConnectionToClient(
std::unique_ptr<protocol::Session> session,
scoped_refptr<protocol::TransportContext> transport_context,
scoped_refptr<base::SingleThreadTaskRunner> video_encode_task_runner)
scoped_refptr<base::SingleThreadTaskRunner> video_encode_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner)
: transport_(
new WebrtcTransport(jingle_glue::JingleThreadWrapper::current(),
transport_context,
this)),
session_(