Skip to content

Commit

Permalink
Squashed commit of the following:
Browse files Browse the repository at this point in the history
commit 102940838f859941a41b2f3062d4eb8a61be6414
Author: Hiroshi Horie <548776+hiroshihorie@users.noreply.github.com>
Date:   Thu Dec 5 15:27:34 2024 +0700

    audio engine 1

commit d31187b
Author: Hiroshi Horie <548776+hiroshihorie@users.noreply.github.com>
Date:   Thu Dec 5 15:26:38 2024 +0700

    Connect voice engine mute to adm

commit 3df68d4
Author: Hiroshi Horie <548776+hiroshihorie@users.noreply.github.com>
Date:   Fri Oct 11 20:41:44 2024 +0900

    Revert "Stop recording on mute (turn off mic indicator) (#55)"

    This reverts commit c0209ef.

commit b99fd2c
Author: Michael Sloan <mgsloan@gmail.com>
Date:   Mon Dec 2 18:59:22 2024 -0700

    Use `rtc::ToString` instead of `std::to_string` in `SocketAddress::PortAsString()` (#156)

    Justification for this change is that `std::to_string` should be avoided
    as it uses the user's locale and calls to it get serialized, which is
    bad for concurrency.

    My actual motivation for this is quite bizarre. Before this change, with
    Zed's use of the LiveKit Rust SDK, I was getting connection strings that
    were not valid utf-8, instead having a port of
    `3\u0000\u0000\u001c\u0000`. I have not figured out how that could
    happen or why this change fixes it.

commit 543121b
Author: davidliu <davidliu@deviange.net>
Date:   Wed Oct 30 20:33:46 2024 -0700

    Custom audio input for Android (#154)
  • Loading branch information
hiroshihorie committed Dec 5, 2024
1 parent d29d62c commit 9c8d5b0
Show file tree
Hide file tree
Showing 19 changed files with 1,219 additions and 201 deletions.
5 changes: 0 additions & 5 deletions audio/audio_send_stream.cc
Original file line number Diff line number Diff line change
Expand Up @@ -415,11 +415,6 @@ void AudioSendStream::SetMuted(bool muted) {
channel_send_->SetInputMute(muted);
}

bool AudioSendStream::GetMuted() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
return channel_send_->InputMute();
}

webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
return GetStats(true);
}
Expand Down
1 change: 0 additions & 1 deletion audio/audio_send_stream.h
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,6 @@ class AudioSendStream final : public webrtc::AudioSendStream,
int payload_frequency,
int event,
int duration_ms) override;
bool GetMuted() override;
void SetMuted(bool muted) override;
webrtc::AudioSendStream::Stats GetStats() const override;
webrtc::AudioSendStream::Stats GetStats(
Expand Down
72 changes: 14 additions & 58 deletions audio/audio_state.cc
Original file line number Diff line number Diff line change
Expand Up @@ -98,26 +98,22 @@ void AudioState::AddSendingStream(webrtc::AudioSendStream* stream,
UpdateAudioTransportWithSendingStreams();

// Make sure recording is initialized; start recording if enabled.
if (ShouldRecord()) {
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {

// TODO: Verify if the following windows only logic is still required.
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {
#if defined(WEBRTC_WIN)
if (adm->BuiltInAECIsAvailable() && !adm->Playing()) {
if (!adm->PlayoutIsInitialized()) {
adm->InitPlayout();
}
adm->StartPlayout();
if (adm->BuiltInAECIsAvailable() && !adm->Playing()) {
if (!adm->PlayoutIsInitialized()) {
adm->InitPlayout();
}
#endif
adm->StartRecording();
adm->StartPlayout();
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
#endif
adm->StartRecording();
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
}
}
Expand All @@ -127,10 +123,7 @@ void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) {
auto count = sending_streams_.erase(stream);
RTC_DCHECK_EQ(1, count);
UpdateAudioTransportWithSendingStreams();

bool should_record = ShouldRecord();
RTC_LOG(LS_INFO) << "RemoveSendingStream: should_record = " << should_record;
if (!should_record) {
if (sending_streams_.empty()) {
config_.audio_device_module->StopRecording();
}
}
Expand Down Expand Up @@ -158,7 +151,7 @@ void AudioState::SetRecording(bool enabled) {
if (recording_enabled_ != enabled) {
recording_enabled_ = enabled;
if (enabled) {
if (ShouldRecord()) {
if (!sending_streams_.empty()) {
config_.audio_device_module->StartRecording();
}
} else {
Expand Down Expand Up @@ -218,43 +211,6 @@ void AudioState::UpdateNullAudioPollerState() {
null_audio_poller_.Stop();
}
}

void AudioState::OnMuteStreamChanged() {

auto* adm = config_.audio_device_module.get();
bool should_record = ShouldRecord();

RTC_LOG(LS_INFO) << "OnMuteStreamChanged: should_record = " << should_record;
if (should_record && !adm->Recording()) {
if (adm->InitRecording() == 0) {
adm->StartRecording();
}
} else if (!should_record && adm->Recording()) {
adm->StopRecording();
}
}

bool AudioState::ShouldRecord() {
RTC_LOG(LS_INFO) << "ShouldRecord";
// no streams to send
if (sending_streams_.empty()) {
RTC_LOG(LS_INFO) << "ShouldRecord: send stream = empty";
return false;
}

int stream_count = sending_streams_.size();

int muted_count = 0;
for (const auto& kv : sending_streams_) {
if (kv.first->GetMuted()) {
muted_count++;
}
}

RTC_LOG(LS_INFO) << "ShouldRecord: " << muted_count << " muted, " << stream_count << " sending";
return muted_count != stream_count;
}

} // namespace internal

rtc::scoped_refptr<AudioState> AudioState::Create(
Expand Down
5 changes: 0 additions & 5 deletions audio/audio_state.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,6 @@ class AudioState : public webrtc::AudioState {

void SetStereoChannelSwapping(bool enable) override;

void OnMuteStreamChanged() override;

AudioDeviceModule* audio_device_module() {
RTC_DCHECK(config_.audio_device_module);
return config_.audio_device_module.get();
Expand All @@ -66,9 +64,6 @@ class AudioState : public webrtc::AudioState {
void UpdateAudioTransportWithSendingStreams();
void UpdateNullAudioPollerState() RTC_RUN_ON(&thread_checker_);

// Returns true when at least 1 stream exists and all streams are not muted.
bool ShouldRecord();

SequenceChecker thread_checker_;
SequenceChecker process_thread_checker_{SequenceChecker::kDetached};
const webrtc::AudioState::Config config_;
Expand Down
4 changes: 2 additions & 2 deletions audio/channel_send.cc
Original file line number Diff line number Diff line change
Expand Up @@ -100,8 +100,6 @@ class ChannelSend : public ChannelSendInterface,
// Muting, Volume and Level.
void SetInputMute(bool enable) override;

bool InputMute() const override;

// Stats.
ANAStats GetANAStatistics() const override;

Expand Down Expand Up @@ -165,6 +163,8 @@ class ChannelSend : public ChannelSendInterface,
size_t payloadSize,
int64_t absolute_capture_timestamp_ms) override;

bool InputMute() const;

int32_t SendRtpAudio(AudioFrameType frameType,
uint8_t payloadType,
uint32_t rtp_timestamp_without_offset,
Expand Down
2 changes: 0 additions & 2 deletions audio/channel_send.h
Original file line number Diff line number Diff line change
Expand Up @@ -83,8 +83,6 @@ class ChannelSendInterface {
virtual bool SendTelephoneEventOutband(int event, int duration_ms) = 0;
virtual void OnBitrateAllocation(BitrateAllocationUpdate update) = 0;
virtual int GetTargetBitrate() const = 0;

virtual bool InputMute() const = 0;
virtual void SetInputMute(bool muted) = 0;

virtual void ProcessAndEncodeAudio(
Expand Down
1 change: 0 additions & 1 deletion call/audio_send_stream.h
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,6 @@ class AudioSendStream : public AudioSender {
int event,
int duration_ms) = 0;

virtual bool GetMuted() = 0;
virtual void SetMuted(bool muted) = 0;

virtual Stats GetStats() const = 0;
Expand Down
3 changes: 0 additions & 3 deletions call/audio_state.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,9 +59,6 @@ class AudioState : public rtc::RefCountInterface {

virtual void SetStereoChannelSwapping(bool enable) = 0;

// Notify the AudioState that a stream updated its mute state.
virtual void OnMuteStreamChanged() = 0;

static rtc::scoped_refptr<AudioState> Create(
const AudioState::Config& config);

Expand Down
6 changes: 4 additions & 2 deletions media/engine/webrtc_voice_engine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1698,8 +1698,10 @@ bool WebRtcVoiceSendChannel::MuteStream(uint32_t ssrc, bool muted) {
ap->set_output_will_be_muted(all_muted);
}

// Notify the AudioState that the mute state has updated.
engine_->audio_state()->OnMuteStreamChanged();
webrtc::AudioDeviceModule* adm = engine()->adm();
if (adm) {
adm->SetMicrophoneMute(all_muted);
}

return true;
}
Expand Down
3 changes: 1 addition & 2 deletions media/engine/webrtc_voice_engine.h
Original file line number Diff line number Diff line change
Expand Up @@ -132,8 +132,6 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

absl::optional<webrtc::AudioDeviceModule::Stats> GetAudioDeviceStats()
override;
// Moved to public so WebRtcVoiceMediaChannel can access it.
webrtc::AudioState* audio_state();

private:
// Every option that is "set" will be applied. Every option not "set" will be
Expand All @@ -147,6 +145,7 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface {

webrtc::AudioDeviceModule* adm();
webrtc::AudioProcessing* apm() const;
webrtc::AudioState* audio_state();

std::vector<AudioCodec> CollectCodecs(
const std::vector<webrtc::AudioCodecSpec>& specs) const;
Expand Down
14 changes: 7 additions & 7 deletions modules/audio_device/audio_device_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,11 +34,12 @@
#if defined(WEBRTC_ENABLE_LINUX_PULSE)
#include "modules/audio_device/linux/audio_device_pulse_linux.h"
#endif
#elif defined(WEBRTC_IOS)
#include "sdk/objc/native/src/audio/audio_device_ios.h"
#elif defined(WEBRTC_MAC)
#include "modules/audio_device/mac/audio_device_mac.h"
#endif

#if defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
#include "sdk/objc/native/src/audio/audio_device_audioengine.h"
#endif

#if defined(WEBRTC_DUMMY_FILE_DEVICES)
#include "modules/audio_device/dummy/file_audio_device.h"
#include "modules/audio_device/dummy/file_audio_device_factory.h"
Expand Down Expand Up @@ -246,16 +247,15 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects() {
// iOS ADM implementation.
#if defined(WEBRTC_IOS)
if (audio_layer == kPlatformDefaultAudio) {
audio_device_.reset(
new ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/bypass_voice_processing_));
audio_device_.reset(new ios_adm::AudioDeviceAudioEngine(/*bypass_voice_processing=*/bypass_voice_processing_));
RTC_LOG(LS_INFO) << "iPhone Audio APIs will be utilized.";
}
// END #if defined(WEBRTC_IOS)

// Mac OS X ADM implementation.
#elif defined(WEBRTC_MAC)
if (audio_layer == kPlatformDefaultAudio) {
audio_device_.reset(new AudioDeviceMac());
audio_device_.reset(new ios_adm::AudioDeviceAudioEngine(/*bypass_voice_processing=*/false));
RTC_LOG(LS_INFO) << "Mac OS X Audio APIs will be utilized.";
}
#endif // WEBRTC_MAC
Expand Down
2 changes: 1 addition & 1 deletion rtc_base/socket_address.cc
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ std::string SocketAddress::HostAsSensitiveURIString() const {
}

std::string SocketAddress::PortAsString() const {
return std::to_string(port_);
return rtc::ToString(port_);
}

std::string SocketAddress::ToString() const {
Expand Down
4 changes: 2 additions & 2 deletions sdk/BUILD.gn
Original file line number Diff line number Diff line change
Expand Up @@ -292,8 +292,6 @@ if (is_ios || is_mac) {
visibility = [ "*" ]

sources = [
"objc/native/src/audio/audio_device_ios.h",
"objc/native/src/audio/audio_device_ios.mm",
"objc/native/src/audio/audio_device_module_ios.h",
"objc/native/src/audio/audio_device_module_ios.mm",
"objc/native/src/audio/helpers.h",
Expand Down Expand Up @@ -535,6 +533,8 @@ if (is_ios || is_mac) {
sources = [
"objc/native/api/objc_audio_device_module.h",
"objc/native/api/objc_audio_device_module.mm",
"objc/native/src/audio/audio_device_audioengine.h",
"objc/native/src/audio/audio_device_audioengine.mm",
]

deps = [
Expand Down
27 changes: 26 additions & 1 deletion sdk/android/api/org/webrtc/audio/JavaAudioDeviceModule.java
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import android.media.AudioManager;
import android.os.Build;
import androidx.annotation.RequiresApi;
import java.nio.ByteBuffer;
import java.util.concurrent.ScheduledExecutorService;
import org.webrtc.JniCommon;
import org.webrtc.Logging;
Expand Down Expand Up @@ -43,6 +44,7 @@ public static class Builder {
private AudioRecordErrorCallback audioRecordErrorCallback;
private SamplesReadyCallback samplesReadyCallback;
private PlaybackSamplesReadyCallback playbackSamplesReadyCallback;
private AudioBufferCallback audioBufferCallback;
private AudioTrackStateCallback audioTrackStateCallback;
private AudioRecordStateCallback audioRecordStateCallback;
private boolean useHardwareAcousticEchoCanceler = isBuiltInAcousticEchoCancelerSupported();
Expand Down Expand Up @@ -141,6 +143,14 @@ public Builder setSamplesReadyCallback(SamplesReadyCallback samplesReadyCallback
return this;
}

/**
* Set a callback to listen for buffer requests from the AudioRecord.
*/
public Builder setAudioBufferCallback(AudioBufferCallback audioBufferCallback) {
this.audioBufferCallback = audioBufferCallback;
return this;
}

/**
* Set a callback to listen to the audio output passed to the AudioTrack.
*/
Expand Down Expand Up @@ -264,7 +274,8 @@ public JavaAudioDeviceModule createAudioDeviceModule() {
}
final WebRtcAudioRecord audioInput = new WebRtcAudioRecord(context, executor, audioManager,
audioSource, audioFormat, audioRecordErrorCallback, audioRecordStateCallback,
samplesReadyCallback, useHardwareAcousticEchoCanceler, useHardwareNoiseSuppressor);
samplesReadyCallback, audioBufferCallback, useHardwareAcousticEchoCanceler,
useHardwareNoiseSuppressor);
final WebRtcAudioTrack audioOutput =
new WebRtcAudioTrack(context, audioManager, audioAttributes, audioTrackErrorCallback,
audioTrackStateCallback, playbackSamplesReadyCallback, useLowLatency, enableVolumeLogger);
Expand Down Expand Up @@ -358,6 +369,16 @@ public static interface AudioTrackStateCallback {
void onWebRtcAudioTrackStop();
}

public static interface AudioBufferCallback {
/**
* Called when new audio samples are ready.
* @param buffer the buffer of audio bytes. Changes to this buffer will be published on the audio track.
* @param captureTimeNs the capture timestamp of the original audio data.
* @return the capture timestamp in nanoseconds. Return 0 if not available.
*/
long onBuffer(ByteBuffer buffer, int audioFormat, int channelCount, int sampleRate, int bytesRead, long captureTimeNs);
}

/**
* Returns true if the device supports built-in HW AEC, and the UUID is approved (some UUIDs can
* be excluded).
Expand Down Expand Up @@ -432,6 +453,10 @@ public void setMicrophoneMute(boolean mute) {
audioInput.setMicrophoneMute(mute);
}

public void setAudioRecordEnabled(boolean enable) {
audioInput.setUseAudioRecord(enable);
}

@Override
public boolean setNoiseSuppressorEnabled(boolean enabled) {
Logging.d(TAG, "setNoiseSuppressorEnabled: " + enabled);
Expand Down
Loading

0 comments on commit 9c8d5b0

Please sign in to comment.