Fully migrate to Webrtc::Environment.

This commit is contained in:
John Preston 2024-01-24 12:42:37 +04:00
parent 9a6ab3b0f2
commit 104ba4db7c
21 changed files with 329 additions and 528 deletions

View file

@ -1235,8 +1235,6 @@ PRIVATE
platform/mac/touchbar/mac_touchbar_manager.mm
platform/mac/touchbar/mac_touchbar_media_view.h
platform/mac/touchbar/mac_touchbar_media_view.mm
platform/win/audio_win.cpp
platform/win/audio_win.h
platform/win/file_utilities_win.cpp
platform/win/file_utilities_win.h
platform/win/launcher_win.cpp
@ -1260,7 +1258,6 @@ PRIVATE
platform/win/windows_autostart_task.h
platform/win/windows_toast_activator.cpp
platform/win/windows_toast_activator.h
platform/platform_audio.h
platform/platform_file_utilities.h
platform/platform_launcher.h
platform/platform_integration.cpp

View file

@ -25,8 +25,8 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "media/audio/media_audio_track.h"
#include "base/platform/base_platform_info.h"
#include "calls/calls_panel.h"
#include "webrtc/webrtc_environment.h"
#include "webrtc/webrtc_video_track.h"
#include "webrtc/webrtc_media_devices.h"
#include "webrtc/webrtc_create_adm.h"
#include "data/data_user.h"
#include "data/data_session.h"
@ -429,30 +429,37 @@ void Call::setMuted(bool mute) {
void Call::setupMediaDevices() {
_playbackDeviceId.changes() | rpl::filter([=] {
return _instance != nullptr;
return _instance && _setDeviceIdCallback;
}) | rpl::start_with_next([=](const QString &deviceId) {
_setDeviceIdCallback(
Webrtc::DeviceType::Playback,
deviceId);
_instance->setAudioOutputDevice(deviceId.toStdString());
}, _lifetime);
_captureDeviceId.changes() | rpl::filter([=] {
return _instance != nullptr;
return _instance && _setDeviceIdCallback;
}) | rpl::start_with_next([=](const QString &deviceId) {
_setDeviceIdCallback(
Webrtc::DeviceType::Capture,
deviceId);
_instance->setAudioInputDevice(deviceId.toStdString());
}, _lifetime);
}
void Call::setupOutgoingVideo() {
static const auto hasDevices = [] {
return !Webrtc::GetVideoInputList().empty();
const auto cameraId = [] {
return Core::App().mediaDevices().defaultId(
Webrtc::DeviceType::Camera);
};
const auto started = _videoOutgoing->state();
if (!hasDevices()) {
if (cameraId().isEmpty()) {
_videoOutgoing->setState(Webrtc::VideoState::Inactive);
}
_videoOutgoing->stateValue(
) | rpl::start_with_next([=](Webrtc::VideoState state) {
if (state != Webrtc::VideoState::Inactive
&& !hasDevices()
&& cameraId().isEmpty()
&& !_videoCaptureIsScreencast) {
_errors.fire({ ErrorType::NoCamera });
_videoOutgoing->setState(Webrtc::VideoState::Inactive);
@ -892,6 +899,33 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
const auto versionString = version.toStdString();
const auto &settings = Core::App().settings();
const auto weak = base::make_weak(this);
_setDeviceIdCallback = nullptr;
const auto playbackDeviceIdInitial = _playbackDeviceId.current();
const auto captureDeviceIdInitial = _captureDeviceId.current();
const auto saveSetDeviceIdCallback = [=](
Fn<void(Webrtc::DeviceType, QString)> setDeviceIdCallback) {
setDeviceIdCallback(
Webrtc::DeviceType::Playback,
playbackDeviceIdInitial);
setDeviceIdCallback(
Webrtc::DeviceType::Capture,
captureDeviceIdInitial);
crl::on_main(weak, [=] {
_setDeviceIdCallback = std::move(setDeviceIdCallback);
const auto playback = _playbackDeviceId.current();
if (_instance && playback != playbackDeviceIdInitial) {
_setDeviceIdCallback(Webrtc::DeviceType::Playback, playback);
_instance->setAudioOutputDevice(playback.toStdString());
}
const auto capture = _captureDeviceId.current();
if (_instance && capture != captureDeviceIdInitial) {
_setDeviceIdCallback(Webrtc::DeviceType::Capture, capture);
_instance->setAudioInputDevice(capture.toStdString());
}
});
};
tgcalls::Descriptor descriptor = {
.version = versionString,
.config = tgcalls::Config{
@ -910,8 +944,8 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
std::move(encryptionKeyValue),
(_type == Type::Outgoing)),
.mediaDevicesConfig = tgcalls::MediaDevicesConfig{
.audioInputId = _captureDeviceId.current().toStdString(),
.audioOutputId = _playbackDeviceId.current().toStdString(),
.audioInputId = captureDeviceIdInitial.toStdString(),
.audioOutputId = playbackDeviceIdInitial.toStdString(),
.inputVolume = 1.f,//settings.callInputVolume() / 100.f,
.outputVolume = 1.f,//settings.callOutputVolume() / 100.f,
},
@ -942,7 +976,7 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
});
},
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
settings.callAudioBackend()),
saveSetDeviceIdCallback),
};
if (Logs::DebugEnabled()) {
const auto callLogFolder = cWorkingDir() + u"DebugLogs"_q;

View file

@ -271,6 +271,7 @@ private:
base::DelayedCallTimer _finishByTimeoutTimer;
base::Timer _discardByTimeoutTimer;
Fn<void(Webrtc::DeviceType, QString)> _setDeviceIdCallback;
Webrtc::DeviceId _playbackDeviceId;
Webrtc::DeviceId _captureDeviceId;
Webrtc::DeviceId _cameraDeviceId;

View file

@ -48,8 +48,8 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "base/power_save_blocker.h"
#include "media/streaming/media_streaming_utility.h"
#include "window/main_window.h"
#include "webrtc/webrtc_environment.h"
#include "webrtc/webrtc_video_track.h"
#include "webrtc/webrtc_media_devices.h"
#include "styles/style_calls.h"
#include "styles/style_chat.h"
@ -238,13 +238,14 @@ void Panel::initControls() {
}
});
_screencast->entity()->setClickedCallback([=] {
const auto env = &Core::App().mediaDevices();
if (!_call) {
return;
} else if (!Webrtc::DesktopCaptureAllowed()) {
} else if (!env->desktopCaptureAllowed()) {
if (auto box = Group::ScreenSharingPrivacyRequestBox()) {
_layerBg->showBox(std::move(box));
}
} else if (const auto source = Webrtc::UniqueDesktopCaptureSource()) {
} else if (const auto source = env->uniqueDesktopCaptureSource()) {
if (_call->isSharingScreen()) {
_call->toggleScreenSharing(std::nullopt);
} else {

View file

@ -2065,14 +2065,16 @@ void GroupCall::applyOtherParticipantUpdate(
void GroupCall::setupMediaDevices() {
_playbackDeviceId.changes() | rpl::filter([=] {
return _instance != nullptr;
return _instance && _setDeviceIdCallback;
}) | rpl::start_with_next([=](const QString &deviceId) {
_setDeviceIdCallback(Webrtc::DeviceType::Playback, deviceId);
_instance->setAudioOutputDevice(deviceId.toStdString());
}, _lifetime);
_captureDeviceId.changes() | rpl::filter([=] {
return _instance != nullptr;
return _instance && _setDeviceIdCallback;
}) | rpl::start_with_next([=](const QString &deviceId) {
_setDeviceIdCallback(Webrtc::DeviceType::Capture, deviceId);
_instance->setAudioInputDevice(deviceId.toStdString());
}, _lifetime);
@ -2338,6 +2340,31 @@ bool GroupCall::tryCreateController() {
const auto weak = base::make_weak(&_instanceGuard);
const auto myLevel = std::make_shared<tgcalls::GroupLevelValue>();
const auto playbackDeviceIdInitial = _playbackDeviceId.current();
const auto captureDeviceIdInitial = _captureDeviceId.current();
const auto saveSetDeviceIdCallback = [=](
Fn<void(Webrtc::DeviceType, QString)> setDeviceIdCallback) {
setDeviceIdCallback(
Webrtc::DeviceType::Playback,
playbackDeviceIdInitial);
setDeviceIdCallback(
Webrtc::DeviceType::Capture,
captureDeviceIdInitial);
crl::on_main(weak, [=] {
_setDeviceIdCallback = std::move(setDeviceIdCallback);
const auto playback = _playbackDeviceId.current();
if (_instance && playback != playbackDeviceIdInitial) {
_setDeviceIdCallback(Webrtc::DeviceType::Playback, playback);
_instance->setAudioOutputDevice(playback.toStdString());
}
const auto capture = _captureDeviceId.current();
if (_instance && capture != captureDeviceIdInitial) {
_setDeviceIdCallback(Webrtc::DeviceType::Capture, capture);
_instance->setAudioInputDevice(capture.toStdString());
}
});
};
tgcalls::GroupInstanceDescriptor descriptor = {
.threads = tgcalls::StaticThreads::getThreads(),
.config = tgcalls::GroupConfig{
@ -2360,10 +2387,10 @@ bool GroupCall::tryCreateController() {
}
crl::on_main(weak, [=] { audioLevelsUpdated(data); });
},
.initialInputDeviceId = _captureDeviceId.current().toStdString(),
.initialOutputDeviceId = _playbackDeviceId.current().toStdString(),
.initialInputDeviceId = captureDeviceIdInitial.toStdString(),
.initialOutputDeviceId = playbackDeviceIdInitial.toStdString(),
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
settings.callAudioBackend()),
saveSetDeviceIdCallback),
.videoCapture = _cameraCapture,
.requestCurrentTime = [=, call = base::make_weak(this)](
std::function<void(int64_t)> done) {

View file

@ -667,6 +667,7 @@ private:
crl::time _lastSendProgressUpdate = 0;
Fn<void(Webrtc::DeviceType, QString)> _setDeviceIdCallback;
Webrtc::DeviceId _playbackDeviceId;
Webrtc::DeviceId _captureDeviceId;
Webrtc::DeviceId _cameraDeviceId;

View file

@ -54,8 +54,8 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "base/power_save_blocker.h"
#include "apiwrap.h" // api().kick.
#include "api/api_chat_participants.h" // api().kick.
#include "webrtc/webrtc_environment.h"
#include "webrtc/webrtc_video_track.h"
#include "webrtc/webrtc_media_devices.h" // UniqueDesktopCaptureSource.
#include "webrtc/webrtc_audio_input_tester.h"
#include "styles/style_calls.h"
#include "styles/style_layers.h"
@ -1374,9 +1374,10 @@ void Panel::chooseShareScreenSource() {
return;
}
const auto choose = [=] {
if (!Webrtc::DesktopCaptureAllowed()) {
const auto env = &Core::App().mediaDevices();
if (!env->desktopCaptureAllowed()) {
screenSharingPrivacyRequest();
} else if (const auto source = Webrtc::UniqueDesktopCaptureSource()) {
} else if (const auto source = env->uniqueDesktopCaptureSource()) {
if (_call->isSharingScreen()) {
_call->toggleScreenSharing(std::nullopt);
} else {
@ -2003,7 +2004,8 @@ void Panel::trackControlOver(not_null<Ui::RpWidget*> control, bool over) {
}
void Panel::showStickedTooltip() {
static const auto kHasCamera = !Webrtc::GetVideoInputList().empty();
static const auto kHasCamera = !Core::App().mediaDevices().defaultId(
Webrtc::DeviceType::Camera).isEmpty();
const auto callReady = (_call->state() == State::Joined
|| _call->state() == State::Connecting);
if (!(_stickedTooltipsShown & StickedTooltip::Camera)

View file

@ -42,7 +42,6 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "core/application.h"
#include "core/core_settings.h"
#include "webrtc/webrtc_audio_input_tester.h"
#include "webrtc/webrtc_media_devices.h"
#include "settings/settings_calls.h"
#include "main/main_session.h"
#include "apiwrap.h"
@ -250,6 +249,7 @@ void SettingsBox(
const auto weakBox = Ui::MakeWeak(box);
struct State {
std::unique_ptr<Webrtc::DeviceId> computedDeviceId;
std::unique_ptr<Webrtc::AudioInputTester> micTester;
Ui::LevelMeter *micTestLevel = nullptr;
float micLevel = 0.;
@ -327,9 +327,6 @@ void SettingsBox(
crl::guard(box, [=](const QString &id) {
Core::App().settings().setCallCaptureDeviceId(id);
Core::App().saveSettingsDelayed();
if (state->micTester) {
state->micTester->setDeviceId(id);
}
}),
&st::groupCallCheckbox,
&st::groupCallRadio));
@ -773,9 +770,14 @@ void SettingsBox(
box->setShowFinishedCallback([=] {
// Means we finished showing the box.
crl::on_main(box, [=] {
state->computedDeviceId = std::make_unique<Webrtc::DeviceId>(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Capture,
Webrtc::DeviceIdValueWithFallback(
Core::App().settings().callCaptureDeviceIdValue(),
Core::App().settings().captureDeviceIdValue()));
state->micTester = std::make_unique<Webrtc::AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callCaptureDeviceId());
state->computedDeviceId->value());
state->levelUpdateTimer.callEach(kMicTestUpdateInterval);
});
});
@ -884,8 +886,9 @@ MicLevelTester::MicLevelTester(Fn<void()> show)
, _timer([=] { check(); })
, _tester(
std::make_unique<Webrtc::AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callCaptureDeviceId())) {
Webrtc::DeviceIdValueWithFallback(
Core::App().settings().callCaptureDeviceIdValue(),
Core::App().settings().captureDeviceIdValue()))) {
_timer.callEach(kMicrophoneTooltipCheckInterval);
}

View file

@ -431,7 +431,7 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
qint32 groupCallPushToTalk = _groupCallPushToTalk ? 1 : 0;
QByteArray groupCallPushToTalkShortcut = _groupCallPushToTalkShortcut;
qint64 groupCallPushToTalkDelay = _groupCallPushToTalkDelay;
qint32 callAudioBackend = 0;
qint32 legacyCallAudioBackend = 0;
qint32 disableCallsLegacy = 0;
QByteArray windowPosition;
std::vector<RecentEmojiPreload> recentEmojiPreload;
@ -565,7 +565,7 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
>> groupCallPushToTalkDelay;
}
if (!stream.atEnd()) {
stream >> callAudioBackend;
stream >> legacyCallAudioBackend;
}
if (!stream.atEnd()) {
stream >> disableCallsLegacy;
@ -991,10 +991,6 @@ void Settings::setTabbedReplacedWithInfo(bool enabled) {
}
}
Webrtc::Backend Settings::callAudioBackend() const {
return Webrtc::Backend::OpenAL;
}
void Settings::setDialogsWidthRatio(float64 ratio) {
_dialogsWidthRatio = ratio;
}

View file

@ -29,10 +29,6 @@ namespace Window {
enum class Column;
} // namespace Window
namespace Webrtc {
enum class Backend;
} // namespace Webrtc
namespace Calls::Group {
enum class StickedTooltip;
} // namespace Calls::Group
@ -343,7 +339,6 @@ public:
void setCallAudioDuckingEnabled(bool value) {
_callAudioDuckingEnabled = value;
}
[[nodiscard]] Webrtc::Backend callAudioBackend() const;
[[nodiscard]] bool disableCallsLegacy() const {
return _disableCallsLegacy;
}

View file

@ -14,11 +14,9 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "media/media_common.h"
#include "media/streaming/media_streaming_utility.h"
#include "webrtc/webrtc_environment.h"
#include "webrtc/webrtc_media_devices.h"
#include "data/data_document.h"
#include "data/data_file_origin.h"
#include "data/data_session.h"
#include "platform/platform_audio.h"
#include "core/application.h"
#include "core/core_settings.h"
#include "main/main_session.h"
@ -73,57 +71,6 @@ bool PlaybackErrorHappened() {
return false;
}
void EnumeratePlaybackDevices() {
auto deviceNames = QStringList();
auto devices = [&] {
if (alcIsExtensionPresent(nullptr, "ALC_ENUMERATE_ALL_EXT")) {
return alcGetString(nullptr, alcGetEnumValue(nullptr, "ALC_ALL_DEVICES_SPECIFIER"));
} else {
return alcGetString(nullptr, ALC_DEVICE_SPECIFIER);
}
}();
Assert(devices != nullptr);
while (*devices != 0) {
auto deviceName8Bit = QByteArray(devices);
auto deviceName = QString::fromUtf8(deviceName8Bit);
deviceNames.append(deviceName);
devices += deviceName8Bit.size() + 1;
}
LOG(("Audio Playback Devices: %1").arg(deviceNames.join(';')));
auto device = [&] {
if (alcIsExtensionPresent(nullptr, "ALC_ENUMERATE_ALL_EXT")) {
return alcGetString(nullptr, alcGetEnumValue(nullptr, "ALC_DEFAULT_ALL_DEVICES_SPECIFIER"));
} else {
return alcGetString(nullptr, ALC_DEFAULT_DEVICE_SPECIFIER);
}
}();
if (device) {
LOG(("Audio Playback Default Device: %1").arg(QString::fromUtf8(device)));
} else {
LOG(("Audio Playback Default Device: (null)"));
}
}
void EnumerateCaptureDevices() {
auto deviceNames = QStringList();
auto devices = alcGetString(nullptr, ALC_CAPTURE_DEVICE_SPECIFIER);
Assert(devices != nullptr);
while (*devices != 0) {
auto deviceName8Bit = QByteArray(devices);
auto deviceName = QString::fromUtf8(deviceName8Bit);
deviceNames.append(deviceName);
devices += deviceName8Bit.size() + 1;
}
LOG(("Audio Capture Devices: %1").arg(deviceNames.join(';')));
if (auto device = alcGetString(nullptr, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) {
LOG(("Audio Capture Default Device: %1").arg(QString::fromUtf8(device)));
} else {
LOG(("Audio Capture Default Device: (null)"));
}
}
// Thread: Any. Must be locked: AudioMutex.
void DestroyPlaybackDevice() {
if (AudioContext) {
@ -142,7 +89,7 @@ void DestroyPlaybackDevice() {
bool CreatePlaybackDevice() {
if (AudioDevice) return true;
const auto id = Current().deviceId().toStdString();
const auto id = Current().playbackDeviceId().toStdString();
AudioDevice = alcOpenDevice(id.c_str());
if (!AudioDevice) {
LOG(("Audio Error: Could not create default playback device, refreshing.."));
@ -193,25 +140,14 @@ void Start(not_null<Instance*> instance) {
qRegisterMetaType<AudioMsgId>();
qRegisterMetaType<VoiceWaveform>();
if (!Webrtc::InitPipewireStubs()) {
LOG(("Audio Info: Failed to load pipewire 0.3 stubs."));
}
auto loglevel = getenv("ALSOFT_LOGLEVEL");
const auto loglevel = getenv("ALSOFT_LOGLEVEL");
LOG(("OpenAL Logging Level: %1").arg(loglevel ? loglevel : "(not set)"));
EnumeratePlaybackDevices();
EnumerateCaptureDevices();
MixerInstance = new Player::Mixer(instance);
//Platform::Audio::Init();
}
// Thread: Main.
void Finish(not_null<Instance*> instance) {
//Platform::Audio::DeInit();
// MixerInstance variable should be modified under AudioMutex protection.
// So it is modified in the ~Mixer() destructor after all tracks are cleared.
delete MixerInstance;

View file

@ -9,6 +9,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "media/audio/media_audio_capture_common.h"
#include "media/audio/media_audio_ffmpeg_loader.h"
#include "media/audio/media_audio_track.h"
#include "ffmpeg/ffmpeg_utility.h"
#include "base/timer.h"
@ -84,7 +85,7 @@ public:
Inner(QThread *thread);
~Inner();
void start(Fn<void(Update)> updated, Fn<void()> error);
void start(QString id, Fn<void(Update)> updated, Fn<void()> error);
void stop(Fn<void(Result&&)> callback = nullptr);
void pause(bool value, Fn<void(Result&&)> callback);
@ -129,8 +130,9 @@ Instance::Instance() : _inner(std::make_unique<Inner>(&_thread)) {
void Instance::start() {
_updates.fire_done();
const auto id = Audio::Current().captureDeviceId();
InvokeQueued(_inner.get(), [=] {
_inner->start([=](Update update) {
_inner->start(id, [=](Update update) {
crl::on_main(this, [=] {
_updates.fire_copy(update);
});
@ -292,7 +294,10 @@ void Instance::Inner::fail() {
}
}
void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
void Instance::Inner::start(
QString id,
Fn<void(Update)> updated,
Fn<void()> error) {
_updated = std::move(updated);
_error = std::move(error);
if (_paused) {
@ -300,7 +305,12 @@ void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
}
// Start OpenAL Capture
d->device = alcCaptureOpenDevice(nullptr, kCaptureFrequency, AL_FORMAT_MONO16, kCaptureFrequency / 5);
const auto utf = id.toStdString();
d->device = alcCaptureOpenDevice(
utf.c_str(),
kCaptureFrequency,
AL_FORMAT_MONO16,
kCaptureFrequency / 5);
if (!d->device) {
LOG(("Audio Error: capture device not present!"));
fail();

View file

@ -248,7 +248,12 @@ Instance::Instance()
&Core::App().mediaDevices(),
Webrtc::DeviceType::Playback,
Webrtc::DeviceIdOrDefault(
Core::App().settings().playbackDeviceIdValue())) {
Core::App().settings().playbackDeviceIdValue()))
, _captureDeviceId(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Capture,
Webrtc::DeviceIdOrDefault(
Core::App().settings().captureDeviceIdValue())) {
_updateTimer.setCallback([this] {
auto hasActive = false;
for (auto track : _tracks) {
@ -273,10 +278,14 @@ Instance::Instance()
}, _lifetime);
}
QString Instance::deviceId() const {
QString Instance::playbackDeviceId() const {
return _playbackDeviceId.current();
}
QString Instance::captureDeviceId() const {
return _captureDeviceId.current();
}
std::unique_ptr<Track> Instance::createTrack() {
return std::make_unique<Track>(this);
}

View file

@ -95,7 +95,8 @@ public:
// Thread: Main.
Instance();
[[nodiscard]] QString deviceId() const;
[[nodiscard]] QString playbackDeviceId() const;
[[nodiscard]] QString captureDeviceId() const;
[[nodiscard]] std::unique_ptr<Track> createTrack();
@ -119,6 +120,7 @@ private:
private:
std::set<Track*> _tracks;
Webrtc::DeviceId _playbackDeviceId;
Webrtc::DeviceId _captureDeviceId;
base::Timer _updateTimer;

View file

@ -1,36 +0,0 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.
For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#pragma once
namespace Platform {
namespace Audio {
// Installs / removes platform audio-device change monitoring.
// Init() is expected to be called once at startup, DeInit() at shutdown;
// on non-Windows platforms both are no-ops (see stubs below).
void Init();
void DeInit();
} // namespace Audio
} // namespace Platform
// Platform dependent implementations.
#if defined Q_OS_WINRT || defined Q_OS_WIN
#include "platform/win/audio_win.h"
#else // Q_OS_WINRT || Q_OS_WIN
namespace Platform {
namespace Audio {
// No device-change monitoring outside Windows: empty inline stubs.
inline void Init() {
}
inline void DeInit() {
}
} // namespace Audio
} // namespace Platform
#endif // Q_OS_WINRT || Q_OS_WIN

View file

@ -1,180 +0,0 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.
For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#include "platform/win/audio_win.h"
#include "platform/win/windows_dlls.h"
#include "media/audio/media_audio.h"
#include <initguid.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <wrl/client.h>
using namespace Microsoft::WRL;
namespace Platform {
namespace Audio {
namespace {
// Inspired by Chromium.
// Listens to WASAPI endpoint notifications and schedules a detach from the
// current audio device whenever the default device, a device state, or a
// relevant device property changes. Lifetime is managed manually by
// Init()/DeInit() below, so AddRef()/Release() do no real reference
// counting and simply return a constant.
class DeviceListener : public IMMNotificationClient {
public:
DeviceListener() = default;
DeviceListener(const DeviceListener &other) = delete;
DeviceListener &operator=(const DeviceListener &other) = delete;
virtual ~DeviceListener() = default;
private:
// IMMNotificationClient implementation.
STDMETHOD_(ULONG, AddRef)() override {
return 1;
}
STDMETHOD_(ULONG, Release)() override {
return 1;
}
STDMETHOD(QueryInterface)(REFIID iid, void** object) override;
STDMETHOD(OnPropertyValueChanged)(LPCWSTR device_id, const PROPERTYKEY key) override;
// Device add/remove alone do not require a detach — only state /
// default-device / property changes are acted upon.
STDMETHOD(OnDeviceAdded)(LPCWSTR device_id) override {
return S_OK;
}
STDMETHOD(OnDeviceRemoved)(LPCWSTR device_id) override {
return S_OK;
}
STDMETHOD(OnDeviceStateChanged)(LPCWSTR device_id, DWORD new_state) override;
STDMETHOD(OnDefaultDeviceChanged)(EDataFlow flow, ERole role, LPCWSTR new_default_device_id) override;
};
// Standard COM identity: only IUnknown and IMMNotificationClient are served.
STDMETHODIMP DeviceListener::QueryInterface(REFIID iid, void** object) {
if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) {
*object = static_cast<IMMNotificationClient*>(this);
return S_OK;
}
*object = NULL;
return E_NOINTERFACE;
}
// Reacts only to a small whitelist of property keys; everything else is
// logged at debug level and ignored (some keys fire several times a second).
STDMETHODIMP DeviceListener::OnPropertyValueChanged(LPCWSTR device_id, const PROPERTYKEY key) {
auto deviceName = device_id ? '"' + QString::fromWCharArray(device_id) + '"' : QString("nullptr");
constexpr auto kKeyBufferSize = 1024;
WCHAR keyBuffer[kKeyBufferSize] = { 0 };
// PSStringFromPropertyKey is resolved dynamically; may be unavailable.
auto hr = Dlls::PSStringFromPropertyKey ? Dlls::PSStringFromPropertyKey(key, keyBuffer, kKeyBufferSize) : E_FAIL;
auto keyName = Dlls::PSStringFromPropertyKey ? (SUCCEEDED(hr) ? '"' + QString::fromWCharArray(keyBuffer) + '"' : QString("unknown")) : QString("unsupported");
// BAD GUID { 0xD4EF3098, 0xC967, 0x4A4E, { 0xB2, 0x19, 0xAC, 0xB6, 0xDA, 0x1D, 0xC3, 0x73 } };
// BAD GUID { 0x3DE556E2, 0xE087, 0x4721, { 0xBE, 0x97, 0xEC, 0x16, 0x2D, 0x54, 0x81, 0xF8 } };
// VERY BAD GUID { 0x91F1336D, 0xC37C, 0x4C48, { 0xAD, 0xEB, 0x92, 0x17, 0x2F, 0xA8, 0x7E, 0xEB } };
// It is fired somewhere from CloseAudioPlaybackDevice() causing deadlock on AudioMutex.
// Sometimes unknown value change events come very frequently, like each 0.5 seconds.
// So we will handle only special value change events from mmdeviceapi.h
//
// We have logs of PKEY_AudioEndpoint_Disable_SysFx property change 3-5 times each second.
// So for now we disable PKEY_AudioEndpoint and both PKEY_AudioUnknown changes handling
//.
// constexpr GUID pkey_AudioEndpoint = { 0x1da5d803, 0xd492, 0x4edd, { 0x8c, 0x23, 0xe0, 0xc0, 0xff, 0xee, 0x7f, 0x0e } };
constexpr GUID pkey_AudioEngine_Device = { 0xf19f064d, 0x82c, 0x4e27, { 0xbc, 0x73, 0x68, 0x82, 0xa1, 0xbb, 0x8e, 0x4c } };
constexpr GUID pkey_AudioEngine_OEM = { 0xe4870e26, 0x3cc5, 0x4cd2, { 0xba, 0x46, 0xca, 0xa, 0x9a, 0x70, 0xed, 0x4 } };
// constexpr GUID pkey_AudioUnknown1 = { 0x3d6e1656, 0x2e50, 0x4c4c, { 0x8d, 0x85, 0xd0, 0xac, 0xae, 0x3c, 0x6c, 0x68 } };
// constexpr GUID pkey_AudioUnknown2 = { 0x624f56de, 0xfd24, 0x473e, { 0x81, 0x4a, 0xde, 0x40, 0xaa, 0xca, 0xed, 0x16 } };
// Only the fmtid is compared (not key.pid): any property in the two
// AudioEngine property sets triggers a detach.
if (false
// || key.fmtid == pkey_AudioEndpoint
|| key.fmtid == pkey_AudioEngine_Device
|| key.fmtid == pkey_AudioEngine_OEM
// || key.fmtid == pkey_AudioUnknown1
// || key.fmtid == pkey_AudioUnknown2
|| false) {
LOG(("Audio Info: OnPropertyValueChanged(%1, %2) scheduling detach from audio device.").arg(deviceName).arg(keyName));
Media::Audio::ScheduleDetachFromDeviceSafe();
} else {
DEBUG_LOG(("Audio Info: OnPropertyValueChanged(%1, %2) unknown, skipping.").arg(deviceName).arg(keyName));
}
return S_OK;
}
// Any device state transition schedules a detach unconditionally.
STDMETHODIMP DeviceListener::OnDeviceStateChanged(LPCWSTR device_id, DWORD new_state) {
auto deviceName = device_id ? '"' + QString::fromWCharArray(device_id) + '"' : QString("nullptr");
LOG(("Audio Info: OnDeviceStateChanged(%1, %2) scheduling detach from audio device.").arg(deviceName).arg(new_state));
Media::Audio::ScheduleDetachFromDeviceSafe();
return S_OK;
}
STDMETHODIMP DeviceListener::OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR new_default_device_id) {
// Only listen for console and communication device changes.
if ((role != eConsole && role != eCommunications) || (flow != eRender && flow != eCapture)) {
LOG(("Audio Info: skipping OnDefaultDeviceChanged() flow %1, role %2, new_default_device_id: %3").arg(flow).arg(role).arg(new_default_device_id ? '"' + QString::fromWCharArray(new_default_device_id) + '"' : QString("nullptr")));
return S_OK;
}
LOG(("Audio Info: OnDefaultDeviceChanged() scheduling detach from audio device, flow %1, role %2, new_default_device_id: %3").arg(flow).arg(role).arg(new_default_device_id ? '"' + QString::fromWCharArray(new_default_device_id) + '"' : QString("nullptr")));
Media::Audio::ScheduleDetachFromDeviceSafe();
return S_OK;
}
// Module state, guarded by single-threaded Init()/DeInit() usage —
// NOTE(review): no locking here, assumes both run on the main thread; confirm.
auto WasCoInitialized = false;
ComPtr<IMMDeviceEnumerator> Enumerator;
DeviceListener *Listener = nullptr;
} // namespace
// Creates the MMDeviceEnumerator and registers the endpoint listener.
// If COM is not yet initialized (CO_E_NOTINITIALIZED), initializes it and
// remembers that fact so DeInit() can balance with CoUninitialize().
void Init() {
auto hr = CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&Enumerator));
if (FAILED(hr)) {
Enumerator.Reset();
if (hr == CO_E_NOTINITIALIZED) {
LOG(("Audio Info: CoCreateInstance fails with CO_E_NOTINITIALIZED"));
hr = CoInitialize(nullptr);
if (SUCCEEDED(hr)) {
WasCoInitialized = true;
hr = CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&Enumerator));
if (FAILED(hr)) {
Enumerator.Reset();
LOG(("Audio Error: could not CoCreateInstance of MMDeviceEnumerator, HRESULT: %1").arg(hr));
return;
}
}
} else {
LOG(("Audio Error: could not CoCreateInstance of MMDeviceEnumerator, HRESULT: %1").arg(hr));
return;
}
}
Listener = new DeviceListener();
hr = Enumerator->RegisterEndpointNotificationCallback(Listener);
if (FAILED(hr)) {
LOG(("Audio Error: RegisterEndpointNotificationCallback failed, HRESULT: %1").arg(hr));
// Registration failed — free the listener so DeInit() won't try to
// unregister a callback that was never installed.
delete base::take(Listener);
}
}
// Unregisters the listener, drops the enumerator and, if Init() was the one
// that initialized COM, uninitializes it again.
void DeInit() {
if (Enumerator) {
if (Listener) {
auto hr = Enumerator->UnregisterEndpointNotificationCallback(Listener);
if (FAILED(hr)) {
LOG(("Audio Error: UnregisterEndpointNotificationCallback failed, HRESULT: %1").arg(hr));
}
delete base::take(Listener);
}
Enumerator.Reset();
}
if (WasCoInitialized) {
CoUninitialize();
}
}
} // namespace Audio
} // namespace Platform

View file

@ -1,18 +0,0 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.
For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#pragma once
namespace Platform {
namespace Audio {
// Windows implementation: installs an IMMNotificationClient that watches
// for audio endpoint changes (see audio_win.cpp). Call Init() once at
// startup and DeInit() once at shutdown.
void Init();
void DeInit();
} // namespace Audio
} // namespace Platform

View file

@ -30,7 +30,6 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "apiwrap.h"
#include "api/api_authorizations.h"
#include "webrtc/webrtc_environment.h"
#include "webrtc/webrtc_media_devices.h"
#include "webrtc/webrtc_video_track.h"
#include "webrtc/webrtc_audio_input_tester.h"
#include "webrtc/webrtc_create_adm.h" // Webrtc::Backend.
@ -46,12 +45,15 @@ using namespace Webrtc;
DeviceType type,
rpl::producer<QString> id) {
return std::move(id) | rpl::map([type](const QString &id) {
const auto list = Core::App().mediaDevices().devices(type);
const auto i = ranges::find(list, id, &DeviceInfo::id);
return (i != end(list))
? i->name
: tr::lng_settings_call_device_default(tr::now);
});
return Core::App().mediaDevices().devicesValue(
type
) | rpl::map([id](const std::vector<DeviceInfo> &list) {
const auto i = ranges::find(list, id, &DeviceInfo::id);
return (i != end(list) && !i->inactive)
? i->name
: tr::lng_settings_call_device_default(tr::now);
});
}) | rpl::flatten_latest();
}
} // namespace
@ -82,10 +84,6 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
const auto hasCall = (Core::App().calls().currentCall() != nullptr);
const auto cameraNameStream = lifetime.make_state<
rpl::event_stream<QString>
>();
auto capturerOwner = lifetime.make_state<
std::shared_ptr<tgcalls::VideoCaptureInterface>
>();
@ -95,62 +93,30 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
? VideoState::Inactive
: VideoState::Active));
const auto currentCameraName = [&] {
const auto cameras = GetVideoInputList();
const auto i = ranges::find(
cameras,
Core::App().settings().cameraDeviceId(),
&VideoInput::id);
return (i != end(cameras))
? i->name
: tr::lng_settings_call_device_default(tr::now);
}();
const auto deviceId = lifetime.make_state<rpl::variable<QString>>(
Core::App().settings().cameraDeviceId());
auto resolvedId = rpl::deferred([=] {
return DeviceIdOrDefault(deviceId->value());
});
AddButtonWithLabel(
content,
tr::lng_settings_call_input_device(),
rpl::single(
currentCameraName
) | rpl::then(
cameraNameStream->events()
),
CameraDeviceNameValue(rpl::duplicate(resolvedId)),
st::settingsButtonNoIcon
)->addClickHandler([=] {
const auto &devices = GetVideoInputList();
const auto options = ranges::views::concat(
ranges::views::single(
tr::lng_settings_call_device_default(tr::now)),
devices | ranges::views::transform(&VideoInput::name)
) | ranges::to_vector;
const auto i = ranges::find(
devices,
Core::App().settings().cameraDeviceId(),
&VideoInput::id);
const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1)
: 0;
const auto save = crl::guard(content, [=](int option) {
cameraNameStream->fire_copy(options[option]);
const auto deviceId = option
? devices[option - 1].id
: kDefaultDeviceId;
if (saveToSettings) {
Core::App().settings().setCameraDeviceId(deviceId);
Core::App().saveSettingsDelayed();
}
if (*capturerOwner) {
(*capturerOwner)->switchToDevice(
deviceId.toStdString(),
false);
}
});
show->showBox(Box([=](not_null<Ui::GenericBox*> box) {
SingleChoiceBox(box, {
.title = tr::lng_settings_call_camera(),
.options = options,
.initialSelection = currentOption,
.callback = save,
});
show->show(ChooseCameraDeviceBox(
rpl::duplicate(resolvedId),
[=](const QString &id) {
*deviceId = id;
if (saveToSettings) {
Core::App().settings().setCameraDeviceId(id);
Core::App().saveSettingsDelayed();
}
if (*capturerOwner) {
(*capturerOwner)->switchToDevice(
id.toStdString(),
false);
}
}));
});
const auto bubbleWrap = content->add(object_ptr<Ui::RpWidget>(content));
@ -221,9 +187,7 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
}
void Calls::sectionSaveChanges(FnMut<void()> done) {
if (_micTester) {
_micTester.reset();
}
_testingMicrophone = false;
done();
}
@ -234,61 +198,25 @@ void Calls::setupContent() {
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_output());
const auto playbackIdWithFallback = [=] {
return DeviceIdOrDefault(settings->playbackDeviceIdValue());
};
AddButtonWithLabel(
initPlaybackButton(
content,
tr::lng_settings_call_output_device(),
PlaybackDeviceNameValue(playbackIdWithFallback()),
st::settingsButtonNoIcon
)->addClickHandler([=] {
_controller->show(ChoosePlaybackDeviceBox(
playbackIdWithFallback(),
crl::guard(this, [=](const QString &id) {
settings->setPlaybackDeviceId(id);
Core::App().saveSettingsDelayed();
})));
});
rpl::deferred([=] {
return DeviceIdOrDefault(settings->playbackDeviceIdValue());
}),
[=](const QString &id) { settings->setPlaybackDeviceId(id); });
Ui::AddSkip(content);
Ui::AddDivider(content);
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_input());
const auto captureIdWithFallback = [=] {
return DeviceIdOrDefault(settings->captureDeviceIdValue());
};
AddButtonWithLabel(
initCaptureButton(
content,
tr::lng_settings_call_input_device(),
CaptureDeviceNameValue(captureIdWithFallback()),
st::settingsButtonNoIcon
)->addClickHandler([=] {
_controller->show(ChooseCaptureDeviceBox(
captureIdWithFallback(),
crl::guard(this, [=](const QString &id) {
settings->setCaptureDeviceId(id);
Core::App().saveSettingsDelayed();
if (_micTester) {
_micTester->setDeviceId(id);
}
})));
});
_micTestLevel = content->add(
object_ptr<Ui::LevelMeter>(
content,
st::defaultLevelMeter),
st::settingsLevelMeterPadding);
_micTestLevel->resize(QSize(0, st::defaultLevelMeter.height));
_levelUpdateTimer.setCallback([=] {
const auto was = _micLevel;
_micLevel = _micTester->getAndResetLevel();
_micLevelAnimation.start([=] {
_micTestLevel->setValue(_micLevelAnimation.value(_micLevel));
}, was, _micLevel, kMicTestAnimationDuration);
});
rpl::deferred([=] {
return DeviceIdOrDefault(settings->captureDeviceIdValue());
}),
[=](const QString &id) { settings->setCaptureDeviceId(id); });
Ui::AddSkip(content);
Ui::AddDivider(content);
@ -329,50 +257,30 @@ void Calls::setupContent() {
content,
object_ptr<Ui::VerticalLayout>(content)));
const auto calls = different->entity();
const auto callPlaybackIdWithFallback = [=] {
return DeviceIdValueWithFallback(
settings->callPlaybackDeviceIdValue(),
settings->playbackDeviceIdValue());
};
AddButtonWithLabel(
initPlaybackButton(
calls,
tr::lng_group_call_speakers(),
PlaybackDeviceNameValue(callPlaybackIdWithFallback()),
st::settingsButtonNoIcon
)->addClickHandler([=] {
_controller->show(ChoosePlaybackDeviceBox(
callPlaybackIdWithFallback(),
crl::guard(this, [=](const QString &id) {
settings->setCallPlaybackDeviceId(orDefault(id));
Core::App().saveSettingsDelayed();
})));
});
const auto callCaptureIdWithFallback = [=] {
return DeviceIdValueWithFallback(
settings->callCaptureDeviceIdValue(),
settings->captureDeviceIdValue());
};
AddButtonWithLabel(
rpl::deferred([=] {
return DeviceIdValueWithFallback(
settings->callPlaybackDeviceIdValue(),
settings->playbackDeviceIdValue());
}),
[=](const QString &id) { settings->setCallPlaybackDeviceId(id); });
initCaptureButton(
calls,
tr::lng_group_call_microphone(),
CaptureDeviceNameValue(callCaptureIdWithFallback()),
st::settingsButtonNoIcon
)->addClickHandler([=] {
_controller->show(ChooseCaptureDeviceBox(
callCaptureIdWithFallback(),
crl::guard(this, [=](const QString &id) {
settings->setCallCaptureDeviceId(orDefault(id));
Core::App().saveSettingsDelayed();
//if (_micTester) {
// _micTester->setDeviceId(id);
//}
})));
});
rpl::deferred([=] {
return DeviceIdValueWithFallback(
settings->callCaptureDeviceIdValue(),
settings->captureDeviceIdValue());
}),
[=](const QString &id) { settings->setCallCaptureDeviceId(id); });
different->toggleOn(same->toggledValue() | rpl::map(!rpl::mappers::_1));
Ui::AddSkip(content);
Ui::AddDivider(content);
if (!GetVideoInputList().empty()) {
if (!Core::App().mediaDevices().defaultId(
Webrtc::DeviceType::Camera).isEmpty()) {
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_camera());
AddCameraSubsection(_controller->uiShow(), content, true);
@ -416,18 +324,98 @@ void Calls::setupContent() {
Ui::ResizeFitChild(this, content);
}
void Calls::initPlaybackButton(
		not_null<Ui::VerticalLayout*> container,
		rpl::producer<QString> text,
		rpl::producer<QString> resolvedId,
		Fn<void(QString)> set) {
	// Adds a settings row labeled with the current playback device name.
	// Clicking it opens the playback device chooser; the selected id is
	// forwarded to `set` and the settings save is scheduled.
	AddButtonWithLabel(
		container,
		// Fix: use the caller-provided label; it was hard-coded to
		// lng_settings_call_output_device, so callers passing another
		// label (e.g. lng_group_call_speakers) were ignored.
		std::move(text),
		PlaybackDeviceNameValue(rpl::duplicate(resolvedId)),
		st::settingsButtonNoIcon
	)->addClickHandler([=] {
		_controller->show(ChoosePlaybackDeviceBox(
			rpl::duplicate(resolvedId),
			[=](const QString &id) {
				set(id);
				Core::App().saveSettingsDelayed();
			}));
	});
}
void Calls::initCaptureButton(
		not_null<Ui::VerticalLayout*> container,
		rpl::producer<QString> text,
		rpl::producer<QString> resolvedId,
		Fn<void(QString)> set) {
	// Adds a settings row labeled with the current capture device name
	// (chooser on click, like initPlaybackButton) plus a microphone level
	// meter that runs while _testingMicrophone is true.
	AddButtonWithLabel(
		container,
		// Fix: use the caller-provided label; it was hard-coded to
		// lng_settings_call_input_device, so the call-settings caller
		// passing lng_group_call_microphone was ignored.
		std::move(text),
		CaptureDeviceNameValue(rpl::duplicate(resolvedId)),
		st::settingsButtonNoIcon
	)->addClickHandler([=] {
		_controller->show(ChooseCaptureDeviceBox(
			rpl::duplicate(resolvedId),
			[=](const QString &id) {
				set(id);
				Core::App().saveSettingsDelayed();
			}));
	});

	// Everything the level meter needs, owned by the meter's lifetime.
	struct LevelState {
		std::unique_ptr<Webrtc::DeviceId> computedDeviceId;
		std::unique_ptr<Webrtc::AudioInputTester> tester;
		base::Timer timer;
		Ui::Animations::Simple animation;
		float level = 0.;
	};
	const auto level = container->add(
		object_ptr<Ui::LevelMeter>(
			container,
			st::defaultLevelMeter),
		st::settingsLevelMeterPadding);
	const auto state = level->lifetime().make_state<LevelState>();
	level->resize(QSize(0, st::defaultLevelMeter.height));
	state->timer.setCallback([=] {
		// Poll the tester and animate the meter towards the new level.
		const auto was = state->level;
		state->level = state->tester->getAndResetLevel();
		state->animation.start([=] {
			level->setValue(state->animation.value(state->level));
		}, was, state->level, kMicTestAnimationDuration);
	});
	_testingMicrophone.value() | rpl::start_with_next([=](bool testing) {
		if (testing) {
			// Resolve the configured id to a concrete capture device and
			// start periodic level polling.
			state->computedDeviceId = std::make_unique<Webrtc::DeviceId>(
				&Core::App().mediaDevices(),
				Webrtc::DeviceType::Capture,
				rpl::duplicate(resolvedId));
			state->tester = std::make_unique<AudioInputTester>(
				state->computedDeviceId->value());
			state->timer.callEach(kMicTestUpdateInterval);
		} else {
			// Tear everything down so the microphone is released.
			state->timer.cancel();
			state->animation.stop();
			state->tester = nullptr;
			state->computedDeviceId = nullptr;
		}
	}, level->lifetime());
}
void Calls::requestPermissionAndStartTestingMicrophone() {
using namespace ::Platform;
const auto status = GetPermissionStatus(
PermissionType::Microphone);
if (status == PermissionStatus::Granted) {
startTestingMicrophone();
_testingMicrophone = true;
} else if (status == PermissionStatus::CanRequest) {
const auto startTestingChecked = crl::guard(this, [=](
PermissionStatus status) {
if (status == PermissionStatus::Granted) {
crl::on_main(crl::guard(this, [=] {
startTestingMicrophone();
_testingMicrophone = true;
}));
}
});
@ -448,13 +436,6 @@ void Calls::requestPermissionAndStartTestingMicrophone() {
}
}
void Calls::startTestingMicrophone() {
	// Begin periodic polling of the microphone input level.
	_levelUpdateTimer.callEach(kMicTestUpdateInterval);
	// NOTE(review): the tester is created with the *call* capture device
	// id and the call audio backend — confirm the section-level capture
	// device is not the one intended here.
	_micTester = std::make_unique<AudioInputTester>(
		Core::App().settings().callAudioBackend(),
		Core::App().settings().callCaptureDeviceId());
}
rpl::producer<QString> PlaybackDeviceNameValue(rpl::producer<QString> id) {
	// Maps a playback-device-id stream to the device's display name.
	constexpr auto type = DeviceType::Playback;
	return DeviceNameValue(type, std::move(id));
}
@ -463,7 +444,12 @@ rpl::producer<QString> CaptureDeviceNameValue(rpl::producer<QString> id) {
return DeviceNameValue(DeviceType::Capture, std::move(id));
}
void ChooseAudioDeviceBox(
rpl::producer<QString> CameraDeviceNameValue(
		rpl::producer<QString> id) {
	// Maps a camera-device-id stream to the device's display name.
	constexpr auto type = DeviceType::Camera;
	return DeviceNameValue(type, std::move(id));
}
void ChooseMediaDeviceBox(
not_null<Ui::GenericBox*> box,
rpl::producer<QString> title,
rpl::producer<std::vector<DeviceInfo>> devicesValue,
@ -495,6 +481,14 @@ void ChooseAudioDeviceBox(
const auto state = box->lifetime().make_state<State>();
state->currentId = std::move(currentId);
const auto choose = [=](const QString &id) {
const auto weak = Ui::MakeWeak(box);
chosen(id);
if (weak) {
box->closeBox();
}
};
const auto group = std::make_shared<Ui::RadiobuttonGroup>();
const auto fake = std::make_shared<Ui::RadiobuttonGroup>(0);
const auto buttons = layout->add(object_ptr<Ui::VerticalLayout>(layout));
@ -513,6 +507,12 @@ void ChooseAudioDeviceBox(
*st,
*radioSt),
margins);
def->clicks(
) | rpl::filter([=] {
return !group->value();
}) | rpl::start_with_next([=] {
choose(kDefaultDeviceId);
}, def->lifetime());
const auto showUnavailable = [=](QString text) {
AddSkip(other);
AddSubsectionTitle(other, tr::lng_settings_devices_inactive());
@ -572,14 +572,6 @@ void ChooseAudioDeviceBox(
}
};
const auto choose = [=](const QString &id) {
const auto weak = Ui::MakeWeak(box);
chosen(id);
if (weak) {
box->closeBox();
}
};
std::move(
devicesValue
) | rpl::start_with_next([=](std::vector<DeviceInfo> &&list) {
@ -615,7 +607,7 @@ void ChooseAudioDeviceBox(
button->finishAnimating();
button->clicks(
) | rpl::filter([=] {
return (current == id);
return (group->value() == index);
}) | rpl::start_with_next([=] {
choose(id);
}, button->lifetime());
@ -656,7 +648,7 @@ object_ptr<Ui::GenericBox> ChoosePlaybackDeviceBox(
const style::Checkbox *st,
const style::Radio *radioSt) {
return Box(
ChooseAudioDeviceBox,
ChooseMediaDeviceBox,
tr::lng_settings_call_output_device(),
Core::App().mediaDevices().devicesValue(DeviceType::Playback),
std::move(currentId),
@ -671,7 +663,7 @@ object_ptr<Ui::GenericBox> ChooseCaptureDeviceBox(
const style::Checkbox *st,
const style::Radio *radioSt) {
return Box(
ChooseAudioDeviceBox,
ChooseMediaDeviceBox,
tr::lng_settings_call_input_device(),
Core::App().mediaDevices().devicesValue(DeviceType::Capture),
std::move(currentId),
@ -680,5 +672,20 @@ object_ptr<Ui::GenericBox> ChooseCaptureDeviceBox(
radioSt);
}
object_ptr<Ui::GenericBox> ChooseCameraDeviceBox(
		rpl::producer<QString> currentId,
		Fn<void(QString id)> chosen,
		const style::Checkbox *st,
		const style::Radio *radioSt) {
	// Device chooser box listing the available cameras; `chosen` receives
	// the selected id (kDefaultDeviceId for the "Default" entry).
	return Box(
		ChooseMediaDeviceBox,
		// Fix: the box title was lng_settings_call_device_default, which
		// is the per-device "Default" *option* label; the camera section
		// title used elsewhere is lng_settings_call_camera.
		tr::lng_settings_call_camera(),
		Core::App().mediaDevices().devicesValue(DeviceType::Camera),
		std::move(currentId),
		std::move(chosen),
		st,
		radioSt);
}
} // namespace Settings

View file

@ -50,15 +50,21 @@ public:
private:
void setupContent();
void requestPermissionAndStartTestingMicrophone();
void startTestingMicrophone();
void initPlaybackButton(
not_null<Ui::VerticalLayout*> container,
rpl::producer<QString> text,
rpl::producer<QString> resolvedId,
Fn<void(QString)> set);
void initCaptureButton(
not_null<Ui::VerticalLayout*> container,
rpl::producer<QString> text,
rpl::producer<QString> resolvedId,
Fn<void(QString)> set);
const not_null<Window::SessionController*> _controller;
rpl::event_stream<QString> _cameraNameStream;
std::unique_ptr<Webrtc::AudioInputTester> _micTester;
Ui::LevelMeter *_micTestLevel = nullptr;
float _micLevel = 0.;
Ui::Animations::Simple _micLevelAnimation;
base::Timer _levelUpdateTimer;
rpl::variable<bool> _testingMicrophone;
};
@ -69,6 +75,8 @@ inline constexpr auto kMicTestAnimationDuration = crl::time(200);
rpl::producer<QString> id);
[[nodiscard]] rpl::producer<QString> CaptureDeviceNameValue(
rpl::producer<QString> id);
[[nodiscard]] rpl::producer<QString> CameraDeviceNameValue(
rpl::producer<QString> id);
[[nodiscard]] object_ptr<Ui::GenericBox> ChoosePlaybackDeviceBox(
rpl::producer<QString> currentId,
Fn<void(QString id)> chosen,
@ -79,6 +87,11 @@ inline constexpr auto kMicTestAnimationDuration = crl::time(200);
Fn<void(QString id)> chosen,
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
[[nodiscard]] object_ptr<Ui::GenericBox> ChooseCameraDeviceBox(
rpl::producer<QString> currentId,
Fn<void(QString id)> chosen,
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
} // namespace Settings

View file

@ -34,7 +34,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "media/streaming/media_streaming_player.h"
#include "media/streaming/media_streaming_document.h"
#include "settings/settings_calls.h" // Calls::AddCameraSubsection.
#include "webrtc/webrtc_media_devices.h" // Webrtc::GetVideoInputList.
#include "webrtc/webrtc_environment.h"
#include "webrtc/webrtc_video_track.h"
#include "ui/widgets/popup_menu.h"
#include "window/window_controller.h"
@ -53,7 +53,8 @@ namespace {
[[nodiscard]] bool IsCameraAvailable() {
return (Core::App().calls().currentCall() == nullptr)
&& !Webrtc::GetVideoInputList().empty();
&& !Core::App().mediaDevices().defaultId(
Webrtc::DeviceType::Camera).isEmpty();
}
void CameraBox(

@ -1 +1 @@
Subproject commit b78e51ad98cd5bf70e916becae0b13496b9f6aca
Subproject commit 222ecc82441dd9d80cbd642bb9d89a59caa12944