Implement audio speed using libavfilter.

This commit is contained in:
John Preston 2023-03-08 13:31:58 +04:00
parent 0880a83c2c
commit 9c74c04738
17 changed files with 871 additions and 669 deletions

View file

@ -321,6 +321,12 @@ FramePointer MakeFramePointer() {
return FramePointer(av_frame_alloc());
}
FramePointer DuplicateFramePointer(AVFrame *frame) {
	// Hand back an empty pointer for null input, otherwise a cloned frame.
	if (!frame) {
		return FramePointer();
	}
	return FramePointer(av_frame_clone(frame));
}
bool FrameHasData(AVFrame *frame) {
	// A frame counts as carrying data only when its first plane is set.
	if (frame == nullptr) {
		return false;
	}
	return frame->data[0] != nullptr;
}

View file

@ -141,6 +141,7 @@ struct FrameDeleter {
};
using FramePointer = std::unique_ptr<AVFrame, FrameDeleter>;
[[nodiscard]] FramePointer MakeFramePointer();
[[nodiscard]] FramePointer DuplicateFramePointer(AVFrame *frame);
[[nodiscard]] bool FrameHasData(AVFrame *frame);
void ClearFrameMemory(AVFrame *frame);

View file

@ -45,15 +45,6 @@ ALCcontext *AudioContext = nullptr;
auto VolumeMultiplierAll = 1.;
auto VolumeMultiplierSong = 1.;
// Value for AL_PITCH_SHIFTER_COARSE_TUNE effect, 0.5 <= speed <= 2.
int CoarseTuneForSpeed(float64 speed) {
	// Maps a playback speed in [0.5, 2.] to the (negated) number of
	// semitone steps for AL_PITCH_SHIFTER_COARSE_TUNE: 12 steps/octave.
	Expects(speed >= 0.5 && speed <= 2.);

	constexpr auto kSemitonesPerOctave = 12;
	const auto octaves = std::log(speed) / std::log(2.);
	const auto semitones = kSemitonesPerOctave * octaves;
	return -int(base::SafeRound(semitones));
}
} // namespace
namespace Media {
@ -205,7 +196,6 @@ void Start(not_null<Instance*> instance) {
auto loglevel = getenv("ALSOFT_LOGLEVEL");
LOG(("OpenAL Logging Level: %1").arg(loglevel ? loglevel : "(not set)"));
OpenAL::LoadEFXExtension();
EnumeratePlaybackDevices();
EnumerateCaptureDevices();
@ -244,9 +234,9 @@ bool AttachToDevice() {
return false;
}
if (auto m = Player::mixer()) {
if (const auto m = Player::mixer()) {
m->reattachTracks();
m->faderOnTimer();
m->scheduleFaderCallback();
}
crl::on_main([] {
@ -282,16 +272,7 @@ void StopDetachIfNotUsedSafe() {
}
bool SupportsSpeedControl() {
return OpenAL::HasEFXExtension()
&& (alGetEnumValue("AL_AUXILIARY_SEND_FILTER") != 0)
&& (alGetEnumValue("AL_DIRECT_FILTER") != 0)
&& (alGetEnumValue("AL_EFFECT_TYPE") != 0)
&& (alGetEnumValue("AL_EFFECT_PITCH_SHIFTER") != 0)
&& (alGetEnumValue("AL_FILTER_TYPE") != 0)
&& (alGetEnumValue("AL_FILTER_LOWPASS") != 0)
&& (alGetEnumValue("AL_LOWPASS_GAIN") != 0)
&& (alGetEnumValue("AL_PITCH_SHIFTER_COARSE_TUNE") != 0)
&& (alGetEnumValue("AL_EFFECTSLOT_EFFECT") != 0);
return true;
}
} // namespace Audio
@ -300,7 +281,7 @@ namespace Player {
namespace {
constexpr auto kVolumeRound = 10000;
constexpr auto kPreloadSamples = 2LL * kDefaultFrequency; // preload next part if less than 2 seconds remains
constexpr auto kPreloadSeconds = 2LL; // preload next part if less than 2 seconds remains
constexpr auto kFadeDuration = crl::time(500);
constexpr auto kCheckPlaybackPositionTimeout = crl::time(100); // 100ms per check audio position
constexpr auto kCheckPlaybackPositionDelta = 2400LL; // update position called each 2400 samples
@ -348,43 +329,6 @@ void Mixer::Track::createStream(AudioMsgId::Type type) {
alGetEnumValue("AL_REMIX_UNMATCHED_SOFT"));
}
alGenBuffers(3, stream.buffers);
if (speedEffect) {
applySourceSpeedEffect();
} else {
removeSourceSpeedEffect();
}
}
// Detaches the speed-effect chain from this track's OpenAL source:
// clears the auxiliary send, removes the direct filter and restores
// normal (1x) pitch. No-op when speed control is unsupported.
void Mixer::Track::removeSourceSpeedEffect() {
if (!Audio::SupportsSpeedControl()) {
return;
}
alSource3i(stream.source, alGetEnumValue("AL_AUXILIARY_SEND_FILTER"), alGetEnumValue("AL_EFFECTSLOT_NULL"), 0, 0);
alSourcei(stream.source, alGetEnumValue("AL_DIRECT_FILTER"), alGetEnumValue("AL_FILTER_NULL"));
alSourcef(stream.source, AL_PITCH, 1.f);
}
// Attaches the pitch-shifter based speed effect to this track's source.
// Lazily (re)creates the effect slot / effect / low-pass filter when the
// stored effect id is missing or stale, then updates the coarse tune and
// wires the slot and filter to the source.
void Mixer::Track::applySourceSpeedEffect() {
if (!Audio::SupportsSpeedControl()) {
return;
}
Expects(speedEffect != nullptr);
if (!speedEffect->effect || !OpenAL::alIsEffect(speedEffect->effect)) {
OpenAL::alGenAuxiliaryEffectSlots(1, &speedEffect->effectSlot);
OpenAL::alGenEffects(1, &speedEffect->effect);
OpenAL::alGenFilters(1, &speedEffect->filter);
OpenAL::alEffecti(speedEffect->effect, alGetEnumValue("AL_EFFECT_TYPE"), alGetEnumValue("AL_EFFECT_PITCH_SHIFTER"));
OpenAL::alFilteri(speedEffect->filter, alGetEnumValue("AL_FILTER_TYPE"), alGetEnumValue("AL_FILTER_LOWPASS"));
// Gain 0 on the low-pass direct filter mutes the dry (unshifted) path.
OpenAL::alFilterf(speedEffect->filter, alGetEnumValue("AL_LOWPASS_GAIN"), 0.f);
}
OpenAL::alEffecti(speedEffect->effect, alGetEnumValue("AL_PITCH_SHIFTER_COARSE_TUNE"), speedEffect->coarseTune);
OpenAL::alAuxiliaryEffectSloti(speedEffect->effectSlot, alGetEnumValue("AL_EFFECTSLOT_EFFECT"), speedEffect->effect);
// AL_PITCH speeds up playback; the negative coarse tune presumably
// compensates the resulting pitch change — confirm against EFX docs.
alSourcef(stream.source, AL_PITCH, speedEffect->speed);
alSource3i(stream.source, alGetEnumValue("AL_AUXILIARY_SEND_FILTER"), speedEffect->effectSlot, 0, 0);
alSourcei(stream.source, alGetEnumValue("AL_DIRECT_FILTER"), speedEffect->filter);
}
void Mixer::Track::destroyStream() {
@ -396,45 +340,32 @@ void Mixer::Track::destroyStream() {
for (auto i = 0; i != 3; ++i) {
stream.buffers[i] = 0;
}
resetSpeedEffect();
}
// Detaches the speed effect from the source (when a stream exists) and
// hands its OpenAL objects to the mixer for deferred deletion; the local
// ids are zeroed immediately so the effect counts as gone.
void Mixer::Track::resetSpeedEffect() {
if (!Audio::SupportsSpeedControl()) {
return;
}
if (!speedEffect) {
return;
} else if (speedEffect->effect && OpenAL::alIsEffect(speedEffect->effect)) {
if (isStreamCreated()) {
removeSourceSpeedEffect();
}
if (Player::mixer()) {
// Don't destroy effect slot immediately.
// See https://github.com/kcat/openal-soft/issues/486
Player::mixer()->scheduleEffectDestruction(*speedEffect);
}
}
speedEffect->effect = speedEffect->effectSlot = speedEffect->filter = 0;
}
void Mixer::Track::reattach(AudioMsgId::Type type) {
if (isStreamCreated()
|| (!samplesCount[0] && !state.id.externalPlayId())) {
|| (!withSpeed.samples[0] && !state.id.externalPlayId())) {
return;
}
createStream(type);
for (auto i = 0; i != kBuffersCount; ++i) {
if (!samplesCount[i]) {
if (!withSpeed.samples[i]) {
break;
}
alBufferData(stream.buffers[i], format, bufferSamples[i].constData(), bufferSamples[i].size(), frequency);
alBufferData(
stream.buffers[i],
format,
withSpeed.buffered[i].constData(),
withSpeed.buffered[i].size(),
state.frequency);
alSourceQueueBuffers(stream.source, 1, stream.buffers + i);
}
alSourcei(stream.source, AL_SAMPLE_OFFSET, qMax(state.position - bufferedPosition, 0LL));
alSourcei(
stream.source,
AL_SAMPLE_OFFSET,
qMax(withSpeed.position - withSpeed.bufferedPosition, 0LL));
if (!IsStopped(state.state)
&& (state.state != State::PausedAtEnd)
&& !state.waitingForData) {
@ -461,18 +392,12 @@ void Mixer::Track::clear() {
state = TrackState();
file = Core::FileLocation();
data = QByteArray();
bufferedPosition = 0;
bufferedLength = 0;
format = 0;
loading = false;
loaded = false;
fadeStartPosition = 0;
format = 0;
frequency = kDefaultFrequency;
for (int i = 0; i != kBuffersCount; ++i) {
samplesCount[i] = 0;
bufferSamples[i] = QByteArray();
}
waitingForBuffer = false;
withSpeed = WithSpeed();
speed = 1.;
setExternalData(nullptr);
lastUpdateWhen = 0;
@ -482,17 +407,9 @@ void Mixer::Track::clear() {
void Mixer::Track::started() {
resetStream();
bufferedPosition = 0;
bufferedLength = 0;
loaded = false;
fadeStartPosition = 0;
format = 0;
frequency = kDefaultFrequency;
for (auto i = 0; i != kBuffersCount; ++i) {
samplesCount[i] = 0;
bufferSamples[i] = QByteArray();
}
loaded = false;
withSpeed = WithSpeed();
}
bool Mixer::Track::isStreamCreated() const {
@ -507,7 +424,7 @@ void Mixer::Track::ensureStreamCreated(AudioMsgId::Type type) {
int Mixer::Track::getNotQueuedBufferIndex() {
// See if there are no free buffers right now.
while (samplesCount[kBuffersCount - 1] != 0) {
while (withSpeed.samples[kBuffersCount - 1] != 0) {
// Try to unqueue some buffer.
ALint processed = 0;
alGetSourcei(stream.source, AL_BUFFERS_PROCESSED, &processed);
@ -523,17 +440,17 @@ int Mixer::Track::getNotQueuedBufferIndex() {
bool found = false;
for (auto i = 0; i != kBuffersCount; ++i) {
if (stream.buffers[i] == buffer) {
auto samplesInBuffer = samplesCount[i];
bufferedPosition += samplesInBuffer;
bufferedLength -= samplesInBuffer;
const auto samplesInBuffer = withSpeed.samples[i];
withSpeed.bufferedPosition += samplesInBuffer;
withSpeed.bufferedLength -= samplesInBuffer;
for (auto j = i + 1; j != kBuffersCount; ++j) {
samplesCount[j - 1] = samplesCount[j];
withSpeed.samples[j - 1] = withSpeed.samples[j];
stream.buffers[j - 1] = stream.buffers[j];
bufferSamples[j - 1] = bufferSamples[j];
withSpeed.buffered[j - 1] = withSpeed.buffered[j];
}
samplesCount[kBuffersCount - 1] = 0;
withSpeed.samples[kBuffersCount - 1] = 0;
stream.buffers[kBuffersCount - 1] = buffer;
bufferSamples[kBuffersCount - 1] = QByteArray();
withSpeed.buffered[kBuffersCount - 1] = QByteArray();
found = true;
break;
}
@ -545,7 +462,7 @@ int Mixer::Track::getNotQueuedBufferIndex() {
}
for (auto i = 0; i != kBuffersCount; ++i) {
if (!samplesCount[i]) {
if (!withSpeed.samples[i]) {
return i;
}
}
@ -554,28 +471,33 @@ int Mixer::Track::getNotQueuedBufferIndex() {
void Mixer::Track::setExternalData(
std::unique_ptr<ExternalSoundData> data) {
changeSpeedEffect(data ? data->speed : 1.);
nextSpeed = speed = data ? data->speed : 1.;
externalData = std::move(data);
}
void Mixer::Track::changeSpeedEffect(float64 speed) {
if (!Audio::SupportsSpeedControl()) {
return;
}
void Mixer::Track::updateStatePosition() {
	// Derive the canonical (speed == 1) position shown in `state` from
	// the speed-scaled position tracked in `withSpeed`.
	const auto scaled = withSpeed.position;
	state.position = SpeedIndependentPosition(scaled, speed);
}
if (speed != 1.) {
if (!speedEffect) {
speedEffect = std::make_unique<SpeedEffect>();
}
speedEffect->speed = speed;
speedEffect->coarseTune = CoarseTuneForSpeed(speed);
if (isStreamCreated()) {
applySourceSpeedEffect();
}
} else if (speedEffect) {
resetSpeedEffect();
speedEffect = nullptr;
}
void Mixer::Track::updateWithSpeedPosition() {
	// Recompute the speed-scaled position from the canonical one and
	// reset the fine-tuned position to match it.
	const auto scaled = SpeedDependentPosition(state.position, speed);
	withSpeed.position = scaled;
	withSpeed.fineTunedPosition = scaled;
}
int64 Mixer::Track::SpeedIndependentPosition(
		int64 position,
		float64 speed) {
	// Converts a speed-scaled sample position back to the canonical
	// (speed == 1) timeline by multiplying with the playback speed.
	Expects(speed < 2.5);

	const auto scaled = position * speed;
	return int64(base::SafeRound(scaled));
}
int64 Mixer::Track::SpeedDependentPosition(
		int64 position,
		float64 speed) {
	// Converts a canonical (speed == 1) sample position to the
	// speed-scaled timeline by dividing by the playback speed.
	Expects(speed >= 0.5);

	const auto scaled = position / speed;
	return int64(base::SafeRound(scaled));
}
void Mixer::Track::resetStream() {
@ -589,24 +511,26 @@ Mixer::Track::~Track() = default;
Mixer::Mixer(not_null<Audio::Instance*> instance)
: _instance(instance)
, _effectsDestructionTimer([=] { destroyStaleEffectsSafe(); })
, _volumeVideo(kVolumeRound)
, _volumeSong(kVolumeRound)
, _fader(new Fader(&_faderThread))
, _loader(new Loaders(&_loaderThread)) {
connect(this, SIGNAL(faderOnTimer()), _fader, SLOT(onTimer()), Qt::QueuedConnection);
connect(this, SIGNAL(suppressSong()), _fader, SLOT(onSuppressSong()));
connect(this, SIGNAL(unsuppressSong()), _fader, SLOT(onUnsuppressSong()));
connect(this, SIGNAL(suppressAll(qint64)), _fader, SLOT(onSuppressAll(qint64)));
Core::App().settings().songVolumeChanges(
) | rpl::start_with_next([=] {
QMetaObject::invokeMethod(_fader, "onSongVolumeChanged");
InvokeQueued(_fader, [fader = _fader] {
fader->songVolumeChanged();
});
}, _lifetime);
Core::App().settings().videoVolumeChanges(
) | rpl::start_with_next([=] {
QMetaObject::invokeMethod(_fader, "onVideoVolumeChanged");
InvokeQueued(_fader, [fader = _fader] {
fader->videoVolumeChanged();
});
}, _lifetime);
connect(this, SIGNAL(loaderOnStart(const AudioMsgId&, qint64)), _loader, SLOT(onStart(const AudioMsgId&, qint64)));
@ -645,6 +569,12 @@ Mixer::~Mixer() {
_loaderThread.wait();
}
// Queues an asynchronous fader tick (onTimer) on the fader object's
// thread, rather than invoking it inline on the caller's thread.
void Mixer::scheduleFaderCallback() {
InvokeQueued(_fader, [fader = _fader] {
fader->onTimer();
});
}
void Mixer::onUpdated(const AudioMsgId &audio) {
if (audio.externalPlayId()) {
externalSoundProgress(audio);
@ -656,60 +586,6 @@ void Mixer::onUpdated(const AudioMsgId &audio) {
});
}
// Thread: Any. Must be locked: AudioMutex.
// Queues the effect's OpenAL objects for deletion after
// kEffectDestructionDelay, then makes sure the cleanup timer is armed.
void Mixer::scheduleEffectDestruction(const SpeedEffect &effect) {
_effectsForDestruction.emplace_back(
crl::now() + kEffectDestructionDelay,
effect);
scheduleEffectsDestruction();
}
// Thread: Any. Must be locked: AudioMutex.
// Arms the destruction timer (queued to the mixer's thread) when any
// effects are still pending and the timer is not already running.
void Mixer::scheduleEffectsDestruction() {
if (_effectsForDestruction.empty()) {
return;
}
InvokeQueued(this, [=] {
if (!_effectsDestructionTimer.isActive()) {
_effectsDestructionTimer.callOnce(kEffectDestructionDelay + 1);
}
});
}
// Thread: Main. Locks: AudioMutex.
// Timer callback: takes the audio mutex, then runs the cleanup pass.
void Mixer::destroyStaleEffectsSafe() {
QMutexLocker lock(&AudioMutex);
destroyStaleEffects();
}
// Thread: Main. Must be locked: AudioMutex.
// Deletes every scheduled effect whose deadline has passed (a zero
// deadline means "destroy now"), removes those entries from the list,
// and reschedules a pass for whatever remains.
void Mixer::destroyStaleEffects() {
const auto now = crl::now();
// NOTE(review): the predicate has side effects (deletes AL objects);
// it relies on remove_if invoking it exactly once per element.
const auto checkAndDestroy = [&](
const std::pair<crl::time, SpeedEffect> &pair) {
const auto &[when, effect] = pair;
if (when && when > now) {
// Not due yet — keep this entry.
return false;
}
OpenAL::alDeleteEffects(1, &effect.effect);
OpenAL::alDeleteAuxiliaryEffectSlots(1, &effect.effectSlot);
OpenAL::alDeleteFilters(1, &effect.filter);
return true;
};
_effectsForDestruction.erase(
ranges::remove_if(_effectsForDestruction, checkAndDestroy),
end(_effectsForDestruction));
scheduleEffectsDestruction();
}
// Thread: Main. Must be locked: AudioMutex.
// Forces immediate destruction of all pending effects by zeroing their
// deadlines before running a cleanup pass (used when closing the device).
void Mixer::destroyEffectsOnClose() {
for (auto &[when, effect] : _effectsForDestruction) {
when = 0;
}
destroyStaleEffects();
}
void Mixer::onError(const AudioMsgId &audio) {
stoppedOnError(audio);
@ -789,7 +665,7 @@ void Mixer::resetFadeStartPosition(AudioMsgId::Type type, int positionInBuffered
} else if ((alState == AL_STOPPED)
&& (alSampleOffset == 0)
&& !internal::CheckAudioDeviceConnected()) {
track->fadeStartPosition = track->state.position;
track->withSpeed.fadeStartPosition = track->withSpeed.position;
return;
}
@ -798,17 +674,19 @@ void Mixer::resetFadeStartPosition(AudioMsgId::Type type, int positionInBuffered
&& (!IsStopped(track->state.state)
|| IsStoppedAtEnd(track->state.state)));
positionInBuffered = stoppedAtEnd
? track->bufferedLength
? track->withSpeed.bufferedLength
: alSampleOffset;
} else {
positionInBuffered = 0;
}
}
auto fullPosition = track->samplesCount[0]
? (track->bufferedPosition + positionInBuffered)
: track->state.position;
track->state.position = fullPosition;
track->fadeStartPosition = fullPosition;
const auto withSpeedPosition = track->withSpeed.samples[0]
? (track->withSpeed.bufferedPosition + positionInBuffered)
: track->withSpeed.position;
track->withSpeed.fineTunedPosition = withSpeedPosition;
track->withSpeed.position = withSpeedPosition;
track->withSpeed.fadeStartPosition = withSpeedPosition;
track->updateStatePosition();
}
bool Mixer::fadedStop(AudioMsgId::Type type, bool *fadedStart) {
@ -862,7 +740,7 @@ void Mixer::play(
}
if (current->state.id) {
loaderOnCancel(current->state.id);
faderOnTimer();
scheduleFaderCallback();
}
if (type != AudioMsgId::Type::Video) {
auto foundCurrent = currentIndex(type);
@ -887,6 +765,7 @@ void Mixer::play(
current->setExternalData(std::move(externalData));
current->state.position = (positionMs * current->state.frequency)
/ 1000LL;
current->updateWithSpeedPosition();
current->state.state = current->externalData
? State::Paused
: fadedStart
@ -916,7 +795,15 @@ void Mixer::setSpeedFromExternal(const AudioMsgId &audioId, float64 speed) {
QMutexLocker lock(&AudioMutex);
const auto track = trackForType(audioId.type());
if (track->state.id == audioId) {
track->changeSpeedEffect(speed);
track->nextSpeed = speed;
if (track->speed != track->nextSpeed
&& !IsStoppedOrStopping(track->state.state)) {
track->loading = true;
track->loaded = false;
InvokeQueued(_loader, [loader = _loader, id = audioId] {
loader->onLoad(id);
});
}
}
}
@ -932,6 +819,10 @@ Streaming::TimePoint Mixer::getExternalSyncTimePoint(
if (track && track->state.id == audio && track->lastUpdateWhen > 0) {
result.trackTime = track->lastUpdatePosition;
result.worldTime = track->lastUpdateWhen;
LOG(("Sync: Track Time %1, World Time: %2, Speed: %3"
).arg(result.trackTime / 1000.
).arg(result.worldTime / 1000.
).arg(track->speed));
}
return result;
}
@ -957,9 +848,11 @@ void Mixer::externalSoundProgress(const AudioMsgId &audio) {
QMutexLocker lock(&AudioMutex);
const auto current = trackForType(type);
if (current && current->state.length && current->state.frequency) {
if (current->state.id == audio && current->state.state == State::Playing) {
if (current->state.id == audio
&& current->state.state == State::Playing) {
current->lastUpdateWhen = crl::now();
current->lastUpdatePosition = (current->state.position * 1000ULL) / current->state.frequency;
current->lastUpdatePosition = (current->state.position * 1000LL)
/ current->state.frequency;
}
}
}
@ -1014,7 +907,7 @@ void Mixer::pause(const AudioMsgId &audio, bool fast) {
}
}
faderOnTimer();
scheduleFaderCallback();
track->lastUpdateWhen = 0;
track->lastUpdatePosition = 0;
@ -1061,7 +954,10 @@ void Mixer::resume(const AudioMsgId &audio, bool fast) {
if (!checkCurrentALError(type)) return;
if (state == AL_STOPPED) {
alSourcei(track->stream.source, AL_SAMPLE_OFFSET, qMax(track->state.position - track->bufferedPosition, 0LL));
alSourcei(
track->stream.source,
AL_SAMPLE_OFFSET,
qMax(track->withSpeed.position - track->withSpeed.bufferedPosition, 0LL));
if (!checkCurrentALError(type)) return;
}
alSourcePlay(track->stream.source);
@ -1073,78 +969,10 @@ void Mixer::resume(const AudioMsgId &audio, bool fast) {
}
} break;
}
faderOnTimer();
scheduleFaderCallback();
}
if (current) updated(current);
}
//
// Right now all the music is played in the streaming player.
//
//void Mixer::seek(AudioMsgId::Type type, crl::time positionMs) {
// QMutexLocker lock(&AudioMutex);
//
// const auto current = trackForType(type);
// const auto audio = current->state.id;
//
// Audio::AttachToDevice();
// const auto streamCreated = current->isStreamCreated();
// const auto position = (positionMs * current->frequency) / 1000LL;
// const auto fastSeek = [&] {
// const auto loadedStart = current->bufferedPosition;
// const auto loadedLength = current->bufferedLength;
// const auto skipBack = (current->loaded ? 0 : kDefaultFrequency);
// const auto availableEnd = loadedStart + loadedLength - skipBack;
// if (position < loadedStart) {
// return false;
// } else if (position >= availableEnd) {
// return false;
// } else if (!streamCreated) {
// return false;
// } else if (IsStoppedOrStopping(current->state.state)) {
// return false;
// }
// return true;
// }();
// if (fastSeek) {
// alSourcei(current->stream.source, AL_SAMPLE_OFFSET, position - current->bufferedPosition);
// if (!checkCurrentALError(type)) return;
//
// alSourcef(current->stream.source, AL_GAIN, ComputeVolume(type));
// if (!checkCurrentALError(type)) return;
//
// resetFadeStartPosition(type, position - current->bufferedPosition);
// } else {
// setStoppedState(current);
// }
// switch (current->state.state) {
// case State::Pausing:
// case State::Paused:
// case State::PausedAtEnd: {
// if (current->state.state == State::PausedAtEnd) {
// current->state.state = State::Paused;
// }
// lock.unlock();
// return resume(audio, true);
// } break;
// case State::Starting:
// case State::Resuming:
// case State::Playing: {
// current->state.state = State::Pausing;
// resetFadeStartPosition(type);
// if (type == AudioMsgId::Type::Voice) {
// emit unsuppressSong();
// }
// } break;
// case State::Stopping:
// case State::Stopped:
// case State::StoppedAtEnd:
// case State::StoppedAtError:
// case State::StoppedAtStart: {
// lock.unlock();
// } return play(audio, positionMs);
// }
// emit faderOnTimer();
//}
void Mixer::stop(const AudioMsgId &audio) {
AudioMsgId current;
@ -1239,6 +1067,8 @@ TrackState Mixer::currentState(AudioMsgId::Type type) {
void Mixer::setStoppedState(Track *current, State state) {
current->state.state = state;
current->state.position = 0;
current->withSpeed.position = 0;
current->withSpeed.fineTunedPosition = 0;
if (current->isStreamCreated()) {
alSourceStop(current->stream.source);
alSourcef(current->stream.source, AL_GAIN, 1);
@ -1255,8 +1085,6 @@ void Mixer::prepareToCloseDevice() {
trackForType(AudioMsgId::Type::Song, i)->detach();
}
_videoTrack.detach();
destroyEffectsOnClose();
}
// Thread: Main. Must be locked: AudioMutex.
@ -1427,12 +1255,13 @@ int32 Fader::updateOnePlayback(Mixer::Track *track, bool &hasPlaying, bool &hasF
&& (!IsStopped(track->state.state)
|| IsStoppedAtEnd(track->state.state)));
const auto positionInBuffered = stoppedAtEnd
? track->bufferedLength
? track->withSpeed.bufferedLength
: alSampleOffset;
const auto waitingForDataOld = track->state.waitingForData;
track->state.waitingForData = stoppedAtEnd
&& (track->state.state != State::Stopping);
const auto fullPosition = track->bufferedPosition + positionInBuffered;
const auto withSpeedPosition = track->withSpeed.bufferedPosition
+ positionInBuffered;
auto playing = (track->state.state == State::Playing);
auto fading = IsFading(track->state.state);
@ -1451,7 +1280,8 @@ int32 Fader::updateOnePlayback(Mixer::Track *track, bool &hasPlaying, bool &hasF
emitSignals |= EmitStopped;
}
} else if (fading && alState == AL_PLAYING) {
auto fadingForSamplesCount = (fullPosition - track->fadeStartPosition);
const auto fadingForSamplesCount = withSpeedPosition
- track->withSpeed.fadeStartPosition;
if (crl::time(1000) * fadingForSamplesCount >= kFadeDuration * track->state.frequency) {
fading = false;
alSourcef(track->stream.source, AL_GAIN, 1. * volumeMultiplier);
@ -1488,20 +1318,22 @@ int32 Fader::updateOnePlayback(Mixer::Track *track, bool &hasPlaying, bool &hasF
if (errorHappened()) return EmitError;
}
}
if (alState == AL_PLAYING && fullPosition >= track->state.position + kCheckPlaybackPositionDelta) {
track->state.position = fullPosition;
track->withSpeed.fineTunedPosition = withSpeedPosition;
if (alState == AL_PLAYING && withSpeedPosition >= track->withSpeed.position + kCheckPlaybackPositionDelta) {
track->withSpeed.position = withSpeedPosition;
track->updateStatePosition();
emitSignals |= EmitPositionUpdated;
} else if (track->state.waitingForData && !waitingForDataOld) {
if (fullPosition > track->state.position) {
track->state.position = fullPosition;
if (withSpeedPosition > track->withSpeed.position) {
track->withSpeed.position = withSpeedPosition;
}
// When stopped because of insufficient data while streaming,
// inform the player about the last position we were at.
emitSignals |= EmitPositionUpdated;
}
if (playing || track->state.state == State::Starting || track->state.state == State::Resuming) {
if (!track->loaded && !track->loading) {
auto needPreload = (track->state.position + kPreloadSamples > track->bufferedPosition + track->bufferedLength);
if ((!track->loaded && !track->loading) || track->waitingForBuffer) {
auto needPreload = (track->withSpeed.position + kPreloadSeconds * track->state.frequency > track->withSpeed.bufferedPosition + track->withSpeed.bufferedLength);
if (needPreload) {
track->loading = true;
emitSignals |= EmitNeedToPreload;
@ -1549,12 +1381,12 @@ void Fader::onSuppressAll(qint64 duration) {
onTimer();
}
void Fader::onSongVolumeChanged() {
// Marks the song volume as dirty and runs a fader tick immediately.
// Invoked via InvokeQueued from the mixer, so it runs on this thread.
void Fader::songVolumeChanged() {
_volumeChangedSong = true;
onTimer();
}
void Fader::onVideoVolumeChanged() {
// Marks the video volume as dirty and runs a fader tick immediately.
// Invoked via InvokeQueued from the mixer, so it runs on this thread.
void Fader::videoVolumeChanged() {
_volumeChangedVideo = true;
onTimer();
}
@ -1615,13 +1447,11 @@ public:
: AbstractFFMpegLoader(file, data, bytes::vector()) {
}
bool open(crl::time positionMs) override {
if (!AbstractFFMpegLoader::open(positionMs)) {
bool open(crl::time positionMs, float64 speed = 1.) override {
if (!AbstractFFMpegLoader::open(positionMs, speed)) {
return false;
}
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
for (int32 i = 0, l = fmtContext->nb_streams; i < l; ++i) {
const auto stream = fmtContext->streams[i];
if (stream->disposition & AV_DISPOSITION_ATTACHED_PIC) {
@ -1644,11 +1474,10 @@ public:
}
}
} else if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
DEBUG_LOG(("Audio Read Error: Found video stream in file '%1', data size '%2', error %3, %4")
DEBUG_LOG(("Audio Read Error: Found video stream in file '%1', data size '%2', stream %3.")
.arg(_file.name())
.arg(_data.size())
.arg(i)
.arg(av_make_error_string(err, sizeof(err), streamId)));
.arg(i));
return false;
}
}
@ -1729,8 +1558,8 @@ Ui::PreparedFileInformation PrepareForSending(
auto result = Ui::PreparedFileInformation::Song();
FFMpegAttributesReader reader(Core::FileLocation(fname), data);
const auto positionMs = crl::time(0);
if (reader.open(positionMs) && reader.samplesCount() > 0) {
result.duration = reader.samplesCount() / reader.samplesFrequency();
if (reader.open(positionMs) && reader.duration() > 0) {
result.duration = reader.duration() / 1000;
result.title = reader.title();
result.performer = reader.performer();
result.cover = reader.cover();
@ -1745,17 +1574,18 @@ public:
FFMpegWaveformCounter(const Core::FileLocation &file, const QByteArray &data) : FFMpegLoader(file, data, bytes::vector()) {
}
bool open(crl::time positionMs) override {
if (!FFMpegLoader::open(positionMs)) {
bool open(crl::time positionMs, float64 speed = 1.) override {
if (!FFMpegLoader::open(positionMs, speed)) {
return false;
}
QByteArray buffer;
buffer.reserve(kWaveformCounterBufferSize);
int64 countbytes = sampleSize() * samplesCount();
const auto samplesCount = samplesFrequency() * duration() / 1000;
int64 countbytes = sampleSize() * samplesCount;
int64 processed = 0;
int64 sumbytes = 0;
if (samplesCount() < Media::Player::kWaveformSamplesCount) {
if (samplesCount < Media::Player::kWaveformSamplesCount) {
return false;
}
@ -1775,16 +1605,16 @@ public:
};
while (processed < countbytes) {
const auto result = readMore();
const auto sampleBytes = v::is<bytes::const_span>(result)
? v::get<bytes::const_span>(result)
: bytes::const_span();
if (result == ReadError::Other
Assert(result != ReadError::Wait); // Not a child loader.
if (result == ReadError::Retry) {
continue;
} else if (result == ReadError::Other
|| result == ReadError::EndOfFile) {
break;
} else if (sampleBytes.empty()) {
continue;
}
Assert(v::is<bytes::const_span>(result));
const auto sampleBytes = v::get<bytes::const_span>(result);
Assert(!sampleBytes.empty());
if (fmt == AL_FORMAT_MONO8 || fmt == AL_FORMAT_STEREO8) {
Media::Audio::IterateSamples<uchar>(sampleBytes, callback);
} else if (fmt == AL_FORMAT_MONO16 || fmt == AL_FORMAT_STEREO16) {

View file

@ -35,6 +35,9 @@ namespace Audio {
class Instance;
inline constexpr auto kSpeedMin = 0.5;
inline constexpr auto kSpeedMax = 2.5;
// Thread: Main.
void Start(not_null<Instance*> instance);
void Finish(not_null<Instance*> instance);
@ -180,6 +183,8 @@ public:
void setVideoVolume(float64 volume);
float64 getVideoVolume() const;
void scheduleFaderCallback();
~Mixer();
private Q_SLOTS:
@ -194,21 +199,11 @@ Q_SIGNALS:
void loaderOnStart(const AudioMsgId &audio, qint64 positionMs);
void loaderOnCancel(const AudioMsgId &audio);
void faderOnTimer();
void suppressSong();
void unsuppressSong();
void suppressAll(qint64 duration);
private:
struct SpeedEffect {
uint32 effect = 0;
uint32 effectSlot = 0;
uint32 filter = 0;
int coarseTune = 0;
float64 speed = 1.;
};
class Track {
public:
static constexpr int kBuffersCount = 3;
@ -229,7 +224,16 @@ private:
// Thread: Main. Must be locked: AudioMutex.
void setExternalData(std::unique_ptr<ExternalSoundData> data);
void changeSpeedEffect(float64 speed);
void updateStatePosition();
void updateWithSpeedPosition();
[[nodiscard]] static int64 SpeedIndependentPosition(
int64 position,
float64 speed);
[[nodiscard]] static int64 SpeedDependentPosition(
int64 position,
float64 speed);
~Track();
@ -237,25 +241,35 @@ private:
Core::FileLocation file;
QByteArray data;
int64 bufferedPosition = 0;
int64 bufferedLength = 0;
int format = 0;
bool loading = false;
bool loaded = false;
int64 fadeStartPosition = 0;
bool waitingForBuffer = false;
int32 format = 0;
int32 frequency = kDefaultFrequency;
int samplesCount[kBuffersCount] = { 0 };
QByteArray bufferSamples[kBuffersCount];
// Speed dependent values.
float64 speed = 1.;
float64 nextSpeed = 1.;
struct WithSpeed {
int64 fineTunedPosition = 0;
int64 position = 0;
int64 length = 0;
int64 bufferedPosition = 0;
int64 bufferedLength = 0;
int64 fadeStartPosition = 0;
int samples[kBuffersCount] = { 0 };
QByteArray buffered[kBuffersCount];
};
WithSpeed withSpeed;
struct Stream {
uint32 source = 0;
uint32 buffers[kBuffersCount] = { 0 };
};
Stream stream;
std::unique_ptr<ExternalSoundData> externalData;
std::unique_ptr<SpeedEffect> speedEffect;
crl::time lastUpdateWhen = 0;
crl::time lastUpdatePosition = 0;
@ -263,9 +277,6 @@ private:
void createStream(AudioMsgId::Type type);
void destroyStream();
void resetStream();
void resetSpeedEffect();
void applySourceSpeedEffect();
void removeSourceSpeedEffect();
};
@ -283,17 +294,6 @@ private:
int *currentIndex(AudioMsgId::Type type);
const int *currentIndex(AudioMsgId::Type type) const;
// Thread: Any. Must be locked: AudioMutex.
void scheduleEffectDestruction(const SpeedEffect &effect);
void scheduleEffectsDestruction();
// Thread: Main. Must be locked: AudioMutex.
void destroyStaleEffects();
void destroyEffectsOnClose();
// Thread: Main. Locks: AudioMutex.
void destroyStaleEffectsSafe();
const not_null<Audio::Instance*> _instance;
int _audioCurrent = 0;
@ -304,9 +304,6 @@ private:
Track _videoTrack;
std::vector<std::pair<crl::time, SpeedEffect>> _effectsForDestruction;
base::Timer _effectsDestructionTimer;
QAtomicInt _volumeVideo;
QAtomicInt _volumeSong;
@ -329,6 +326,9 @@ class Fader : public QObject {
public:
Fader(QThread *thread);
void songVolumeChanged();
void videoVolumeChanged();
Q_SIGNALS:
void error(const AudioMsgId &audio);
void playPositionUpdated(const AudioMsgId &audio);
@ -342,8 +342,6 @@ public Q_SLOTS:
void onSuppressSong();
void onUnsuppressSong();
void onSuppressAll(qint64 duration);
void onSongVolumeChanged();
void onVideoVolumeChanged();
private:
enum {

View file

@ -11,7 +11,18 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "ffmpeg/ffmpeg_utility.h"
#include "base/bytes.h"
extern "C" {
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
} // extern "C"
namespace Media {
namespace {
using FFmpeg::AvErrorWrap;
using FFmpeg::LogError;
} // namespace
#if !DA_FFMPEG_NEW_CHANNEL_LAYOUT
uint64_t AbstractFFMpegLoader::ComputeChannelLayout(
@ -30,14 +41,11 @@ int64 AbstractFFMpegLoader::Mul(int64 value, AVRational rational) {
return value * rational.num / rational.den;
}
bool AbstractFFMpegLoader::open(crl::time positionMs) {
bool AbstractFFMpegLoader::open(crl::time positionMs, float64 speed) {
if (!AudioPlayerLoader::openFile()) {
return false;
}
int res = 0;
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
ioBuffer = (uchar *)av_malloc(FFmpeg::kAVBlockSize);
if (!_data.isEmpty()) {
ioContext = avio_alloc_context(ioBuffer, FFmpeg::kAVBlockSize, 0, reinterpret_cast<void *>(this), &AbstractFFMpegLoader::ReadData, 0, &AbstractFFMpegLoader::SeekData);
@ -48,27 +56,26 @@ bool AbstractFFMpegLoader::open(crl::time positionMs) {
}
fmtContext = avformat_alloc_context();
if (!fmtContext) {
DEBUG_LOG(("Audio Read Error: Unable to avformat_alloc_context for file '%1', data size '%2'").arg(_file.name()).arg(_data.size()));
LogError(u"avformat_alloc_context"_q);
return false;
}
fmtContext->pb = ioContext;
if ((res = avformat_open_input(&fmtContext, 0, 0, 0)) < 0) {
ioBuffer = 0;
DEBUG_LOG(("Audio Read Error: Unable to avformat_open_input for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
if (AvErrorWrap error = avformat_open_input(&fmtContext, 0, 0, 0)) {
ioBuffer = nullptr;
LogError(u"avformat_open_input"_q, error);
return false;
}
_opened = true;
if ((res = avformat_find_stream_info(fmtContext, 0)) < 0) {
DEBUG_LOG(("Audio Read Error: Unable to avformat_find_stream_info for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(res).arg(av_make_error_string(err, sizeof(err), res)));
if (AvErrorWrap error = avformat_find_stream_info(fmtContext, 0)) {
LogError(u"avformat_find_stream_info"_q, error);
return false;
}
streamId = av_find_best_stream(fmtContext, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
if (streamId < 0) {
LOG(("Audio Error: Unable to av_find_best_stream for file '%1', data size '%2', error %3, %4").arg(_file.name()).arg(_data.size()).arg(streamId).arg(av_make_error_string(err, sizeof(err), streamId)));
FFmpeg::LogError(u"av_find_best_stream"_q, AvErrorWrap(streamId));
return false;
}
@ -76,15 +83,11 @@ bool AbstractFFMpegLoader::open(crl::time positionMs) {
const auto params = stream->codecpar;
_samplesFrequency = params->sample_rate;
if (stream->duration != AV_NOPTS_VALUE) {
_samplesCount = Mul(
stream->duration * _samplesFrequency,
stream->time_base);
_duration = Mul(stream->duration * 1000, stream->time_base);
} else {
_samplesCount = Mul(
fmtContext->duration * _samplesFrequency,
{ 1, AV_TIME_BASE });
_duration = Mul(fmtContext->duration * 1000, { 1, AV_TIME_BASE });
}
_startedAtSample = (positionMs * _samplesFrequency) / 1000LL;
return true;
}
@ -203,10 +206,42 @@ AbstractAudioFFMpegLoader::AbstractAudioFFMpegLoader(
, _frame(FFmpeg::MakeFramePointer()) {
}
// Drops already-replayed frames from the speed-change queue, keeping the
// frame that contains `samples` (and everything after it) so playback can
// be rebuffered starting from the frame the position falls into.
//
// `samples` is a speed-independent sample position (same units as
// EnqueuedFrame::position). Adjusts _framesQueuedIndex so it keeps pointing
// at the same logical frame after the erase.
void AbstractAudioFFMpegLoader::dropFramesTill(int64 samples) {
	const auto isAfter = [&](const EnqueuedFrame &frame) {
		return frame.position > samples;
	};
	const auto from = begin(_framesQueued);
	const auto after = ranges::find_if(_framesQueued, isAfter);
	if (from == after) {
		return;
	}
	// `after` is the first frame strictly past `samples`; keep one frame
	// before it (the frame containing `samples`), erase everything earlier.
	const auto till = after - 1;
	const auto erasing = till - from;
	if (erasing > 0) {
		if (_framesQueuedIndex >= 0) {
			// The read cursor must not point into the erased prefix.
			Assert(_framesQueuedIndex >= erasing);
			_framesQueuedIndex -= erasing;
		}
		_framesQueued.erase(from, till);
		// NOTE(review): erase(from, till) always leaves at least the `till`
		// frame, so this looks like defensive code - confirm it can trigger.
		if (_framesQueued.empty()) {
			_framesQueuedIndex = -1;
		}
	}
}
// Switches the loader to replaying the queued frames through a filter
// rebuilt for `newSpeed`. Returns the speed-independent sample position of
// the first queued frame, or -1 if there is nothing queued to replay.
int64 AbstractAudioFFMpegLoader::startReadingQueuedFrames(float64 newSpeed) {
	// Rebuild the atempo filter graph for the new playback speed first.
	changeSpeedFilter(newSpeed);

	const auto nothingQueued = _framesQueued.empty();
	_framesQueuedIndex = nothingQueued ? -1 : 0;
	return nothingQueued ? int64(-1) : _framesQueued.front().position;
}
bool AbstractAudioFFMpegLoader::initUsingContext(
not_null<AVCodecContext*> context,
int64 initialCount,
int initialFrequency) {
float64 speed) {
_swrSrcSampleFormat = context->sample_fmt;
#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
const AVChannelLayout mono = AV_CHANNEL_LAYOUT_MONO;
@ -275,15 +310,8 @@ bool AbstractAudioFFMpegLoader::initUsingContext(
}
}
if (_swrDstRate == initialFrequency) {
_outputSamplesCount = initialCount;
} else {
_outputSamplesCount = av_rescale_rnd(
initialCount,
_swrDstRate,
initialFrequency,
AV_ROUND_UP);
}
createSpeedFilter(speed);
return true;
}
@ -297,44 +325,98 @@ auto AbstractAudioFFMpegLoader::replaceFrameAndRead(
auto AbstractAudioFFMpegLoader::readFromReadyContext(
not_null<AVCodecContext*> context)
-> ReadResult {
const auto res = avcodec_receive_frame(context, _frame.get());
if (res >= 0) {
if (_filterGraph) {
AvErrorWrap error = av_buffersink_get_frame(
_filterSink,
_filteredFrame.get());
if (!error) {
if (!_filteredFrame->nb_samples) {
return ReadError::Retry;
}
return bytes::const_span(
reinterpret_cast<const bytes::type*>(
_filteredFrame->extended_data[0]),
_filteredFrame->nb_samples * _outputSampleSize);
} else if (error.code() == AVERROR_EOF) {
return ReadError::EndOfFile;
} else if (error.code() != AVERROR(EAGAIN)) {
LogError(u"av_buffersink_get_frame"_q, error);
return ReadError::Other;
}
}
using Enqueued = not_null<const EnqueuedFrame*>;
const auto queueResult = fillFrameFromQueued();
if (queueResult == ReadError::RetryNotQueued) {
return ReadError::RetryNotQueued;
} else if (const auto enqueued = std::get_if<Enqueued>(&queueResult)) {
const auto raw = (*enqueued)->frame.get();
Assert(frameHasDesiredFormat(raw));
return readOrBufferForFilter(raw, (*enqueued)->samples);
}
const auto queueError = v::get<ReadError>(queueResult);
AvErrorWrap error = (queueError == ReadError::EndOfFile)
? AVERROR_EOF
: avcodec_receive_frame(context, _frame.get());
if (!error) {
return readFromReadyFrame();
}
if (res == AVERROR_EOF) {
return ReadError::EndOfFile;
} else if (res != AVERROR(EAGAIN)) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: "
"Unable to avcodec_receive_frame() file '%1', data size '%2', "
"error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
if (error.code() == AVERROR_EOF) {
enqueueFramesFinished();
if (!_filterGraph) {
return ReadError::EndOfFile;
}
AvErrorWrap error = av_buffersrc_add_frame(_filterSrc, nullptr);
if (!error) {
return ReadError::Retry;
}
LogError(u"av_buffersrc_add_frame"_q, error);
return ReadError::Other;
} else if (error.code() != AVERROR(EAGAIN)) {
LogError(u"avcodec_receive_frame"_q, error);
return ReadError::Other;
}
return ReadError::Wait;
}
bool AbstractAudioFFMpegLoader::frameHasDesiredFormat() const {
// Returns the next queued frame while replaying after a speed change, or a
// ReadError describing the reading mode:
// - Wait: not in queued-replay mode, caller should decode normally;
// - RetryNotQueued: the queue is exhausted, switched back to normal mode;
// - EndOfFile: hit the null-frame sentinel added by enqueueFramesFinished().
auto AbstractAudioFFMpegLoader::fillFrameFromQueued()
-> std::variant<not_null<const EnqueuedFrame*>, ReadError> {
	if (_framesQueuedIndex < 0) {
		// Checking this first avoids the signed/unsigned comparison the
		// original relied on (-1 == _framesQueued.size() via wraparound).
		return ReadError::Wait;
	} else if (_framesQueuedIndex == int(_framesQueued.size())) {
		_framesQueuedIndex = -1;
		return ReadError::RetryNotQueued;
	}
	const auto &queued = _framesQueued[_framesQueuedIndex];
	++_framesQueuedIndex;
	if (!queued.frame) {
		// Null frame is the end-of-stream sentinel.
		return ReadError::EndOfFile;
	}
	// Removed the leftover per-frame debug LOG that dumped the raw
	// extended_data pointer for every replayed frame.
	return &queued;
}
bool AbstractAudioFFMpegLoader::frameHasDesiredFormat(
not_null<AVFrame*> frame) const {
const auto sameChannelLayout = [&] {
#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
return !av_channel_layout_compare(
&_frame->ch_layout,
&frame->ch_layout,
&_swrDstChannelLayout);
#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
const auto frameChannelLayout = ComputeChannelLayout(
_frame->channel_layout,
_frame->channels);
frame->channel_layout,
frame->channels);
return (frameChannelLayout == _swrDstChannelLayout);
#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
};
return true
&& (_frame->format == _swrDstSampleFormat)
&& (_frame->sample_rate == _swrDstRate)
&& (frame->format == _swrDstSampleFormat)
&& (frame->sample_rate == _swrDstRate)
&& sameChannelLayout();
}
@ -392,9 +474,9 @@ bool AbstractAudioFFMpegLoader::initResampleForFrame() {
}
bool AbstractAudioFFMpegLoader::initResampleUsingFormat() {
int res = 0;
AvErrorWrap error = 0;
#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
res = swr_alloc_set_opts2(
error = swr_alloc_set_opts2(
&_swrContext,
&_swrDstChannelLayout,
_swrDstSampleFormat,
@ -416,38 +498,25 @@ bool AbstractAudioFFMpegLoader::initResampleUsingFormat() {
0,
nullptr);
#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
if (res < 0 || !_swrContext) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: "
"Unable to swr_alloc_set_opts2 for file '%1', data size '%2', "
"error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
if (error || !_swrContext) {
LogError(u"swr_alloc_set_opts2"_q, error);
return false;
} else if ((res = swr_init(_swrContext)) < 0) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: "
"Unable to swr_init for file '%1', data size '%2', "
"error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
} else if (AvErrorWrap error = swr_init(_swrContext)) {
LogError(u"swr_init"_q, error);
return false;
}
if (_swrDstData) {
av_freep(&_swrDstData[0]);
_swrDstDataCapacity = -1;
}
_resampledFrame = nullptr;
_resampledFrameCapacity = 0;
return true;
}
bool AbstractAudioFFMpegLoader::ensureResampleSpaceAvailable(int samples) {
if (_swrDstData != nullptr && _swrDstDataCapacity >= samples) {
const auto enlarge = (_resampledFrameCapacity < samples);
if (!_resampledFrame) {
_resampledFrame = FFmpeg::MakeFramePointer();
} else if (enlarge || !av_frame_is_writable(_resampledFrame.get())) {
av_frame_unref(_resampledFrame.get());
} else {
return true;
}
const auto allocate = std::max(samples, int(av_rescale_rnd(
@ -455,46 +524,166 @@ bool AbstractAudioFFMpegLoader::ensureResampleSpaceAvailable(int samples) {
_swrDstRate,
_swrSrcRate,
AV_ROUND_UP)));
if (_swrDstData) {
av_freep(&_swrDstData[0]);
}
const auto res = _swrDstData
? av_samples_alloc(
_swrDstData,
nullptr,
_outputChannels,
allocate,
_swrDstSampleFormat,
0)
: av_samples_alloc_array_and_samples(
&_swrDstData,
nullptr,
_outputChannels,
allocate,
_swrDstSampleFormat,
0);
if (res < 0) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: "
"Unable to av_samples_alloc for file '%1', data size '%2', "
"error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
_resampledFrame->sample_rate = _swrDstRate;
_resampledFrame->format = _swrDstSampleFormat;
av_channel_layout_copy(
&_resampledFrame->ch_layout,
&_swrDstChannelLayout);
_resampledFrame->nb_samples = allocate;
if (AvErrorWrap error = av_frame_get_buffer(_resampledFrame.get(), 0)) {
LogError(u"av_frame_get_buffer"_q, error);
return false;
}
_swrDstDataCapacity = allocate;
_resampledFrameCapacity = allocate;
return true;
}
auto AbstractAudioFFMpegLoader::readFromReadyFrame() -> ReadResult {
if (frameHasDesiredFormat()) {
return bytes::const_span(
reinterpret_cast<const bytes::type*>(_frame->extended_data[0]),
_frame->nb_samples * _outputSampleSize);
// Recreates the atempo filter graph for a new playback speed. Returns false
// if the (clamped) speed is already active, true if the graph was rebuilt.
// If creating the new graph fails we fall back to unfiltered 1x output.
bool AbstractAudioFFMpegLoader::changeSpeedFilter(float64 speed) {
	const auto clamped = std::clamp(
		speed,
		Audio::kSpeedMin,
		Audio::kSpeedMax);
	if (clamped == _filterSpeed) {
		return false;
	}
	avfilter_graph_free(&_filterGraph);
	const auto fallbackIfFailed = gsl::finally([&] {
		// createSpeedFilter() leaves _filterGraph null on failure (and for
		// speed 1.), so reset the filtering state to pass-through.
		if (!_filterGraph) {
			_filteredFrame = nullptr;
			_filterSpeed = 1.;
		}
	});
	createSpeedFilter(clamped);
	return true;
}
// Builds an abuffer -> atempo -> abuffersink graph producing audio at the
// given playback speed in the loader's output format (_swrDstSampleFormat /
// _swrDstRate / _swrDstChannelLayout). On success fills _filterGraph,
// _filterSrc, _atempo, _filterSink, _filteredFrame and _filterSpeed; on any
// failure leaves _filterGraph null (callers treat that as pass-through).
void AbstractAudioFFMpegLoader::createSpeedFilter(float64 speed) {
	Expects(!_filterGraph);

	// Speed 1 needs no filtering, samples pass through unchanged.
	if (speed == 1.) {
		return;
	}
	const auto abuffer = avfilter_get_by_name("abuffer");
	const auto abuffersink = avfilter_get_by_name("abuffersink");
	const auto atempo = avfilter_get_by_name("atempo");
	if (!abuffer || !abuffersink || !atempo) {
		LOG(("FFmpeg Error: Could not find abuffer / abuffersink /atempo."));
		return;
	}
	auto graph = avfilter_graph_alloc();
	if (!graph) {
		LOG(("FFmpeg Error: Unable to create filter graph."));
		return;
	}
	// Frees the graph (and every filter context allocated in it) on any
	// early return. On success `graph` is taken into _filterGraph and set
	// to nullptr, so this becomes a no-op. This also makes the explicit
	// avfilter_graph_free() calls the original had in each error branch
	// redundant - they are removed here.
	const auto guard = gsl::finally([&] {
		avfilter_graph_free(&graph);
	});
	_filterSrc = avfilter_graph_alloc_filter(graph, abuffer, "src");
	_atempo = avfilter_graph_alloc_filter(graph, atempo, "atempo");
	_filterSink = avfilter_graph_alloc_filter(graph, abuffersink, "sink");
	// Fixed: checked `atempo` (the descriptor, already verified above)
	// instead of `_atempo` (the allocated context), so an allocation
	// failure of the atempo context went undetected.
	if (!_filterSrc || !_atempo || !_filterSink) {
		LOG(("FFmpeg Error: "
			"Could not allocate abuffer / abuffersink /atempo."));
		return;
	}
	char layout[64] = { 0 };
	// NOTE(review): av_channel_layout_describe() is the new AVChannelLayout
	// API; the rest of this file guards such calls with
	// DA_FFMPEG_NEW_CHANNEL_LAYOUT - confirm the old-layout build.
	av_channel_layout_describe(
		&_swrDstChannelLayout,
		layout,
		sizeof(layout));
	av_opt_set(
		_filterSrc,
		"channel_layout",
		layout,
		AV_OPT_SEARCH_CHILDREN);
	av_opt_set_sample_fmt(
		_filterSrc,
		"sample_fmt",
		_swrDstSampleFormat,
		AV_OPT_SEARCH_CHILDREN);
	av_opt_set_q(
		_filterSrc,
		"time_base",
		AVRational{ 1, _swrDstRate },
		AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(
		_filterSrc,
		"sample_rate",
		_swrDstRate,
		AV_OPT_SEARCH_CHILDREN);
	av_opt_set_double(
		_atempo,
		"tempo",
		speed,
		AV_OPT_SEARCH_CHILDREN);

	AvErrorWrap error = 0;
	if ((error = avfilter_init_str(_filterSrc, nullptr))) {
		LogError(u"avfilter_init_str(src)"_q, error);
		return;
	} else if ((error = avfilter_init_str(_atempo, nullptr))) {
		LogError(u"avfilter_init_str(atempo)"_q, error);
		return;
	} else if ((error = avfilter_init_str(_filterSink, nullptr))) {
		LogError(u"avfilter_init_str(sink)"_q, error);
		return;
	} else if ((error = avfilter_link(_filterSrc, 0, _atempo, 0))) {
		LogError(u"avfilter_link(src->atempo)"_q, error);
		return;
	} else if ((error = avfilter_link(_atempo, 0, _filterSink, 0))) {
		LogError(u"avfilter_link(atempo->sink)"_q, error);
		return;
	} else if ((error = avfilter_graph_config(graph, nullptr))) {
		// Fixed: was mislabeled "avfilter_link(atempo->sink)".
		LogError(u"avfilter_graph_config"_q, error);
		return;
	}
	_filterGraph = base::take(graph);
	_filteredFrame = FFmpeg::MakeFramePointer();
	_filterSpeed = speed;
}
// Saves a copy of a decoded (output-format) frame into the speed-change
// queue so playback can be rebuffered from it later. `samples` overrides
// frame->nb_samples when the caller produced fewer samples than the frame
// holds (e.g. after resampling); 0 means "use frame->nb_samples".
// No-op while the queue itself is being replayed (_framesQueuedIndex >= 0).
void AbstractAudioFFMpegLoader::enqueueNormalFrame(
		not_null<AVFrame*> frame,
		int64 samples) {
	if (_framesQueuedIndex >= 0) {
		return;
	}
	if (!samples) {
		samples = frame->nb_samples;
	}
	_framesQueued.push_back({
		// Position is speed-independent: counted in decoded source samples
		// from the seek start, before any atempo filtering.
		.position = startedAtSample() + _framesQueuedSamples,
		.samples = samples,
		.frame = FFmpeg::DuplicateFramePointer(frame),
	});
	// Removed the leftover per-frame debug LOG that printed the raw
	// extended_data pointer for every enqueued frame.
	_framesQueuedSamples += samples;
}
void AbstractAudioFFMpegLoader::enqueueFramesFinished() {
if (_framesQueuedIndex >= 0) {
return;
}
_framesQueued.push_back({
.position = startedAtSample() + _framesQueuedSamples,
});
}
auto AbstractAudioFFMpegLoader::readFromReadyFrame()
-> ReadResult {
const auto raw = _frame.get();
if (frameHasDesiredFormat(raw)) {
if (!raw->nb_samples) {
return ReadError::Retry;
}
return readOrBufferForFilter(raw, raw->nb_samples);
} else if (!initResampleForFrame()) {
return ReadError::Other;
}
@ -509,57 +698,68 @@ auto AbstractAudioFFMpegLoader::readFromReadyFrame() -> ReadResult {
}
const auto samples = swr_convert(
_swrContext,
_swrDstData,
(uint8_t**)_resampledFrame->extended_data,
maxSamples,
(const uint8_t * *)_frame->extended_data,
(const uint8_t **)_frame->extended_data,
_frame->nb_samples);
if (samples < 0) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: "
"Unable to swr_convert for file '%1', data size '%2', "
"error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(samples
).arg(av_make_error_string(err, sizeof(err), samples)
));
if (AvErrorWrap error = samples) {
LogError(u"swr_convert"_q, error);
return ReadError::Other;
} else if (!samples) {
return ReadError::Retry;
}
return readOrBufferForFilter(_resampledFrame.get(), samples);
}
auto AbstractAudioFFMpegLoader::readOrBufferForFilter(
not_null<AVFrame*> frame,
int64 samplesOverride)
-> ReadResult {
enqueueNormalFrame(frame, samplesOverride);
const auto was = frame->nb_samples;
frame->nb_samples = samplesOverride;
const auto guard = gsl::finally([&] {
frame->nb_samples = was;
});
if (!_filterGraph) {
return bytes::const_span(
reinterpret_cast<const bytes::type*>(frame->extended_data[0]),
frame->nb_samples * _outputSampleSize);
}
AvErrorWrap error = av_buffersrc_add_frame_flags(
_filterSrc,
frame,
AV_BUFFERSRC_FLAG_KEEP_REF);
if (error) {
LogError(u"av_buffersrc_add_frame_flags"_q, error);
return ReadError::Other;
}
return bytes::const_span(
reinterpret_cast<const bytes::type*>(_swrDstData[0]),
samples * _outputSampleSize);
return ReadError::Retry;
}
AbstractAudioFFMpegLoader::~AbstractAudioFFMpegLoader() {
if (_filterGraph) {
avfilter_graph_free(&_filterGraph);
}
if (_swrContext) {
swr_free(&_swrContext);
}
if (_swrDstData) {
if (_swrDstData[0]) {
av_freep(&_swrDstData[0]);
}
av_freep(&_swrDstData);
}
}
FFMpegLoader::FFMpegLoader(
const Core::FileLocation & file,
const QByteArray & data,
bytes::vector && buffer)
const Core::FileLocation &file,
const QByteArray &data,
bytes::vector &&buffer)
: AbstractAudioFFMpegLoader(file, data, std::move(buffer)) {
}
bool FFMpegLoader::open(crl::time positionMs) {
if (!AbstractFFMpegLoader::open(positionMs)) {
return false;
}
if (!openCodecContext()) {
return false;
}
if (!initUsingContext(_codecContext, _samplesCount, _samplesFrequency)) {
return false;
}
return seekTo(positionMs);
bool FFMpegLoader::open(crl::time positionMs, float64 speed) {
return AbstractFFMpegLoader::open(positionMs)
&& openCodecContext()
&& initUsingContext(_codecContext, speed)
&& seekTo(positionMs);
}
bool FFMpegLoader::openCodecContext() {
@ -577,31 +777,18 @@ bool FFMpegLoader::openCodecContext() {
}
const auto stream = fmtContext->streams[streamId];
if ((res = avcodec_parameters_to_context(
AvErrorWrap error = avcodec_parameters_to_context(
_codecContext,
stream->codecpar)) < 0) {
LOG(("Audio Error: "
"Unable to avcodec_parameters_to_context for file '%1', "
"data size '%2', error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
stream->codecpar);
if (error) {
LogError(u"avcodec_parameters_to_context"_q, error);
return false;
}
_codecContext->pkt_timebase = stream->time_base;
av_opt_set_int(_codecContext, "refcounted_frames", 1, 0);
if ((res = avcodec_open2(_codecContext, codec, 0)) < 0) {
LOG(("Audio Error: "
"Unable to avcodec_open2 for file '%1', data size '%2', "
"error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
if (AvErrorWrap error = avcodec_open2(_codecContext, codec, 0)) {
LogError(u"avcodec_open2"_q, error);
return false;
}
return true;
@ -630,48 +817,34 @@ FFMpegLoader::ReadResult FFMpegLoader::readMore() {
return readResult;
}
auto res = 0;
if ((res = av_read_frame(fmtContext, &_packet)) < 0) {
if (res != AVERROR_EOF) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: "
"Unable to av_read_frame() file '%1', data size '%2', "
"error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
if (AvErrorWrap error = av_read_frame(fmtContext, &_packet)) {
if (error.code() != AVERROR_EOF) {
LogError(u"av_read_frame"_q, error);
return ReadError::Other;
}
avcodec_send_packet(_codecContext, nullptr); // drain
return bytes::const_span();
error = avcodec_send_packet(_codecContext, nullptr); // drain
if (!error) {
return ReadError::Retry;
}
LogError(u"avcodec_send_packet"_q, error);
return ReadError::Other;
}
if (_packet.stream_index == streamId) {
res = avcodec_send_packet(_codecContext, &_packet);
if (res < 0) {
AvErrorWrap error = avcodec_send_packet(_codecContext, &_packet);
if (error) {
av_packet_unref(&_packet);
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: "
"Unable to avcodec_send_packet() file '%1', data size '%2', "
"error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
LogError(u"avcodec_send_packet"_q, error);
// There is a sample voice message where skipping such packet
// results in a crash (read_access to nullptr) in swr_convert().
//if (res == AVERROR_INVALIDDATA) {
// return ReadResult::NotYet; // try to skip bad packet
//if (error.code() == AVERROR_INVALIDDATA) {
// return ReadResult::Retry; // try to skip bad packet
//}
return ReadError::Other;
}
}
av_packet_unref(&_packet);
return bytes::const_span();
return ReadError::Retry;
}
FFMpegLoader::~FFMpegLoader() {

View file

@ -16,6 +16,7 @@ extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
#include <libavfilter/avfilter.h>
} // extern "C"
#include <al.h>
@ -35,10 +36,14 @@ public:
: AudioPlayerLoader(file, data, std::move(buffer)) {
}
bool open(crl::time positionMs) override;
bool open(crl::time positionMs, float64 speed = 1.) override;
int64 samplesCount() override {
return _samplesCount;
crl::time duration() override {
return _duration;
}
void overrideDuration(int64 startedAtSample, crl::time duration) {
_startedAtSample = startedAtSample;
_duration = duration;
}
int samplesFrequency() override {
@ -51,13 +56,18 @@ public:
int channels);
#endif // !DA_FFMPEG_NEW_CHANNEL_LAYOUT
[[nodiscard]] int64 startedAtSample() const {
return _startedAtSample;
}
~AbstractFFMpegLoader();
protected:
static int64 Mul(int64 value, AVRational rational);
int _samplesFrequency = Media::Player::kDefaultFrequency;
int64 _samplesCount = 0;
int64 _startedAtSample = 0;
crl::time _duration = 0;
uchar *ioBuffer = nullptr;
AVIOContext *ioContext = nullptr;
@ -88,9 +98,8 @@ public:
const QByteArray &data,
bytes::vector &&buffer);
int64 samplesCount() override {
return _outputSamplesCount;
}
void dropFramesTill(int64 samples) override;
int64 startReadingQueuedFrames(float64 newSpeed) override;
int samplesFrequency() override {
return _swrDstRate;
@ -107,10 +116,7 @@ public:
~AbstractAudioFFMpegLoader();
protected:
bool initUsingContext(
not_null<AVCodecContext *> context,
int64 initialCount,
int initialFrequency);
bool initUsingContext(not_null<AVCodecContext*> context, float64 speed);
[[nodiscard]] ReadResult readFromReadyContext(
not_null<AVCodecContext*> context);
@ -119,17 +125,42 @@ protected:
[[nodiscard]] ReadResult replaceFrameAndRead(FFmpeg::FramePointer frame);
private:
struct EnqueuedFrame {
int64 position = 0;
int64 samples = 0;
FFmpeg::FramePointer frame;
};
[[nodiscard]] ReadResult readFromReadyFrame();
bool frameHasDesiredFormat() const;
[[nodiscard]] ReadResult readOrBufferForFilter(
not_null<AVFrame*> frame,
int64 samplesOverride);
bool frameHasDesiredFormat(not_null<AVFrame*> frame) const;
bool initResampleForFrame();
bool initResampleUsingFormat();
bool ensureResampleSpaceAvailable(int samples);
bool changeSpeedFilter(float64 speed);
void createSpeedFilter(float64 speed);
void enqueueNormalFrame(
not_null<AVFrame*> frame,
int64 samples = 0);
void enqueueFramesFinished();
[[nodiscard]] auto fillFrameFromQueued()
-> std::variant<not_null<const EnqueuedFrame*>, ReadError>;
FFmpeg::FramePointer _frame;
FFmpeg::FramePointer _resampledFrame;
FFmpeg::FramePointer _filteredFrame;
int _resampledFrameCapacity = 0;
int64 _framesQueuedSamples = 0;
std::deque<EnqueuedFrame> _framesQueued;
int _framesQueuedIndex = -1;
int _outputFormat = AL_FORMAT_STEREO16;
int _outputChannels = 2;
int _outputSampleSize = 2 * sizeof(uint16);
int64 _outputSamplesCount = 0;
SwrContext *_swrContext = nullptr;
@ -146,8 +177,12 @@ private:
uint64_t _swrSrcChannelLayout = 0;
uint64_t _swrDstChannelLayout = AV_CH_LAYOUT_STEREO;
#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
uint8_t **_swrDstData = nullptr;
int _swrDstDataCapacity = 0;
AVFilterGraph *_filterGraph = nullptr;
float64 _filterSpeed = 1.;
AVFilterContext *_filterSrc = nullptr;
AVFilterContext *_atempo = nullptr;
AVFilterContext *_filterSink = nullptr;
};
@ -158,7 +193,7 @@ public:
const QByteArray &data,
bytes::vector &&buffer);
bool open(crl::time positionMs) override;
bool open(crl::time positionMs, float64 speed = 1.) override;
ReadResult readMore() override;

View file

@ -52,6 +52,18 @@ bool AudioPlayerLoader::holdsSavedDecodedSamples() const {
return _holdsSavedSamples;
}
void AudioPlayerLoader::dropDecodedSamples() {
_savedSamples = {};
_holdsSavedSamples = false;
}
int AudioPlayerLoader::bytesPerBuffer() {
if (!_bytesPerBuffer) {
_bytesPerBuffer = samplesFrequency() * sampleSize();
}
return _bytesPerBuffer;
}
bool AudioPlayerLoader::openFile() {
if (_data.isEmpty() && _bytes.empty()) {
if (_f.isOpen()) _f.close();

View file

@ -21,17 +21,29 @@ public:
bytes::vector &&buffer);
virtual ~AudioPlayerLoader();
virtual bool check(const Core::FileLocation &file, const QByteArray &data);
virtual bool check(
const Core::FileLocation &file,
const QByteArray &data);
virtual bool open(crl::time positionMs) = 0;
virtual int64 samplesCount() = 0;
virtual bool open(crl::time positionMs, float64 speed = 1.) = 0;
virtual crl::time duration() = 0;
virtual int samplesFrequency() = 0;
virtual int sampleSize() = 0;
virtual int format() = 0;
virtual void dropFramesTill(int64 samples) {
}
[[nodiscard]] virtual int64 startReadingQueuedFrames(float64 newSpeed) {
Unexpected(
"startReadingQueuedFrames() on not AbstractAudioFFMpegLoader");
}
[[nodiscard]] int bytesPerBuffer();
enum class ReadError {
Other,
NotYet,
Retry,
RetryNotQueued,
Wait,
EndOfFile,
};
@ -51,6 +63,7 @@ public:
void saveDecodedSamples(not_null<QByteArray*> samples);
void takeSavedDecodedSamples(not_null<QByteArray*> samples);
bool holdsSavedDecodedSamples() const;
void dropDecodedSamples();
protected:
Core::FileLocation _file;
@ -67,6 +80,8 @@ private:
QByteArray _savedSamples;
bool _holdsSavedSamples = false;
int _bytesPerBuffer = 0;
};
} // namespace Media

View file

@ -13,11 +13,6 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
namespace Media {
namespace Player {
namespace {
constexpr auto kPlaybackBufferSize = 256 * 1024;
} // namespace
Loaders::Loaders(QThread *thread)
: _fromExternalNotify([=] { videoSoundAdded(); }) {
@ -151,17 +146,29 @@ void Loaders::onLoad(const AudioMsgId &audio) {
}
void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
auto err = SetupNoErrorStarted;
auto type = audio.type();
auto l = setupLoader(audio, err, positionMs);
auto setup = setupLoader(audio, positionMs);
const auto l = setup.loader;
if (!l) {
if (err == SetupErrorAtStart) {
if (setup.errorAtStart) {
emitError(type);
}
return;
}
auto started = (err == SetupNoErrorStarted);
const auto sampleSize = l->sampleSize();
const auto speedChanged = (setup.newSpeed != setup.oldSpeed);
auto updatedWithSpeed = speedChanged
? rebufferOnSpeedChange(setup)
: std::optional<Mixer::Track::WithSpeed>();
if (!speedChanged && setup.oldSpeed > 0.) {
const auto normalPosition = Mixer::Track::SpeedIndependentPosition(
setup.position,
setup.oldSpeed);
l->dropFramesTill(normalPosition);
}
const auto started = setup.justStarted;
auto finished = false;
auto waiting = false;
auto errAtStart = started;
@ -170,10 +177,15 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
auto accumulatedCount = 0;
if (l->holdsSavedDecodedSamples()) {
l->takeSavedDecodedSamples(&accumulated);
accumulatedCount = accumulated.size() / l->sampleSize();
accumulatedCount = accumulated.size() / sampleSize;
}
while (accumulated.size() < kPlaybackBufferSize) {
const auto accumulateTill = l->bytesPerBuffer();
while (accumulated.size() < accumulateTill) {
using Error = AudioPlayerLoader::ReadError;
const auto result = l->readMore();
if (result == Error::Retry) {
continue;
}
const auto sampleBytes = v::is<bytes::const_span>(result)
? v::get<bytes::const_span>(result)
: bytes::const_span();
@ -181,10 +193,8 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
accumulated.append(
reinterpret_cast<const char*>(sampleBytes.data()),
sampleBytes.size());
accumulatedCount += sampleBytes.size() / l->sampleSize();
}
using Error = AudioPlayerLoader::ReadError;
if (result == Error::Other) {
accumulatedCount += sampleBytes.size() / sampleSize;
} else if (result == Error::Other) {
if (errAtStart) {
{
QMutexLocker lock(internal::audioPlayerMutex());
@ -201,7 +211,7 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
finished = true;
break;
} else if (result == Error::Wait) {
waiting = (accumulated.size() < kPlaybackBufferSize)
waiting = (accumulated.size() < accumulateTill)
&& (accumulated.isEmpty() || !l->forceToBuffer());
if (waiting) {
l->saveDecodedSamples(&accumulated);
@ -225,10 +235,11 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
return;
}
if (started || !accumulated.isEmpty()) {
if (started || !accumulated.isEmpty() || updatedWithSpeed) {
Audio::AttachToDevice();
}
if (started) {
Assert(!updatedWithSpeed);
track->started();
if (!internal::audioCheckError()) {
setStoppedState(track, State::StoppedAtStart);
@ -237,12 +248,20 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
}
track->format = l->format();
track->frequency = l->samplesFrequency();
track->state.frequency = l->samplesFrequency();
const auto position = (positionMs * track->frequency) / 1000LL;
track->bufferedPosition = position;
track->state.position = position;
track->fadeStartPosition = position;
track->state.position = (positionMs * track->state.frequency)
/ 1000LL;
track->updateWithSpeedPosition();
track->withSpeed.bufferedPosition = track->withSpeed.position;
track->withSpeed.fadeStartPosition = track->withSpeed.position;
} else if (updatedWithSpeed) {
auto old = Mixer::Track();
old.stream = base::take(track->stream);
old.withSpeed = std::exchange(track->withSpeed, *updatedWithSpeed);
track->speed = setup.newSpeed;
track->reattach(type);
old.detach();
}
if (!accumulated.isEmpty()) {
track->ensureStreamCreated(type);
@ -256,21 +275,23 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
}
if (bufferIndex < 0) { // No free buffers, wait.
track->waitingForBuffer = true;
l->saveDecodedSamples(&accumulated);
return;
} else if (l->forceToBuffer()) {
l->setForceToBuffer(false);
}
track->waitingForBuffer = false;
track->bufferSamples[bufferIndex] = accumulated;
track->samplesCount[bufferIndex] = accumulatedCount;
track->bufferedLength += accumulatedCount;
track->withSpeed.buffered[bufferIndex] = accumulated;
track->withSpeed.samples[bufferIndex] = accumulatedCount;
track->withSpeed.bufferedLength += accumulatedCount;
alBufferData(
track->stream.buffers[bufferIndex],
track->format,
accumulated.constData(),
accumulated.size(),
track->frequency);
track->state.frequency);
alSourceQueueBuffers(
track->stream.source,
@ -292,8 +313,11 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
if (finished) {
track->loaded = true;
track->state.length = track->bufferedPosition + track->bufferedLength;
clear(type);
track->withSpeed.length = track->withSpeed.bufferedPosition
+ track->withSpeed.bufferedLength;
track->state.length = Mixer::Track::SpeedIndependentPosition(
track->withSpeed.length,
track->speed);
}
track->loading = false;
@ -323,7 +347,10 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
}
if (state == AL_STOPPED) {
alSourcei(track->stream.source, AL_SAMPLE_OFFSET, qMax(track->state.position - track->bufferedPosition, 0LL));
alSourcei(
track->stream.source,
AL_SAMPLE_OFFSET,
qMax(track->withSpeed.position - track->withSpeed.bufferedPosition, 0LL));
if (!internal::audioCheckError()) {
setStoppedState(track, State::StoppedAtError);
emitError(type);
@ -340,20 +367,19 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
needToCheck();
}
AudioPlayerLoader *Loaders::setupLoader(
Loaders::SetupLoaderResult Loaders::setupLoader(
const AudioMsgId &audio,
SetupError &err,
crl::time positionMs) {
err = SetupErrorAtStart;
QMutexLocker lock(internal::audioPlayerMutex());
if (!mixer()) return nullptr;
if (!mixer()) {
return {};
}
auto track = mixer()->trackForType(audio.type());
if (!track || track->state.id != audio || !track->loading) {
error(audio);
LOG(("Audio Error: trying to load part of audio, that is not current at the moment"));
err = SetupErrorNotPlaying;
return nullptr;
return {};
}
bool isGoodId = false;
@ -369,6 +395,8 @@ AudioPlayerLoader *Loaders::setupLoader(
l = nullptr;
}
auto SpeedDependentPosition = Mixer::Track::SpeedDependentPosition;
auto SpeedIndependentPosition = Mixer::Track::SpeedIndependentPosition;
if (!l) {
std::unique_ptr<AudioPlayerLoader> *loader = nullptr;
switch (audio.type()) {
@ -383,7 +411,7 @@ AudioPlayerLoader *Loaders::setupLoader(
track->state.state = State::StoppedAtError;
error(audio);
LOG(("Audio Error: video sound data not ready"));
return nullptr;
return {};
}
*loader = std::make_unique<ChildFFMpegLoader>(
std::move(track->externalData));
@ -395,24 +423,120 @@ AudioPlayerLoader *Loaders::setupLoader(
}
l = loader->get();
if (!l->open(positionMs)) {
track->speed = track->nextSpeed;
if (!l->open(positionMs, track->speed)) {
track->state.state = State::StoppedAtStart;
return nullptr;
return { .errorAtStart = true };
}
auto length = l->samplesCount();
if (length <= 0) {
const auto duration = l->duration();
if (duration <= 0) {
track->state.state = State::StoppedAtStart;
return nullptr;
return { .errorAtStart = true };
}
track->state.length = length;
track->state.frequency = l->samplesFrequency();
err = SetupNoErrorStarted;
track->state.length = (duration * track->state.frequency) / 1000;
track->withSpeed.length = SpeedDependentPosition(
track->state.length,
track->speed);
return { .loader = l, .justStarted = true };
} else if (track->nextSpeed != track->speed) {
return {
.loader = l,
.oldSpeed = track->speed,
.newSpeed = track->nextSpeed,
.fadeStartPosition = track->withSpeed.fadeStartPosition,
.position = track->withSpeed.fineTunedPosition,
.normalLength = track->state.length,
.frequency = track->state.frequency,
};
} else if (track->loaded) {
err = SetupErrorLoadedFull;
LOG(("Audio Error: trying to load part of audio, that is already loaded to the end"));
return nullptr;
return {};
}
return l;
return {
.loader = l,
.oldSpeed = track->speed,
.newSpeed = track->nextSpeed,
.position = track->withSpeed.fineTunedPosition,
.frequency = track->state.frequency,
};
}
// Recomputes a track's speed-dependent playback state after a speed change
// and refills the OpenAL buffers from the loader's queued frames through
// the new-speed filter. Positions stored in WithSpeed are in new-speed
// samples; `setup.position` / `setup.fadeStartPosition` arrive in old-speed
// samples and are rescaled by oldSpeed / newSpeed.
Mixer::Track::WithSpeed Loaders::rebufferOnSpeedChange(
		const SetupLoaderResult &setup) {
	Expects(setup.oldSpeed > 0. && setup.newSpeed > 0.);
	Expects(setup.loader != nullptr);

	const auto speed = setup.newSpeed;
	const auto change = setup.oldSpeed / speed;
	const auto normalPosition = Mixer::Track::SpeedIndependentPosition(
		setup.position,
		setup.oldSpeed);
	const auto newPosition = int64(base::SafeRound(setup.position * change));
	auto result = Mixer::Track::WithSpeed{
		.fineTunedPosition = newPosition,
		.position = newPosition,
		.length = Mixer::Track::SpeedDependentPosition(
			setup.normalLength,
			speed),
		.fadeStartPosition = int64(
			base::SafeRound(setup.fadeStartPosition * change)),
	};
	const auto l = setup.loader;
	l->dropFramesTill(normalPosition);
	const auto normalFrom = l->startReadingQueuedFrames(speed);
	if (normalFrom < 0) {
		// Nothing was queued - keep the rescaled position as buffered.
		result.bufferedPosition = newPosition;
		return result;
	}
	result.bufferedPosition = Mixer::Track::SpeedDependentPosition(
		normalFrom,
		speed);
	// Loop-invariant values hoisted out of the per-buffer loop.
	const auto sampleSize = l->sampleSize();
	const auto accumulateTill = l->bytesPerBuffer();
	for (auto i = 0; i != Mixer::Track::kBuffersCount; ++i) {
		auto finished = false;
		auto accumulated = QByteArray();
		auto accumulatedCount = int64();
		while (accumulated.size() < accumulateTill) {
			// Renamed from `result`, which shadowed the outer WithSpeed
			// aggregate of the same name.
			const auto read = l->readMore();
			const auto sampleBytes = v::is<bytes::const_span>(read)
				? v::get<bytes::const_span>(read)
				: bytes::const_span();
			if (!sampleBytes.empty()) {
				accumulated.append(
					reinterpret_cast<const char*>(sampleBytes.data()),
					sampleBytes.size());
				accumulatedCount += sampleBytes.size() / sampleSize;
				continue;
			} else if (read == AudioPlayerLoader::ReadError::Retry) {
				continue;
			}
			// Replaying queued frames can only end by running out of the
			// queue; any other error here would be a logic error.
			Assert(read == AudioPlayerLoader::ReadError::RetryNotQueued);
			finished = true;
			break;
		}
		if (!accumulated.isEmpty()) {
			result.samples[i] = accumulatedCount;
			result.bufferedLength += accumulatedCount;
			result.buffered[i] = accumulated;
		}
		if (finished) {
			break;
		}
	}
	// Clamp the playback position into the range we actually rebuffered.
	const auto limit = result.bufferedPosition + result.bufferedLength;
	if (newPosition > limit) {
		result.fineTunedPosition = limit;
		result.position = limit;
	}
	if (limit > result.length) {
		result.length = limit;
	}
	return result;
}
Mixer::Track *Loaders::checkLoader(AudioMsgId::Type type) {

View file

@ -37,7 +37,31 @@ public Q_SLOTS:
void onCancel(const AudioMsgId &audio);
private:
// Result of setting up (or re-checking) a loader for a track.
// Carries what the caller needs either to continue loading or to
// rebuffer after a playback speed change.
struct SetupLoaderResult {
	// Loader feeding the track; nullptr when setup failed or is a no-op.
	AudioPlayerLoader *loader = nullptr;
	// Speed before the change; > 0 when a rebuffer is requested.
	float64 oldSpeed = 0.;
	// Newly requested speed; differs from oldSpeed on a speed change.
	float64 newSpeed = 0.;
	// Speed-dependent position where the volume fade started.
	int64 fadeStartPosition = 0;
	// Fine-tuned speed-dependent playback position.
	int64 position = 0;
	// Track length on the normal (speed-independent) timeline, in samples.
	int64 normalLength = 0;
	// Sample frequency of the track state.
	int frequency = 0;
	// Set when the loader failed right at the start (open / zero duration).
	bool errorAtStart = false;
	// Set when the loader was opened fresh for this track.
	bool justStarted = false;
};
void videoSoundAdded();
[[nodiscard]] Mixer::Track::WithSpeed rebufferOnSpeedChange(
const SetupLoaderResult &setup);
void emitError(AudioMsgId::Type type);
AudioMsgId clear(AudioMsgId::Type type);
void setStoppedState(Mixer::Track *m, State state = State::Stopped);
void loadData(AudioMsgId audio, crl::time positionMs = 0);
[[nodiscard]] SetupLoaderResult setupLoader(
const AudioMsgId &audio,
crl::time positionMs);
Mixer::Track *checkLoader(AudioMsgId::Type type);
AudioMsgId _audio, _song, _video;
std::unique_ptr<AudioPlayerLoader> _audioLoader;
@ -51,23 +75,6 @@ private:
base::flat_set<AudioMsgId> _fromExternalForceToBuffer;
SingleQueuedInvokation _fromExternalNotify;
void emitError(AudioMsgId::Type type);
AudioMsgId clear(AudioMsgId::Type type);
void setStoppedState(Mixer::Track *m, State state = State::Stopped);
enum SetupError {
SetupErrorAtStart = 0,
SetupErrorNotPlaying = 1,
SetupErrorLoadedFull = 2,
SetupNoErrorStarted = 3,
};
void loadData(AudioMsgId audio, crl::time positionMs = 0);
AudioPlayerLoader *setupLoader(
const AudioMsgId &audio,
SetupError &err,
crl::time positionMs);
Mixer::Track *checkLoader(AudioMsgId::Type type);
};
} // namespace Player

View file

@ -59,7 +59,8 @@ void Track::fillFromData(bytes::vector &&data) {
}
auto format = loader.format();
_peakEachPosition = _peakDurationMs ? ((loader.samplesFrequency() * _peakDurationMs) / 1000) : 0;
auto peaksCount = _peakEachPosition ? (loader.samplesCount() / _peakEachPosition) : 0;
const auto samplesCount = (loader.duration() * loader.samplesFrequency()) / 1000;
const auto peaksCount = _peakEachPosition ? (samplesCount / _peakEachPosition) : 0;
_peaks.reserve(peaksCount);
auto peakValue = uint16(0);
auto peakSamples = 0;
@ -79,32 +80,33 @@ void Track::fillFromData(bytes::vector &&data) {
do {
using Error = AudioPlayerLoader::ReadError;
const auto result = loader.readMore();
const auto sampleBytes = v::is<bytes::const_span>(result)
? v::get<bytes::const_span>(result)
: bytes::const_span();
if (!sampleBytes.empty()) {
_samplesCount += sampleBytes.size() / loader.sampleSize();
_samples.insert(_samples.end(), sampleBytes.data(), sampleBytes.data() + sampleBytes.size());
if (peaksCount) {
if (format == AL_FORMAT_MONO8 || format == AL_FORMAT_STEREO8) {
Media::Audio::IterateSamples<uchar>(sampleBytes, peakCallback);
} else if (format == AL_FORMAT_MONO16 || format == AL_FORMAT_STEREO16) {
Media::Audio::IterateSamples<int16>(sampleBytes, peakCallback);
}
}
} else if (result == Error::Other
|| result == Error::NotYet
|| result == Error::Wait) {
_failed = true;
}
if (!v::is<bytes::const_span>(result)) {
Assert(result != Error::Wait && result != Error::RetryNotQueued);
if (result == Error::Retry) {
continue;
} else if (result == Error::EndOfFile) {
break;
} else if (result == Error::Other || result == Error::Wait) {
_failed = true;
break;
}
Assert(v::is<bytes::const_span>(result));
const auto sampleBytes = v::get<bytes::const_span>(result);
Assert(!sampleBytes.empty());
_samplesCount += sampleBytes.size() / loader.sampleSize();
_samples.insert(_samples.end(), sampleBytes.data(), sampleBytes.data() + sampleBytes.size());
if (peaksCount) {
if (format == AL_FORMAT_MONO8 || format == AL_FORMAT_STEREO8) {
Media::Audio::IterateSamples<uchar>(sampleBytes, peakCallback);
} else if (format == AL_FORMAT_MONO16 || format == AL_FORMAT_STEREO16) {
Media::Audio::IterateSamples<int16>(sampleBytes, peakCallback);
}
}
} while (true);
_alFormat = loader.format();
_sampleRate = loader.samplesFrequency();
_lengthMs = (loader.samplesCount() * crl::time(1000)) / _sampleRate;
_lengthMs = loader.duration();
}
void Track::fillFromFile(const Core::FileLocation &location) {

View file

@ -11,6 +11,11 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "core/file_location.h"
namespace Media {
namespace {
using FFmpeg::AvErrorWrap;
} // namespace
ChildFFMpegLoader::ChildFFMpegLoader(
std::unique_ptr<ExternalSoundData> &&data)
@ -22,14 +27,13 @@ ChildFFMpegLoader::ChildFFMpegLoader(
Expects(_parentData->codec != nullptr);
}
bool ChildFFMpegLoader::open(crl::time positionMs) {
return initUsingContext(
_parentData->codec.get(),
_parentData->length,
_parentData->frequency);
// Opens the child loader at the given position with the given speed:
// converts the position to samples, fixes the duration taken from the
// parent data and initializes the decoding context.
bool ChildFFMpegLoader::open(crl::time positionMs, float64 speed) {
	const auto startSample = (positionMs * samplesFrequency()) / 1000LL;
	overrideDuration(startSample, _parentData->duration);
	return initUsingContext(_parentData->codec.get(), speed);
}
AudioPlayerLoader::ReadResult ChildFFMpegLoader::readFromInitialFrame() {
auto ChildFFMpegLoader::readFromInitialFrame() -> ReadResult {
if (!_parentData->frame) {
return ReadError::Wait;
}
@ -58,28 +62,22 @@ auto ChildFFMpegLoader::readMore() -> ReadResult {
_eofReached = packet.empty();
if (_eofReached) {
avcodec_send_packet(_parentData->codec.get(), nullptr); // drain
return bytes::const_span();
return ReadError::Retry;
}
auto res = avcodec_send_packet(
AvErrorWrap error = avcodec_send_packet(
_parentData->codec.get(),
&packet.fields());
if (res < 0) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: Unable to avcodec_send_packet() file '%1', "
"data size '%2', error %3, %4"
).arg(_file.name()
).arg(_data.size()
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)));
if (error) {
LogError(u"avcodec_send_packet"_q, error);
// There is a sample voice message where skipping such packet
// results in a crash (read_access to nullptr) in swr_convert().
if (res == AVERROR_INVALIDDATA) {
return ReadError::NotYet; // try to skip bad packet
if (error.code() == AVERROR_INVALIDDATA) {
return ReadError::Retry; // try to skip bad packet
}
return ReadError::Other;
}
return bytes::const_span();
return ReadError::Retry;
}
void ChildFFMpegLoader::enqueuePackets(

View file

@ -15,8 +15,7 @@ namespace Media {
struct ExternalSoundData {
FFmpeg::CodecPointer codec;
FFmpeg::FramePointer frame;
int32 frequency = Media::Player::kDefaultFrequency;
int64 length = 0;
crl::time duration = 0;
float64 speed = 1.; // 0.5 <= speed <= 2.
};
@ -29,7 +28,7 @@ class ChildFFMpegLoader : public AbstractAudioFFMpegLoader {
public:
ChildFFMpegLoader(std::unique_ptr<ExternalSoundData> &&data);
bool open(crl::time positionMs) override;
bool open(crl::time positionMs, float64 speed = 1.) override;
bool check(const Core::FileLocation &file, const QByteArray &data) override {
return true;

View file

@ -53,7 +53,10 @@ constexpr auto kRememberShuffledOrderItems = 16;
constexpr auto kMinLengthForSavePosition = 20 * TimeId(60); // 20 minutes.
auto VoicePlaybackSpeed() {
return std::clamp(Core::App().settings().voicePlaybackSpeed(), 0.6, 1.7);
return std::clamp(
Core::App().settings().voicePlaybackSpeed(),
Media::Audio::kSpeedMin,
Media::Audio::kSpeedMax);
}
base::options::toggle OptionDisableAutoplayNext({

View file

@ -133,8 +133,7 @@ void AudioTrack::mixerInit() {
auto data = std::make_unique<ExternalSoundData>();
data->frame = std::move(_stream.decodedFrame);
data->codec = std::move(_stream.codec);
data->frequency = _stream.frequency;
data->length = (_stream.duration * data->frequency) / 1000LL;
data->duration = _stream.duration;
data->speed = _options.speed;
Media::Player::mixer()->play(

View file

@ -589,7 +589,7 @@ void System::showNext() {
alertThread->owner().notifySettings().sound(alertThread).id);
track->playOnce();
Media::Player::mixer()->suppressAll(track->getLengthMs());
Media::Player::mixer()->faderOnTimer();
Media::Player::mixer()->scheduleFaderCallback();
}
}

2
cmake

@ -1 +1 @@
Subproject commit 31eb395967d5da6f07bd1e3f5f5f9bdaac94cb85
Subproject commit d3977a83a6c8d7a1551dc457a79e05a3f383aa6f