Add support for static and webm custom emoji.

John Preston 2022-06-29 11:56:10 +04:00
parent 8ed101cbbf
commit 3c01bb5a4a
6 changed files with 413 additions and 6 deletions

View file

@@ -15,6 +15,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "data/data_file_origin.h"
#include "lottie/lottie_common.h"
#include "lottie/lottie_emoji.h"
#include "ffmpeg/ffmpeg_emoji.h"
#include "chat_helpers/stickers_lottie.h"
#include "ui/text/text_block.h"
#include "ui/ui_utility.h"
@@ -317,8 +318,18 @@ void CustomEmojiLoader::check() {
auto put = [=, key = cacheKey(document)](QByteArray value) {
document->owner().cacheBigFile().put(key, std::move(value));
};
-auto generator = [=, bytes = Lottie::ReadContent(data, filepath)]() {
-	return std::make_unique<Lottie::EmojiGenerator>(bytes);
+const auto type = document->sticker()->type;
+auto generator = [=, bytes = Lottie::ReadContent(data, filepath)]()
+-> std::unique_ptr<Ui::FrameGenerator> {
+	switch (type) {
+	case StickerType::Tgs:
+		return std::make_unique<Lottie::EmojiGenerator>(bytes);
+	case StickerType::Webm:
+		return std::make_unique<FFmpeg::EmojiGenerator>(bytes);
+	case StickerType::Webp:
+		return std::make_unique<Ui::ImageFrameGenerator>(bytes);
+	}
+	Unexpected("Type in custom emoji sticker frame generator.");
};
auto renderer = std::make_unique<Renderer>(RendererDescriptor{
.generator = std::move(generator),

View file

@@ -0,0 +1,355 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.
For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#include "ffmpeg/ffmpeg_emoji.h"
#include "ffmpeg/ffmpeg_utility.h"
#include "base/debug_log.h"
namespace FFmpeg {
namespace {
constexpr auto kMaxArea = 1920 * 1080 * 4;
} // namespace
class EmojiGenerator::Impl final {
public:
explicit Impl(const QByteArray &bytes);
[[nodiscard]] Frame renderNext(
QImage storage,
QSize size,
Qt::AspectRatioMode mode);

private:
struct ReadFrame {
FramePointer frame;
crl::time position = 0;
crl::time duration = 0;
};
void readNextFrame();
void resolveNextFrameTiming();
[[nodiscard]] Frame renderCurrent(
QImage storage,
QSize size,
Qt::AspectRatioMode mode);
[[nodiscard]] QString wrapError(int result) const;
bool rotationSwapWidthHeight() const {
return (_rotation == 90) || (_rotation == 270);
}
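
// Custom AVIO callbacks: libavformat pulls the emoji bytes from
// memory instead of opening a file.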
[[nodiscard]] static int Read(
void *opaque,
uint8_t *buf,
int buf_size);
[[nodiscard]] static int64_t Seek(
void *opaque,
int64_t offset,
int whence);
[[nodiscard]] int read(uint8_t *buf, int buf_size);
[[nodiscard]] int64_t seek(int64_t offset, int whence);

const QByteArray _bytes;
int _deviceOffset = 0;
FormatPointer _format;
ReadFrame _current;
ReadFrame _next;
CodecPointer _codec;
SwscalePointer _scale;
int _streamId = 0;
int _rotation = 0;
//AVRational _aspect = kNormalAspect;
int _width = 0;
int _height = 0;
QSize _swsSize;
crl::time _framePosition = 0;
int _nextFrameDelay = 0;
int _currentFrameDelay = 0;
};

EmojiGenerator::Impl::Impl(const QByteArray &bytes)
: _bytes(bytes) {
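// Wire the byte buffer into libavformat through the custom
// read/seek callbacks; no write callback is needed.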
_format = MakeFormatPointer(
static_cast<void*>(this),
&EmojiGenerator::Impl::Read,
nullptr,
&EmojiGenerator::Impl::Seek);
auto error = 0;
if ((error = avformat_find_stream_info(_format.get(), nullptr))) {
return;
}
_streamId = av_find_best_stream(
_format.get(),
AVMEDIA_TYPE_VIDEO,
-1,
-1,
nullptr,
0);
if (_streamId < 0) {
return;
}
const auto info = _format->streams[_streamId];
_rotation = ReadRotationFromMetadata(info);
//_aspect = ValidateAspectRatio(info->sample_aspect_ratio);
_codec = MakeCodecPointer({ .stream = info });
}

int EmojiGenerator::Impl::Read(void *opaque, uint8_t *buf, int buf_size) {
return static_cast<Impl*>(opaque)->read(buf, buf_size);
}

int EmojiGenerator::Impl::read(uint8_t *buf, int buf_size) {
const auto available = _bytes.size() - _deviceOffset;
if (available <= 0) {
return -1;
}
const auto fill = std::min(available, buf_size);
memcpy(buf, _bytes.data() + _deviceOffset, fill);
_deviceOffset += fill;
return fill;
}

int64_t EmojiGenerator::Impl::Seek(
void *opaque,
int64_t offset,
int whence) {
return static_cast<Impl*>(opaque)->seek(offset, whence);
}

int64_t EmojiGenerator::Impl::seek(int64_t offset, int whence) {
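// AVSEEK_SIZE asks for the total stream size rather than a seek.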
if (whence == AVSEEK_SIZE) {
return _bytes.size();
}
const auto now = [&] {
switch (whence) {
case SEEK_SET: return offset;
case SEEK_CUR: return _deviceOffset + offset;
case SEEK_END: return _bytes.size() + offset;
}
return int64_t(-1);
}();
if (now < 0 || now > _bytes.size()) {
return -1;
}
_deviceOffset = now;
return now;
}

EmojiGenerator::Frame EmojiGenerator::Impl::renderCurrent(
QImage storage,
QSize size,
Qt::AspectRatioMode mode) {
Expects(_current.frame != nullptr);
const auto frame = _current.frame.get();
const auto width = frame->width;
const auto height = frame->height;
if (!width || !height) {
LOG(("Webm Error: Bad frame size: %1x%2 ").arg(width).arg(height));
return {};
}
auto scaled = QSize(width, height).scaled(size, mode);
if (!scaled.isEmpty() && rotationSwapWidthHeight()) {
scaled.transpose();
}
if (!GoodStorageForFrame(storage, scaled)) {
storage = CreateFrameStorage(scaled);
}
const auto srcFormat = (frame->format == AV_PIX_FMT_NONE)
? _codec->pix_fmt
: frame->format;
const auto srcSize = QSize(frame->width, frame->height);
const auto dstFormat = AV_PIX_FMT_BGRA;
const auto dstSize = scaled;
const auto bgra = (srcFormat == AV_PIX_FMT_BGRA);
const auto withAlpha = bgra || (srcFormat == AV_PIX_FMT_YUVA420P);
const auto toPerLine = storage.bytesPerLine();
auto to = storage.bits();
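// Fast path: the frame is already BGRA at the target size, so the
// rows can be copied directly.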
if (srcSize == dstSize && bgra) {
const auto fromPerLine = frame->linesize[0];
const auto perLine = std::min(fromPerLine, toPerLine);
auto from = frame->data[0];
for (auto y = 0, height = srcSize.height(); y != height; ++y) {
memcpy(to, from, perLine);
from += fromPerLine;
to += toPerLine;
}
} else {
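// General path: convert and scale through swscale straight into
// the QImage storage.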
_scale = MakeSwscalePointer(
srcSize,
srcFormat,
dstSize,
dstFormat,
&_scale);
Assert(_scale != nullptr);
// AV_NUM_DATA_POINTERS is the size of the AVFrame data/linesize arrays.
uint8_t *toData[AV_NUM_DATA_POINTERS] = { to, nullptr };
int toLinesize[AV_NUM_DATA_POINTERS] = { toPerLine, 0 };
sws_scale(
_scale.get(),
frame->data,
frame->linesize,
0,
frame->height,
toData,
toLinesize);
}
if (withAlpha) {
PremultiplyInplace(storage);
}
if (_rotation != 0) {
auto transform = QTransform();
transform.rotate(_rotation);
storage = storage.transformed(transform);
}
ClearFrameMemory(_current.frame.get());
const auto duration = _next.frame
? (_next.position - _current.position)
: _current.duration;
return {
.image = std::move(storage),
.duration = duration,
};
}
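
// Keep one decoded frame of lookahead: the current frame's duration
// is taken from the next frame's position when one is available.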
EmojiGenerator::Frame EmojiGenerator::Impl::renderNext(
QImage storage,
QSize size,
Qt::AspectRatioMode mode) {
if (!_current.frame) {
readNextFrame();
}
std::swap(_current, _next);
if (!_current.frame) {
return {};
}
readNextFrame();
return renderCurrent(std::move(storage), size, mode);
}
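
// Translate pts and pkt_duration from the stream time base into
// milliseconds, keeping successive frame delays consistent.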
void EmojiGenerator::Impl::resolveNextFrameTiming() {
const auto base = _format->streams[_streamId]->time_base;
const auto duration = _next.frame->pkt_duration;
const auto framePts = _next.frame->pts;
auto framePosition = (framePts * 1000LL * base.num) / base.den;
_currentFrameDelay = _nextFrameDelay;
if (_framePosition + _currentFrameDelay < framePosition) {
_currentFrameDelay = int32(framePosition - _framePosition);
} else if (framePosition < _framePosition + _currentFrameDelay) {
framePosition = _framePosition + _currentFrameDelay;
}
if (duration == AV_NOPTS_VALUE) {
_nextFrameDelay = 0;
} else {
_nextFrameDelay = (duration * 1000LL * base.num) / base.den;
}
_framePosition = framePosition;
_next.position = _framePosition;
_next.duration = _nextFrameDelay;
}
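
// Classic send/receive decode loop: drain avcodec_receive_frame()
// until it reports EAGAIN, then feed the next packet of our stream.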
void EmojiGenerator::Impl::readNextFrame() {
auto frame = _next.frame ? base::take(_next.frame) : MakeFramePointer();
while (true) {
auto result = avcodec_receive_frame(_codec.get(), frame.get());
if (result >= 0) {
if (frame->width * frame->height > kMaxArea) {
return;
}
_next.frame = std::move(frame);
resolveNextFrameTiming();
return;
}
if (result == AVERROR_EOF) {
return;
} else if (result != AVERROR(EAGAIN)) {
LOG(("Webm Error: Unable to avcodec_receive_frame(), ")
+ wrapError(result));
return;
}
auto packet = Packet();
auto finished = false;
do {
const auto result = av_read_frame(
_format.get(),
&packet.fields());
if (result == AVERROR_EOF) {
finished = true;
break;
} else if (result < 0) {
LOG(("Webm Error: Unable to av_read_frame(), ")
+ wrapError(result));
return;
}
} while (packet.fields().stream_index != _streamId);
if (finished) {
continue;
}
const auto native = &packet.fields();
const auto guard = gsl::finally([
&,
size = native->size,
data = native->data
] {
native->size = size;
native->data = data;
packet = Packet();
});
result = avcodec_send_packet(_codec.get(), native);
if (result < 0) {
LOG(("Webm Error: Unable to avcodec_send_packet(), ")
+ wrapError(result));
return;
}
}
}

QString EmojiGenerator::Impl::wrapError(int result) const {
auto error = std::array<char, AV_ERROR_MAX_STRING_SIZE>{};
return u"error %1, %2"_q
.arg(result)
.arg(av_make_error_string(error.data(), error.size(), result));
}

EmojiGenerator::EmojiGenerator(const QByteArray &bytes)
: _impl(std::make_unique<Impl>(bytes)) {
}

EmojiGenerator::~EmojiGenerator() = default;
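
// A webm stream does not declare its frame count up front; returning
// 0 presumably signals "unknown" to the Ui::FrameGenerator consumer.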
int EmojiGenerator::count() {
return 0;
}

EmojiGenerator::Frame EmojiGenerator::renderNext(
QImage storage,
QSize size,
Qt::AspectRatioMode mode) {
return _impl->renderNext(std::move(storage), size, mode);
}

} // namespace FFmpeg

View file

@@ -0,0 +1,34 @@
/*
This file is part of Telegram Desktop,
the official desktop application for the Telegram messaging service.
For license and copyright information please follow this link:
https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#pragma once

#include "ui/effects/frame_generator.h"

#include <QtGui/QImage>

namespace FFmpeg {
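
// Generates frames of a webm custom emoji from in-memory bytes,
// implementing the shared Ui::FrameGenerator interface.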
class EmojiGenerator final : public Ui::FrameGenerator {
public:
explicit EmojiGenerator(const QByteArray &bytes);
~EmojiGenerator();

int count() override;
Frame renderNext(
QImage storage,
QSize size,
Qt::AspectRatioMode mode = Qt::IgnoreAspectRatio) override;

private:
class Impl;
std::unique_ptr<Impl> _impl;
};

} // namespace FFmpeg

View file

@@ -270,6 +270,13 @@ void Renderer::frameReady(
}
const auto explicitRepaint = (_cache.frames() == _cache.currentFrame());
_cache.add(duration, frame);
+if (explicitRepaint && _repaint) {
+	_repaint();
+}
+if (!duration) {
+	finish();
+	return;
+}
const auto size = _cache.size();
const auto guard = base::make_weak(this);
crl::async([
@@ -292,9 +299,6 @@ void Renderer::frameReady(
std::move(frame.image));
});
});
-if (explicitRepaint && _repaint) {
-	_repaint();
-}
}

void Renderer::finish() {

View file

@@ -10,6 +10,8 @@ init_target(lib_ffmpeg)
nice_target_sources(lib_ffmpeg ${src_loc}
PRIVATE
+ffmpeg/ffmpeg_emoji.cpp
+ffmpeg/ffmpeg_emoji.h
ffmpeg/ffmpeg_utility.cpp
ffmpeg/ffmpeg_utility.h
)
@@ -22,6 +24,7 @@ PUBLIC
target_link_libraries(lib_ffmpeg
PUBLIC
desktop-app::lib_base
+desktop-app::lib_ui
desktop-app::external_ffmpeg
)

@@ -1 +1 @@
-Subproject commit 6ef5ec3410ef5db33cb6413a7f05cc87a2c970bd
+Subproject commit cb5296a6b0e14a608cb97d3cafe8971ea25e7f56