
Merge branch 'task/#6088_ffmpeg_api_deprecation_warnings' into 'master'

#6088 ffmpeg api deprecation warnings

Closes #6088

See merge request OpenMW/openmw!1504
psi29a 2022-01-06 13:08:45 +00:00
commit 6a2cf2e358
2 changed files with 69 additions and 61 deletions
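
A large part of the change deals with FFmpeg deprecating av_init_packet() and direct use of stack-allocated AVPacket objects: packets are instead obtained from av_packet_alloc() and owned through std::unique_ptr deleters. Below is a minimal sketch of that ownership pattern; it is illustrative only (the AVPacketFree deleter and PacketPtr alias are hypothetical names, not the AVPacketUnref/PacketListFree deleters used in the actual code).

// Sketch only: heap-allocated packet owned by a unique_ptr deleter,
// replacing the deprecated "AVPacket pkt; av_init_packet(&pkt);" pattern.
extern "C"
{
#include <libavcodec/avcodec.h>
}

#include <memory>

struct AVPacketFree
{
    // av_packet_free() unreferences the packet's buffers and frees the shell.
    void operator()(AVPacket* pkt) const { av_packet_free(&pkt); }
};

using PacketPtr = std::unique_ptr<AVPacket, AVPacketFree>;

int main()
{
    PacketPtr packet{av_packet_alloc()};
    if (!packet)
        return 1;

    // ... av_read_frame() / avcodec_send_packet() would operate on packet.get() ...

    return 0; // the deleter releases the packet automatically
}

The diff itself keeps its narrower AVPacketUnref deleter and hands packet data around explicitly with av_packet_move_ref(), as shown in the hunks below.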

First changed file:

@@ -4,10 +4,12 @@
 #include <cassert>
 #include <cstddef>
 #include <iostream>
+#include <memory>
 #include <thread>
 #include <chrono>

 #include <osg/Texture2D>
+#include <utility>

 #if defined(_MSC_VER)
 #pragma warning (push)
@@ -62,20 +64,20 @@ namespace
             av_frame_free(&frame);
         }
     };
-
-    template<class T>
-    struct AVFree
-    {
-        void operator()(T* frame) const
-        {
-            av_free(&frame);
-        }
-    };
 }

 namespace Video
 {

+struct PacketListFree
+{
+    void operator()(Video::PacketList* list) const
+    {
+        av_packet_free(&list->pkt);
+        av_free(&list);
+    }
+};
+
 VideoState::VideoState()
     : mAudioFactory(nullptr)
     , format_ctx(nullptr)
@@ -95,7 +97,7 @@ VideoState::VideoState()
 {
     mFlushPktData = flush_pkt.data;

-    // This is not needed anymore above FFMpeg version 4.0
+    // This is not needed any more above FFMpeg version 4.0
 #if LIBAVCODEC_VERSION_INT < 3805796
     av_register_all();
 #endif
@@ -114,25 +116,29 @@ void VideoState::setAudioFactory(MovieAudioFactory *factory)
 void PacketQueue::put(AVPacket *pkt)
 {
-    std::unique_ptr<AVPacketList, AVFree<AVPacketList>> pkt1(static_cast<AVPacketList*>(av_malloc(sizeof(AVPacketList))));
+    std::unique_ptr<PacketList, PacketListFree> pkt1(static_cast<PacketList*>(av_malloc(sizeof(PacketList))));
     if(!pkt1) throw std::bad_alloc();

     if(pkt == &flush_pkt)
-        pkt1->pkt = *pkt;
+        pkt1->pkt = pkt;
     else
-        av_packet_move_ref(&pkt1->pkt, pkt);
+    {
+        pkt1->pkt = av_packet_alloc();
+        av_packet_move_ref(pkt1->pkt, pkt);
+    }
+
     pkt1->next = nullptr;

     std::lock_guard<std::mutex> lock(this->mutex);
-    AVPacketList* ptr = pkt1.release();
+    PacketList* ptr = pkt1.release();
     if(!last_pkt)
         this->first_pkt = ptr;
     else
         this->last_pkt->next = ptr;
     this->last_pkt = ptr;
     this->nb_packets++;
-    this->size += ptr->pkt.size;
+    this->size += ptr->pkt->size;
     this->cond.notify_one();
 }
@@ -141,17 +147,17 @@ int PacketQueue::get(AVPacket *pkt, VideoState *is)
     std::unique_lock<std::mutex> lock(this->mutex);
     while(!is->mQuit)
     {
-        AVPacketList *pkt1 = this->first_pkt;
+        PacketList *pkt1 = this->first_pkt;
         if(pkt1)
         {
             this->first_pkt = pkt1->next;
             if(!this->first_pkt)
                 this->last_pkt = nullptr;
             this->nb_packets--;
-            this->size -= pkt1->pkt.size;
+            this->size -= pkt1->pkt->size;

             av_packet_unref(pkt);
-            av_packet_move_ref(pkt, &pkt1->pkt);
+            av_packet_move_ref(pkt, pkt1->pkt);
             av_free(pkt1);

             return 1;
@@ -173,14 +179,14 @@ void PacketQueue::flush()
 void PacketQueue::clear()
 {
-    AVPacketList *pkt, *pkt1;
+    PacketList *pkt, *pkt1;

     std::lock_guard<std::mutex> lock(this->mutex);
     for(pkt = this->first_pkt; pkt != nullptr; pkt = pkt1)
     {
         pkt1 = pkt->next;
-        if (pkt->pkt.data != flush_pkt.data)
-            av_packet_unref(&pkt->pkt);
+        if (pkt->pkt->data != flush_pkt.data)
+            av_packet_unref(pkt->pkt);
         av_freep(&pkt);
     }
     this->last_pkt = nullptr;
@@ -304,7 +310,7 @@ void VideoState::video_refresh()
         VideoPicture* vp = &this->pictq[this->pictq_rindex];
         this->video_display(vp);

-        this->pictq_rindex = (pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE;
+        this->pictq_rindex = (pictq_rindex+1) % pictq.size();
         this->frame_last_pts = vp->pts;
         this->pictq_size--;
         this->pictq_cond.notify_one();
@@ -321,12 +327,12 @@ void VideoState::video_refresh()
         for (; i<this->pictq_size-1; ++i)
         {
             if (this->pictq[pictq_rindex].pts + threshold <= this->get_master_clock())
-                this->pictq_rindex = (this->pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE; // not enough time to show this picture
+                this->pictq_rindex = (this->pictq_rindex+1) % pictq.size(); // not enough time to show this picture
             else
                 break;
         }

-        assert (this->pictq_rindex < VIDEO_PICTURE_ARRAY_SIZE);
+        assert (this->pictq_rindex < pictq.size());
         VideoPicture* vp = &this->pictq[this->pictq_rindex];
         this->video_display(vp);
@@ -336,7 +342,7 @@ void VideoState::video_refresh()
         this->pictq_size -= i;
         // update queue for next picture
         this->pictq_size--;
-        this->pictq_rindex = (this->pictq_rindex+1) % VIDEO_PICTURE_ARRAY_SIZE;
+        this->pictq_rindex = (this->pictq_rindex+1) % pictq.size();
         this->pictq_cond.notify_one();
     }
 }
@@ -386,7 +392,7 @@ int VideoState::queue_picture(const AVFrame &pFrame, double pts)
               0, this->video_ctx->height, vp->rgbaFrame->data, vp->rgbaFrame->linesize);

     // now we inform our display thread that we have a pic ready
-    this->pictq_windex = (this->pictq_windex+1) % VIDEO_PICTURE_ARRAY_SIZE;
+    this->pictq_windex = (this->pictq_windex+1) % pictq.size();
     this->pictq_size++;

     return 0;
@@ -415,7 +421,7 @@ double VideoState::synchronize_video(const AVFrame &src_frame, double pts)
 class VideoThread
 {
 public:
-    VideoThread(VideoState* self)
+    explicit VideoThread(VideoState* self)
         : mVideoState(self)
         , mThread([this]
         {
@@ -439,9 +445,8 @@ public:
     void run()
     {
         VideoState* self = mVideoState;
-        AVPacket packetData;
-        av_init_packet(&packetData);
-        std::unique_ptr<AVPacket, AVPacketUnref> packet(&packetData);
+        AVPacket* packetData = av_packet_alloc();
+        std::unique_ptr<AVPacket, AVPacketUnref> packet(packetData);
         std::unique_ptr<AVFrame, AVFrameFree> pFrame{av_frame_alloc()};

         while(self->videoq.get(packet.get(), self) >= 0)
@@ -491,7 +496,7 @@ private:
 class ParseThread
 {
 public:
-    ParseThread(VideoState* self)
+    explicit ParseThread(VideoState* self)
         : mVideoState(self)
         , mThread([this] { run(); })
     {
@@ -507,9 +512,8 @@ public:
         VideoState* self = mVideoState;
         AVFormatContext *pFormatCtx = self->format_ctx;

-        AVPacket packetData;
-        av_init_packet(&packetData);
-        std::unique_ptr<AVPacket, AVPacketUnref> packet(&packetData);
+        AVPacket* packetData = av_packet_alloc();
+        std::unique_ptr<AVPacket, AVPacketUnref> packet(packetData);

         try
         {
@@ -632,13 +636,11 @@ bool VideoState::update()
 int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
 {
-    const AVCodec *codec;
-
     if(stream_index < 0 || stream_index >= static_cast<int>(pFormatCtx->nb_streams))
         return -1;

     // Get a pointer to the codec context for the video stream
-    codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codecpar->codec_id);
+    const AVCodec *codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codecpar->codec_id);
     if(!codec)
     {
         fprintf(stderr, "Unsupported codec!\n");
@@ -654,7 +656,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
         this->audio_ctx = avcodec_alloc_context3(codec);
         avcodec_parameters_to_context(this->audio_ctx, pFormatCtx->streams[stream_index]->codecpar);

-        // This is not needed anymore above FFMpeg version 4.0
+        // This is not needed any more above FFMpeg version 4.0
 #if LIBAVCODEC_VERSION_INT < 3805796
         av_codec_set_pkt_timebase(this->audio_ctx, pFormatCtx->streams[stream_index]->time_base);
 #endif
@@ -674,7 +676,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
         }

         mAudioDecoder = mAudioFactory->createDecoder(this);
-        if (!mAudioDecoder.get())
+        if (!mAudioDecoder)
         {
             std::cerr << "Failed to create audio decoder, can not play audio stream" << std::endl;
             avcodec_free_context(&this->audio_ctx);
@@ -691,7 +693,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
         this->video_ctx = avcodec_alloc_context3(codec);
         avcodec_parameters_to_context(this->video_ctx, pFormatCtx->streams[stream_index]->codecpar);

-        // This is not needed anymore above FFMpeg version 4.0
+        // This is not needed any more above FFMpeg version 4.0
 #if LIBAVCODEC_VERSION_INT < 3805796
         av_codec_set_pkt_timebase(this->video_ctx, pFormatCtx->streams[stream_index]->time_base);
 #endif
@@ -702,7 +704,7 @@ int VideoState::stream_open(int stream_index, AVFormatContext *pFormatCtx)
             return -1;
         }

-        this->video_thread.reset(new VideoThread(this));
+        this->video_thread = std::make_unique<VideoThread>(this);
         break;

     default:
@@ -721,8 +723,8 @@ void VideoState::init(std::shared_ptr<std::istream> inputstream, const std::stri
     this->av_sync_type = AV_SYNC_DEFAULT;
     this->mQuit = false;

-    this->stream = inputstream;
-    if(!this->stream.get())
+    this->stream = std::move(inputstream);
+    if(!this->stream)
         throw std::runtime_error("Failed to open video resource");

     AVIOContext *ioCtx = avio_alloc_context(nullptr, 0, 0, this, istream_read, istream_write, istream_seek);
@@ -789,7 +791,7 @@ void VideoState::init(std::shared_ptr<std::istream> inputstream, const std::stri
     }

-    this->parse_thread.reset(new ParseThread(this));
+    this->parse_thread = std::make_unique<ParseThread>(this);
 }

 void VideoState::deinit()
@@ -801,11 +803,11 @@ void VideoState::deinit()
     mAudioDecoder.reset();

-    if (this->parse_thread.get())
+    if (this->parse_thread)
     {
         this->parse_thread.reset();
     }

-    if (this->video_thread.get())
+    if (this->video_thread)
     {
         this->video_thread.reset();
     }
@@ -850,9 +852,9 @@ void VideoState::deinit()
         mTexture = nullptr;
     }

-    // Dellocate RGBA frame queue.
-    for (std::size_t i = 0; i < VIDEO_PICTURE_ARRAY_SIZE; ++i)
-        this->pictq[i].rgbaFrame = nullptr;
+    // Deallocate RGBA frame queue.
+    for (auto & i : this->pictq)
+        i.rgbaFrame = nullptr;
 }
@@ -870,14 +872,14 @@ double VideoState::get_master_clock()
     return this->get_external_clock();
 }

-double VideoState::get_video_clock()
+double VideoState::get_video_clock() const
 {
     return this->frame_last_pts;
 }

 double VideoState::get_audio_clock()
 {
-    if (!mAudioDecoder.get())
+    if (!mAudioDecoder)
         return 0.0;
     return mAudioDecoder->getAudioClock();
 }
@@ -896,7 +898,7 @@ void VideoState::seekTo(double time)
     mSeekRequested = true;
 }

-double VideoState::getDuration()
+double VideoState::getDuration() const
 {
     return this->format_ctx->duration / 1000000.0;
 }
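
The other recurring edit above is dropping AVPacketList, which newer FFmpeg releases deprecate and remove from the public API, in favour of the project's own PacketList node that stores an AVPacket pointer (declared in the header below). The following standalone sketch shows the same linked-queue idea with hypothetical push/pop helpers; locking and error handling are omitted, and none of these names are the actual PacketQueue API.

// Sketch only: a hand-rolled packet list in the spirit of PacketList above.
extern "C"
{
#include <libavcodec/avcodec.h>
}

struct PacketNode
{
    AVPacket* pkt = nullptr;
    PacketNode* next = nullptr;
};

// Append src to the tail, moving its reference into a freshly allocated packet.
void push(PacketNode*& head, PacketNode*& tail, AVPacket* src)
{
    auto* node = new PacketNode;
    node->pkt = av_packet_alloc();
    av_packet_move_ref(node->pkt, src); // src is reset and can be reused

    if (tail)
        tail->next = node;
    else
        head = node;
    tail = node;
}

// Pop the head into dst; returns false when the list is empty.
bool pop(PacketNode*& head, PacketNode*& tail, AVPacket* dst)
{
    if (!head)
        return false;

    PacketNode* node = head;
    head = node->next;
    if (!head)
        tail = nullptr;

    av_packet_unref(dst);               // drop whatever dst held before
    av_packet_move_ref(dst, node->pkt); // hand the payload to the caller
    av_packet_free(&node->pkt);         // free the now-empty packet shell
    delete node;
    return true;
}

int main()
{
    PacketNode* head = nullptr;
    PacketNode* tail = nullptr;

    AVPacket* src = av_packet_alloc();
    push(head, tail, src);

    AVPacket* dst = av_packet_alloc();
    while (pop(head, tail, dst)) { /* decode dst here */ }

    av_packet_free(&dst);
    av_packet_free(&src);
    return 0;
}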

Second changed file:

@@ -1,8 +1,9 @@
 #ifndef VIDEOPLAYER_VIDEOSTATE_H
 #define VIDEOPLAYER_VIDEOSTATE_H

-#include <stdint.h>
+#include <cstdint>
 #include <atomic>
+#include <array>
 #include <vector>
 #include <memory>
 #include <string>
@@ -40,13 +41,10 @@ extern "C"
 #include "videodefs.hpp"

 #define VIDEO_PICTURE_QUEUE_SIZE 50
-// allocate one extra to make sure we do not overwrite the osg::Image currently set on the texture
-#define VIDEO_PICTURE_ARRAY_SIZE (VIDEO_PICTURE_QUEUE_SIZE+1)

 extern "C"
 {
     struct SwsContext;
-    struct AVPacketList;
     struct AVPacket;
     struct AVFormatContext;
     struct AVStream;
@@ -78,6 +76,13 @@ struct ExternalClock
     void set(uint64_t time);
 };

+class PacketList
+{
+public:
+    AVPacket* pkt = nullptr;
+    PacketList *next = nullptr;
+};
+
 struct PacketQueue {
     PacketQueue()
         : first_pkt(nullptr), last_pkt(nullptr), flushing(false), nb_packets(0), size(0)
@@ -85,7 +90,7 @@ struct PacketQueue {
     ~PacketQueue()
     { clear(); }

-    AVPacketList *first_pkt, *last_pkt;
+    PacketList *first_pkt, *last_pkt;
     std::atomic<bool> flushing;
     std::atomic<int> nb_packets;
     std::atomic<int> size;
@@ -129,7 +134,7 @@ struct VideoState {
     void setPaused(bool isPaused);
     void seekTo(double time);

-    double getDuration();
+    double getDuration() const;

     int stream_open(int stream_index, AVFormatContext *pFormatCtx);
@@ -145,7 +150,7 @@ struct VideoState {
     double synchronize_video(const AVFrame &src_frame, double pts);

     double get_audio_clock();
-    double get_video_clock();
+    double get_video_clock() const;
     double get_external_clock();
     double get_master_clock();
@@ -178,8 +183,9 @@ struct VideoState {
     PacketQueue videoq;

     SwsContext* sws_context;
     int sws_context_w, sws_context_h;
-    VideoPicture pictq[VIDEO_PICTURE_ARRAY_SIZE];
-    int pictq_size, pictq_rindex, pictq_windex;
+    std::array<VideoPicture, VIDEO_PICTURE_QUEUE_SIZE+1> pictq; // allocate one extra to make sure we do not overwrite the osg::Image currently set on the texture
+    int pictq_size;
+    unsigned long pictq_rindex, pictq_windex;
     std::mutex pictq_mutex;
     std::condition_variable pictq_cond;
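
Finally, with the VIDEO_PICTURE_ARRAY_SIZE macro gone, the picture queue becomes a std::array and every ring-buffer index wraps with pictq.size(). A short self-contained illustration of that indexing follows; Picture and QUEUE_SIZE are stand-ins, not the real VideoPicture type or the project's constant.

// Illustrative ring-buffer indexing over std::array, mirroring pictq above.
#include <array>
#include <cstddef>
#include <iostream>

struct Picture { double pts = 0.0; };  // stand-in for VideoPicture

constexpr std::size_t QUEUE_SIZE = 50; // stand-in for VIDEO_PICTURE_QUEUE_SIZE

int main()
{
    // One extra slot, as in the header above, so the element currently being
    // displayed is never overwritten by the writer.
    std::array<Picture, QUEUE_SIZE + 1> pictq{};

    unsigned long windex = 0;
    unsigned long rindex = 0;

    // Writer: store a picture, then advance and wrap using the container size.
    pictq[windex].pts = 1.25;
    windex = (windex + 1) % pictq.size();

    // Reader: consume, then wrap the same way.
    std::cout << pictq[rindex].pts << '\n';
    rindex = (rindex + 1) % pictq.size();

    return 0;
}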