To packetize the H.264 raw stream, av_parser_parse2() (called by av_read_frame()) knows that it has received a full frame only after it has received some data for the next frame. As a consequence, the client always waited until the next frame before sending the current frame to the decoder! On the device side, we know packet boundaries. To reduce latency, make the device always transmit the "frame meta" so that the stream can be packetized manually (this was already implemented to send the PTS, but was only enabled during recording). On the client side, replace av_read_frame() with manual packetizing and parsing. <https://stackoverflow.com/questions/50682518/replacing-av-read-frame-to-reduce-delay> <https://trac.ffmpeg.org/ticket/3354>
42 lines
840 B
C
42 lines
840 B
C
#ifndef STREAM_H
|
|
#define STREAM_H
|
|
|
|
#include <stdbool.h>
|
|
#include <stdint.h>
|
|
#include <libavformat/avformat.h>
|
|
#include <SDL2/SDL_atomic.h>
|
|
#include <SDL2/SDL_thread.h>
|
|
|
|
#include "net.h"
|
|
|
|
struct video_buffer;
|
|
|
|
// State for the video stream thread: reads raw H.264 from the device
// socket, packetizes it manually, and forwards packets to the decoder
// and/or recorder.
struct stream {
    socket_t socket;                   // connection to the device
    struct video_buffer *video_buffer; // NOTE(review): not referenced by the
                                       // declarations below — presumably used
                                       // by the stream thread; confirm in .c
    SDL_Thread *thread;                // thread running the stream loop
    struct decoder *decoder;           // presumably optional (may be NULL) —
                                       // TODO confirm against stream.c
    struct recorder *recorder;         // presumably optional (may be NULL) —
                                       // TODO confirm against stream.c
    AVCodecContext *codec_ctx;
    AVCodecParserContext *parser;      // replaces av_read_frame() for manual
                                       // packetizing (see commit message)
    // successive packets may need to be concatenated, until a non-config
    // packet is available
    bool has_pending;  // true when 'pending' holds buffered config data
    AVPacket pending;  // only valid while has_pending is true
};
|
|
|
|
// Initialize 'stream' to read from 'socket'.
// 'decoder' and 'recorder' are stored as packet sinks — presumably either
// may be NULL to disable it (TODO confirm against stream.c).
void
stream_init(struct stream *stream, socket_t socket,
            struct decoder *decoder, struct recorder *recorder);

// Start the stream thread.
// Returns false on failure (thread could not be created — confirm exact
// failure modes in stream.c).
bool
stream_start(struct stream *stream);

// Request the stream to stop; does not wait for termination
// (NOTE(review): presumably asynchronous — pair with stream_join()).
void
stream_stop(struct stream *stream);

// Wait for the stream thread to terminate.
void
stream_join(struct stream *stream);

#endif
|