41 #define ENABLE_VAAPI 0
44 #define MAX_SUPPORTED_WIDTH 1950
45 #define MAX_SUPPORTED_HEIGHT 1100
48 #include "libavutil/hwcontext_vaapi.h"
// NOTE(review): local mirror of FFmpeg's *private* VAAPIDecodeContext layout.
// It exists only so the code below can reach va_config through
// pCodecCtx->priv_data when validating hardware-frame constraints.
// This must match the exact FFmpeg version being built against — confirm on
// every FFmpeg upgrade. (Several fields are elided in this excerpt.)
50 typedef struct VAAPIDecodeContext {
52 VAEntrypoint va_entrypoint;
54 VAContextID va_context;
56 #if FF_API_STRUCT_VAAPI_CONTEXT
// Legacy struct vaapi_context support (deprecated FFmpeg API path).
59 struct vaapi_context *old_context;
60 AVBufferRef *device_ref;
64 AVHWDeviceContext *device;
65 AVVAAPIDeviceContext *hwctx;
67 AVHWFramesContext *frames;
68 AVVAAPIFramesContext *hwfc;
70 enum AVPixelFormat surface_format;
86 : last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
87 audio_pts_offset(99999), video_pts_offset(99999),
path(
path), is_video_seek(true), check_interlace(false),
88 check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
89 prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
90 current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
103 if (inspect_reader) {
125 if (abs(diff) <= amount)
// get_format callback installed on the decoder context (see pCodecCtx->get_format
// assignment below): scans the codec's proposed pixel-format list and picks the
// platform's hardware surface format. Returns AV_PIX_FMT_NONE when no usable
// hardware format is offered, which makes the decoder fall back to software.
// NOTE(review): the switch header, case bodies and matching #endif lines are
// elided in this excerpt.
136 static enum AVPixelFormat get_hw_dec_format(AVCodecContext *ctx,
const enum AVPixelFormat *pix_fmts)
138 const enum AVPixelFormat *p;
// Walk the AV_PIX_FMT_NONE-terminated list supplied by the codec.
140 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
142 #if defined(__linux__)
144 case AV_PIX_FMT_VAAPI:
149 case AV_PIX_FMT_VDPAU:
157 case AV_PIX_FMT_DXVA2_VLD:
162 case AV_PIX_FMT_D3D11:
168 #if defined(__APPLE__)
170 case AV_PIX_FMT_VIDEOTOOLBOX:
177 case AV_PIX_FMT_CUDA:
// No acceptable hardware format found in the proposed list.
193 return AV_PIX_FMT_NONE;
// Whitelist of codec IDs for which hardware decoding is attempted.
// NOTE(review): the switch header, the return statements and the default case
// are elided in this excerpt — presumably returns non-zero for the listed
// codecs; confirm against the full source.
196 int FFmpegReader::IsHardwareDecodeSupported(
int codecid)
200 case AV_CODEC_ID_H264:
201 case AV_CODEC_ID_MPEG2VIDEO:
202 case AV_CODEC_ID_VC1:
203 case AV_CODEC_ID_WMV1:
204 case AV_CODEC_ID_WMV2:
205 case AV_CODEC_ID_WMV3:
226 if (avformat_open_input(&pFormatCtx,
path.c_str(), NULL, NULL) != 0)
230 if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
236 for (
unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
238 if (
AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
242 if (
AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
246 if (videoStream == -1 && audioStream == -1)
250 if (videoStream != -1) {
255 pStream = pFormatCtx->streams[videoStream];
261 AVCodec *pCodec = avcodec_find_decoder(codecId);
262 AVDictionary *
opts = NULL;
263 int retry_decode_open = 2;
268 if (
hw_de_on && (retry_decode_open==2)) {
270 hw_de_supported = IsHardwareDecodeSupported(pCodecCtx->codec_id);
273 retry_decode_open = 0;
278 if (pCodec == NULL) {
279 throw InvalidCodec(
"A valid video codec could not be found for this file.",
path);
283 av_dict_set(&
opts,
"strict",
"experimental", 0);
287 int i_decoder_hw = 0;
289 char *adapter_ptr = NULL;
292 fprintf(stderr,
"Hardware decoding device number: %d\n", adapter_num);
295 pCodecCtx->get_format = get_hw_dec_format;
297 if (adapter_num < 3 && adapter_num >=0) {
298 #if defined(__linux__)
299 snprintf(adapter,
sizeof(adapter),
"/dev/dri/renderD%d", adapter_num+128);
300 adapter_ptr = adapter;
302 switch (i_decoder_hw) {
304 hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
307 hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
310 hw_de_av_device_type = AV_HWDEVICE_TYPE_VDPAU;
313 hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
316 hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
320 #elif defined(_WIN32)
323 switch (i_decoder_hw) {
325 hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
328 hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
331 hw_de_av_device_type = AV_HWDEVICE_TYPE_D3D11VA;
334 hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
337 hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
340 #elif defined(__APPLE__)
343 switch (i_decoder_hw) {
345 hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
348 hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
351 hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
361 #if defined(__linux__)
362 if( adapter_ptr != NULL && access( adapter_ptr, W_OK ) == 0 ) {
363 #elif defined(_WIN32)
364 if( adapter_ptr != NULL ) {
365 #elif defined(__APPLE__)
366 if( adapter_ptr != NULL ) {
375 hw_device_ctx = NULL;
377 if (av_hwdevice_ctx_create(&hw_device_ctx, hw_de_av_device_type, adapter_ptr, NULL, 0) >= 0) {
378 if (!(pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx))) {
420 pCodecCtx->thread_type &= ~FF_THREAD_FRAME;
424 if (avcodec_open2(pCodecCtx, pCodec, &
opts) < 0)
425 throw InvalidCodec(
"A video codec was found, but could not be opened.",
path);
429 AVHWFramesConstraints *constraints = NULL;
430 void *hwconfig = NULL;
431 hwconfig = av_hwdevice_hwconfig_alloc(hw_device_ctx);
435 ((AVVAAPIHWConfig *)hwconfig)->config_id = ((VAAPIDecodeContext *)(pCodecCtx->priv_data))->va_config;
436 constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx,hwconfig);
439 if (pCodecCtx->coded_width < constraints->min_width ||
440 pCodecCtx->coded_height < constraints->min_height ||
441 pCodecCtx->coded_width > constraints->max_width ||
442 pCodecCtx->coded_height > constraints->max_height) {
445 retry_decode_open = 1;
448 av_buffer_unref(&hw_device_ctx);
449 hw_device_ctx = NULL;
454 ZmqLogger::Instance()->
AppendDebugMethod(
"\nDecode hardware acceleration is used\n",
"Min width :", constraints->min_width,
"Min Height :", constraints->min_height,
"MaxWidth :", constraints->max_width,
"MaxHeight :", constraints->max_height,
"Frame width :", pCodecCtx->coded_width,
"Frame height :", pCodecCtx->coded_height);
455 retry_decode_open = 0;
457 av_hwframe_constraints_free(&constraints);
470 if (pCodecCtx->coded_width < 0 ||
471 pCodecCtx->coded_height < 0 ||
472 pCodecCtx->coded_width > max_w ||
473 pCodecCtx->coded_height > max_h ) {
474 ZmqLogger::Instance()->
AppendDebugMethod(
"DIMENSIONS ARE TOO LARGE for hardware acceleration\n",
"Max Width :", max_w,
"Max Height :", max_h,
"Frame width :", pCodecCtx->coded_width,
"Frame height :", pCodecCtx->coded_height);
476 retry_decode_open = 1;
479 av_buffer_unref(&hw_device_ctx);
480 hw_device_ctx = NULL;
484 ZmqLogger::Instance()->
AppendDebugMethod(
"\nDecode hardware acceleration is used\n",
"Max Width :", max_w,
"Max Height :", max_h,
"Frame width :", pCodecCtx->coded_width,
"Frame height :", pCodecCtx->coded_height);
485 retry_decode_open = 0;
493 retry_decode_open = 0;
495 }
while (retry_decode_open);
504 if (audioStream != -1) {
509 aStream = pFormatCtx->streams[audioStream];
515 AVCodec *aCodec = avcodec_find_decoder(codecId);
521 if (aCodec == NULL) {
522 throw InvalidCodec(
"A valid audio codec could not be found for this file.",
path);
526 AVDictionary *
opts = NULL;
527 av_dict_set(&
opts,
"strict",
"experimental", 0);
530 if (avcodec_open2(aCodecCtx, aCodec, &
opts) < 0)
531 throw InvalidCodec(
"An audio codec was found, but could not be opened.",
path);
541 AVDictionaryEntry *tag = NULL;
542 while ((tag = av_dict_get(pFormatCtx->metadata,
"", tag, AV_DICT_IGNORE_SUFFIX))) {
543 QString str_key = tag->key;
544 QString str_value = tag->value;
545 info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
549 previous_packet_location.
frame = -1;
572 RemoveAVPacket(packet);
578 avcodec_flush_buffers(pCodecCtx);
583 av_buffer_unref(&hw_device_ctx);
584 hw_device_ctx = NULL;
590 avcodec_flush_buffers(aCodecCtx);
596 working_cache.
Clear();
597 missing_frames.
Clear();
602 processed_video_frames.clear();
603 processed_audio_frames.clear();
604 processing_video_frames.clear();
605 processing_audio_frames.clear();
606 missing_audio_frames.clear();
607 missing_video_frames.clear();
608 missing_audio_frames_source.clear();
609 missing_video_frames_source.clear();
610 checked_frames.clear();
614 avformat_close_input(&pFormatCtx);
615 av_freep(&pFormatCtx);
619 largest_frame_processed = 0;
620 seek_audio_frame_found = 0;
621 seek_video_frame_found = 0;
622 current_video_frame = 0;
623 has_missing_frames =
false;
625 last_video_frame.reset();
// Returns true when the "video" stream is actually embedded cover art
// (AV_DISPOSITION_ATTACHED_PIC), i.e. a single attached picture rather than
// a real video stream. Guards against a closed context / missing stream.
629 bool FFmpegReader::HasAlbumArt() {
633 return pFormatCtx && videoStream >= 0 && pFormatCtx->streams[videoStream]
634 && (pFormatCtx->streams[videoStream]->disposition & AV_DISPOSITION_ATTACHED_PIC);
// Populate the reader's `info` struct from the opened audio stream:
// file size, duration, and stream metadata tags.
637 void FFmpegReader::UpdateAudioInfo() {
// File size via the I/O context; -1 when no pb (e.g. custom I/O).
640 info.
file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
// NOTE(review): aStream->duration is an int64 in stream time_base units but is
// compared against a float literal here — confirm intended semantics.
654 if (aStream->duration > 0.0f && aStream->duration >
info.
duration)
// Copy every metadata tag of the audio stream into info.metadata,
// trimming whitespace from the values.
681 AVDictionaryEntry *tag = NULL;
682 while ((tag = av_dict_get(aStream->metadata,
"", tag, AV_DICT_IGNORE_SUFFIX))) {
683 QString str_key = tag->key;
684 QString str_value = tag->value;
685 info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
// Populate the reader's `info` struct from the opened video stream: file size,
// frame rate, pixel aspect ratio, interlace detection, duration and metadata.
// NOTE(review): several interior lines are elided in this excerpt.
689 void FFmpegReader::UpdateVideoInfo() {
696 info.
file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
// Let FFmpeg guess the real frame rate (handles broken/missing stream rates).
703 AVRational framerate = av_guess_frame_rate(pFormatCtx, pStream, NULL);
// Only use the stream's sample aspect ratio when it is actually set.
713 if (pStream->sample_aspect_ratio.num != 0) {
// One-time interlace detection based on the stream/frame field order.
736 if (!check_interlace) {
737 check_interlace =
true;
739 switch(field_order) {
740 case AV_FIELD_PROGRESSIVE:
753 case AV_FIELD_UNKNOWN:
// Unknown field order: retry detection on a later frame.
755 check_interlace =
false;
// Container-level duration (AV_TIME_BASE units) converted to seconds.
772 info.
duration = float(pFormatCtx->duration) / AV_TIME_BASE;
784 is_duration_known =
false;
787 is_duration_known =
true;
// Copy video stream metadata tags into info.metadata (trimmed values).
801 AVDictionaryEntry *tag = NULL;
802 while ((tag = av_dict_get(pStream->metadata,
"", tag, AV_DICT_IGNORE_SUFFIX))) {
803 QString str_key = tag->key;
804 QString str_value = tag->value;
805 info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
810 return this->is_duration_known;
816 throw ReaderClosed(
"The FFmpegReader is closed. Call Open() before calling this method.",
path);
819 if (requested_frame < 1)
825 throw InvalidFile(
"Could not detect the duration of the video or audio stream.",
path);
852 if (last_frame == 0 && requested_frame != 1)
857 int64_t diff = requested_frame - last_frame;
858 if (diff >= 1 && diff <= 20) {
860 frame = ReadStream(requested_frame);
865 Seek(requested_frame);
874 frame = ReadStream(requested_frame);
// Main demux/decode loop: reads packets until the requested frame is available
// in the cache, the packet budget is exhausted, or the stream ends.
// Dispatches video packets to ProcessVideoPacket() and audio packets to the
// audio path, throttling while worker frames are still being processed.
// NOTE(review): many interior lines (loop header, seek handling, cache lookup)
// are elided in this excerpt.
882 std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) {
884 bool end_of_stream =
false;
885 bool check_seek =
false;
886 bool frame_finished =
false;
887 int packet_error = -1;
// Packet budget: decode at least `minimum_packets`, never more than
// `max_packets` per call, to bound latency of a single ReadStream().
890 int packets_processed = 0;
891 int minimum_packets = 1;
892 int max_packets = 4096;
900 packet_error = GetNextPacket();
902 int processing_video_frames_size = 0;
903 int processing_audio_frames_size = 0;
906 processing_video_frames_size = processing_video_frames.size();
907 processing_audio_frames_size = processing_audio_frames.size();
// Back-pressure: sleep while too many frames are still in-flight.
911 while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) {
912 std::this_thread::sleep_for(std::chrono::milliseconds(3));
914 processing_video_frames_size = processing_video_frames.size();
915 processing_audio_frames_size = processing_audio_frames.size();
// Negative read result => end of stream (or read error).
919 if (packet_error < 0) {
921 end_of_stream =
true;
926 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ReadStream (GetNextPacket)",
"requested_frame", requested_frame,
"processing_video_frames_size", processing_video_frames_size,
"processing_audio_frames_size", processing_audio_frames_size,
"minimum_packets", minimum_packets,
"packets_processed", packets_processed,
"is_seeking", is_seeking);
// A video packet arrived: reset the audio-only packet counter.
931 num_packets_since_video_frame = 0;
// Verify an in-progress seek against this (video) packet.
935 check_seek = CheckSeek(
true);
952 frame_finished = GetAVFrame();
955 if (frame_finished) {
// Establish the video PTS offset from the first decoded frame.
957 UpdatePTSOffset(
true);
960 ProcessVideoPacket(requested_frame);
965 else if (
info.
has_audio && packet->stream_index == audioStream) {
967 num_packets_since_video_frame++;
// Verify an in-progress seek against this (audio) packet.
971 check_seek = CheckSeek(
false);
988 UpdatePTSOffset(
false);
// Promote finished working frames to the final cache as we go.
1000 CheckWorkingFrames(
false, requested_frame);
1007 packets_processed++;
// Stop when the requested frame is cached (and the minimum met),
// or when the hard packet cap is hit.
1010 if ((is_cache_found && packets_processed >= minimum_packets) || packets_processed > max_packets)
1016 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ReadStream (Completed)",
"packets_processed", packets_processed,
"end_of_stream", end_of_stream,
"largest_frame_processed", largest_frame_processed,
"Working Cache Count", working_cache.
Count());
// Final sweep of the working cache (end_of_stream finalizes partials).
1021 CheckWorkingFrames(end_of_stream, requested_frame);
1037 std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);
// Read the next packet from the container into a freshly allocated AVPacket,
// freeing the previous one. Returns the av_read_frame() result (negative on
// EOF/error). NOTE(review): raw `new AVPacket()` — ownership is released via
// RemoveAVPacket(); the cleanup path for a failed read is elided here.
1046 int FFmpegReader::GetNextPacket() {
1047 int found_packet = 0;
1048 AVPacket *next_packet;
1049 next_packet =
new AVPacket();
1050 found_packet = av_read_frame(pFormatCtx, next_packet);
// Free the previously held packet before swapping in the new one.
1054 RemoveAVPacket(packet);
1057 if (found_packet >= 0) {
1059 packet = next_packet;
1064 return found_packet;
// Decode the current video packet into pFrame. Uses the modern
// send_packet/receive_frame API (with a hardware download path via
// av_hwframe_transfer_data) and falls back to the legacy
// avcodec_decode_video2 path on older FFmpeg builds.
// Returns whether a complete frame was produced.
// NOTE(review): the #if version guards and error handling between these lines
// are elided in this excerpt.
1068 bool FFmpegReader::GetAVFrame() {
1069 int frameFinished = -1;
1077 ret = avcodec_send_packet(pCodecCtx, packet);
1084 if (ret < 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
1088 AVFrame *next_frame2;
1096 next_frame2 = next_frame;
1100 ret = avcodec_receive_frame(pCodecCtx, next_frame2);
// EAGAIN/EOF: decoder needs more input (or is drained) — not an error.
1101 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
// Hardware surface: download it to a system-memory YUV420P frame.
1110 if (next_frame2->format == hw_de_av_pix_fmt) {
1111 next_frame->format = AV_PIX_FMT_YUV420P;
1112 if ((err = av_hwframe_transfer_data(next_frame,next_frame2,0)) < 0) {
// Copy side data / timestamps, since transfer_data only moves pixels.
1115 if ((err = av_frame_copy_props(next_frame,next_frame2)) < 0) {
1123 next_frame = next_frame2;
1128 if (frameFinished == 0 ) {
// Deep-copy the decoded image into pFrame so the decoder can reuse
// its internal buffers.
1130 av_image_alloc(pFrame->data, pFrame->linesize,
info.
width,
info.
height, (AVPixelFormat)(pStream->codecpar->format), 1);
1131 av_image_copy(pFrame->data, pFrame->linesize, (
const uint8_t**)next_frame->data, next_frame->linesize,
// Legacy decode path (pre send/receive API).
1142 avcodec_decode_video2(pCodecCtx, next_frame, &frameFinished, packet);
1148 if (frameFinished) {
1152 av_picture_copy((AVPicture *) pFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt,
info.
width,
1161 return frameFinished;
// Validate an in-progress seek once a frame of the seeked stream type has
// been found. If we landed past the target frame, seek again further back
// (backoff grows with seek_count²); otherwise the seek succeeded.
// NOTE(review): the early-return guards and final return are partly elided.
1165 bool FFmpegReader::CheckSeek(
bool is_video) {
// Wait until the stream we actually seeked on has produced a frame.
1170 if ((is_video_seek && !seek_video_frame_found) || (!is_video_seek && !seek_audio_frame_found))
// The effective landing position is the max of what audio/video found.
1178 int64_t max_seeked_frame = seek_audio_frame_found;
1179 if (seek_video_frame_found > max_seeked_frame)
1180 max_seeked_frame = seek_video_frame_found;
// Landed at/after the target: overshoot — must seek again earlier.
1183 if (max_seeked_frame >= seeking_frame) {
1185 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckSeek (Too far, seek again)",
"is_video_seek", is_video_seek,
"max_seeked_frame", max_seeked_frame,
"seeking_frame", seeking_frame,
"seeking_pts", seeking_pts,
"seek_video_frame_found", seek_video_frame_found,
"seek_audio_frame_found", seek_audio_frame_found);
// Quadratic backoff: retry 10 * seek_count² frames earlier.
1188 Seek(seeking_frame - (10 * seek_count * seek_count));
1191 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckSeek (Successful)",
"is_video_seek", is_video_seek,
"current_pts", packet->pts,
"seeking_pts", seeking_pts,
"seeking_frame", seeking_frame,
"seek_video_frame_found", seek_video_frame_found,
"seek_audio_frame_found", seek_audio_frame_found);
// Convert the decoded video frame (pFrame) into an RGBA QImage-backed Frame,
// scaled according to the current max size / scale hints, and add it to the
// working cache. Also records seek progress and tracks processed frame ids.
// NOTE(review): several interior lines (sws context setup, buffer fill,
// height_size declaration) are elided in this excerpt.
1205 void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
1207 int64_t current_frame = ConvertVideoPTStoFrame(GetVideoPTS());
// First video frame after a seek marks where the seek landed.
1210 if (!seek_video_frame_found && is_seeking)
1211 seek_video_frame_found = current_frame;
// Skip frames far behind the request (or with no valid frame number).
1214 if ((current_frame < (requested_frame - 20)) or (current_frame == -1)) {
1216 RemoveAVFrame(pFrame);
1233 AVFrame *my_frame = pFrame;
// Mark this frame as in-flight so ReadStream's throttle sees it.
1238 processing_video_frames[current_frame] = current_frame;
1241 AVFrame *pFrameRGB =
nullptr;
1242 uint8_t *buffer =
nullptr;
1246 if (pFrameRGB ==
nullptr)
// Allow scale factors > 1 to grow the target size beyond max_width/height.
1268 max_width = std::max(
float(max_width), max_width * max_scale_x);
1269 max_height = std::max(
float(max_height), max_height * max_scale_y);
1275 QSize width_size(max_width * max_scale_x,
1278 max_height * max_scale_y);
// Pick whichever candidate size satisfies both dimensions.
1280 if (width_size.width() >= max_width && width_size.height() >= max_height) {
1281 max_width = std::max(max_width, width_size.width());
1282 max_height = std::max(max_height, width_size.height());
1284 max_width = std::max(max_width, height_size.width());
1285 max_height = std::max(max_height, height_size.height());
// Preview/preroll path: scale down by the preview ratio.
1292 float preview_ratio = 1.0;
1299 max_width =
info.
width * max_scale_x * preview_ratio;
1300 max_height =
info.
height * max_scale_y * preview_ratio;
// Fit the source into max_width × max_height preserving aspect ratio.
1305 int original_height = height;
1306 if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
1308 float ratio = float(width) / float(height);
1309 int possible_width = round(max_height * ratio);
1310 int possible_height = round(max_width / ratio);
1312 if (possible_width <= max_width) {
// Height-constrained fit.
1314 width = possible_width;
1315 height = max_height;
// Width-constrained fit.
1319 height = possible_height;
// RGBA output buffer (4 bytes per pixel), zero-initialized.
1324 const int bytes_per_pixel = 4;
1325 int buffer_size = width * height * bytes_per_pixel;
1326 buffer =
new unsigned char[buffer_size]();
// Bicubic only when scaling quality matters; bilinear otherwise.
1331 int scale_mode = SWS_FAST_BILINEAR;
1333 scale_mode = SWS_BICUBIC;
1339 sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0,
1340 original_height, pFrameRGB->data, pFrameRGB->linesize);
1343 std::shared_ptr<Frame> f = CreateFrame(current_frame);
// Premultiplied alpha variant vs straight RGBA (branch condition elided).
1348 f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888_Premultiplied, buffer);
1351 f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888, buffer);
1355 working_cache.
Add(f);
// Remember the most recent video frame (used to fill missing frames).
1358 last_video_frame = f;
1364 RemoveAVFrame(my_frame);
1365 sws_freeContext(img_convert_ctx);
// Move this frame from "processing" to "processed".
1370 processing_video_frames.erase(current_frame);
1371 processed_video_frames[current_frame] = current_frame;
1375 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessVideoPacket (After)",
"requested_frame", requested_frame,
"current_frame", current_frame,
"f->number", f->number);
// Decode the current audio packet, resample it to interleaved S16, split the
// samples per channel, and distribute them across one or more Frame objects
// in the working cache starting at target_frame / starting_sample.
// Also tracks PTS statistics and the in-flight / processed audio frame maps.
// NOTE(review): many interior lines (resampler setup, audio_buf allocation,
// channel/position bookkeeping) are elided in this excerpt.
1379 void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_frame,
int starting_sample) {
// First audio frame after a seek marks where the seek landed.
1381 if (!seek_audio_frame_found && is_seeking)
1382 seek_audio_frame_found = target_frame;
// Skip packets far behind the requested frame.
1385 if (target_frame < (requested_frame - 20)) {
1387 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (Skipped)",
"requested_frame", requested_frame,
"target_frame", target_frame,
"starting_sample", starting_sample);
1394 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (Before)",
"requested_frame", requested_frame,
"target_frame", target_frame,
"starting_sample", starting_sample);
1397 int frame_finished = 0;
1401 int packet_samples = 0;
// Decode loop: keeps sending/receiving until the packet is consumed
// (the !packet->data case drains the decoder at EOF).
1407 while((packet->size > 0 || (!packet->data && frame_finished)) && ret >= 0) {
1409 ret = avcodec_send_packet(aCodecCtx, packet);
1410 if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
// Signal EOF to the decoder on a hard send error.
1411 avcodec_send_packet(aCodecCtx, NULL);
1416 ret = avcodec_receive_frame(aCodecCtx, audio_frame);
1419 if(ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
1420 avcodec_flush_buffers(aCodecCtx);
1424 ret = frame_finished;
1427 if (!packet->data && !frame_finished)
// Legacy decode path (pre send/receive API).
1432 int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
1435 if (frame_finished) {
// Size of the decoded data (per plane, for planar formats).
1438 int plane_size = -1;
1439 data_size = av_samples_get_buffer_size(&plane_size,
1441 audio_frame->nb_samples,
// Samples per channel in this packet.
1449 int pts_remaining_samples = packet_samples /
info.
channels;
// Normalize the packet PTS with the stream's start offset.
1452 int64_t adjusted_pts = packet->pts + audio_pts_offset;
1457 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (Decode Info A)",
"pts_counter", pts_counter,
"PTS", adjusted_pts,
"Offset", audio_pts_offset,
"PTS Diff", adjusted_pts - prev_pts,
"Samples", pts_remaining_samples,
"Sample PTS ratio",
float(adjusted_pts - prev_pts) / pts_remaining_samples);
1458 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (Decode Info B)",
"Sample Diff", pts_remaining_samples - prev_samples - prev_pts,
"Total", pts_total,
"PTS Seconds", audio_seconds,
"Sample Seconds", sample_seconds,
"Seconds Diff", audio_seconds - sample_seconds,
"raw samples", packet_samples);
// Carry forward PTS bookkeeping for the next packet.
1461 prev_pts = adjusted_pts;
1462 pts_total += pts_remaining_samples;
1464 prev_samples = pts_remaining_samples;
// Mark the first affected frame as in-flight.
1469 processing_audio_frames.insert(std::pair<int, int>(previous_packet_location.
frame, previous_packet_location.
frame));
// Walk the packet's samples across frame boundaries, marking every
// frame this packet will touch as in-flight.
1472 while (pts_remaining_samples) {
1477 int samples = samples_per_frame - previous_packet_location.
sample_start;
1478 if (samples > pts_remaining_samples)
1479 samples = pts_remaining_samples;
1482 pts_remaining_samples -= samples;
1484 if (pts_remaining_samples > 0) {
// Spill into the next frame.
1486 previous_packet_location.
frame++;
1492 processing_audio_frames.insert(std::pair<int, int>(previous_packet_location.
frame, previous_packet_location.
frame));
1505 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (ReSample)",
"packet_samples", packet_samples,
"info.channels",
info.
channels,
"info.sample_rate",
info.
sample_rate,
"aCodecCtx->sample_fmt",
AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx),
"AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16);
// Resample the decoded frame to interleaved signed 16-bit.
1510 audio_converted->nb_samples = audio_frame->nb_samples;
1511 av_samples_alloc(audio_converted->data, audio_converted->linesize,
info.
channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
1521 av_opt_set_int(avr,
"out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
1530 audio_converted->data,
1531 audio_converted->linesize[0],
1532 audio_converted->nb_samples,
1534 audio_frame->linesize[0],
1535 audio_frame->nb_samples);
// Copy the converted S16 samples into the local audio buffer.
1538 memcpy(audio_buf, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) *
info.
channels);
1546 av_free(audio_converted->data[0]);
1549 int64_t starting_frame_number = -1;
1550 bool partial_frame =
true;
// For each channel: extract that channel's samples as floats and
// append them to the appropriate Frame(s).
1551 for (
int channel_filter = 0; channel_filter <
info.
channels; channel_filter++) {
1553 starting_frame_number = target_frame;
1554 int channel_buffer_size = packet_samples /
info.
channels;
1555 float *channel_buffer =
new float[channel_buffer_size];
// Zero the buffer before filling.
1558 for (
int z = 0; z < channel_buffer_size; z++)
1559 channel_buffer[z] = 0.0f;
// De-interleave: pick only this channel's samples, converting
// S16 -> float in [-1, 1) via 1/2^15.
1565 for (
int sample = 0; sample < packet_samples; sample++) {
1567 if (channel_filter == channel) {
1569 channel_buffer[position] = audio_buf[sample] * (1.0f / (1 << 15));
// Spread this channel's samples across one or more frames,
// starting at starting_sample within the first frame.
1585 int start = starting_sample;
1586 int remaining_samples = channel_buffer_size;
1587 float *iterate_channel_buffer = channel_buffer;
1588 while (remaining_samples > 0) {
1593 int samples = samples_per_frame - start;
1594 if (samples > remaining_samples)
1595 samples = remaining_samples;
1598 std::shared_ptr<Frame> f = CreateFrame(starting_frame_number);
// A frame is "partial" until it has been filled to its end.
1601 if (samples_per_frame == start + samples)
1602 partial_frame =
false;
1604 partial_frame =
true;
1607 f->AddAudio(
true, channel_filter, start, iterate_channel_buffer, samples, 1.0f);
1610 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (f->AddAudio)",
"frame", starting_frame_number,
"start", start,
"samples", samples,
"channel", channel_filter,
"partial_frame", partial_frame,
"samples_per_frame", samples_per_frame);
1613 working_cache.
Add(f);
1616 remaining_samples -= samples;
// Advance within the channel buffer and move to the next frame.
1619 if (remaining_samples > 0)
1620 iterate_channel_buffer += samples;
1623 starting_frame_number++;
// Per-channel scratch buffer cleanup.
1630 delete[] channel_buffer;
1631 channel_buffer = NULL;
1632 iterate_channel_buffer = NULL;
// Mark every frame this packet touched as processed (no longer in-flight).
1643 for (int64_t f = target_frame; f < starting_frame_number; f++) {
1647 processing_audio_frames.erase(processing_audio_frames.find(f));
1650 if (processing_audio_frames.count(f) == 0)
1652 processed_audio_frames[f] = f;
// Edge case: packet fit entirely inside the target frame.
1655 if (target_frame == starting_frame_number) {
1657 processing_audio_frames.erase(processing_audio_frames.find(target_frame));
1665 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (After)",
"requested_frame", requested_frame,
"starting_frame", target_frame,
"end_frame", starting_frame_number - 1);
// Seek the demuxer to (requested_frame - buffer_amount), preferring the video
// stream when present, then flush decoders and reset all per-stream state.
// Seeks land on keyframes, so we over-seek backwards and decode forward.
// NOTE(review): interior lines (av_seek_frame calls, mutex scopes, the
// near-start fast path) are elided in this excerpt.
1671 void FFmpegReader::Seek(int64_t requested_frame) {
// Clamp to the first frame.
1673 if (requested_frame < 1)
1674 requested_frame = 1;
1678 int processing_video_frames_size = 0;
1679 int processing_audio_frames_size = 0;
1682 processing_video_frames_size = processing_video_frames.size();
1683 processing_audio_frames_size = processing_audio_frames.size();
1687 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::Seek",
"requested_frame", requested_frame,
"seek_count", seek_count,
"last_frame", last_frame,
"processing_video_frames_size", processing_video_frames_size,
"processing_audio_frames_size", processing_audio_frames_size,
"video_pts_offset", video_pts_offset);
// Wait for all in-flight frames to finish before touching shared state.
1690 while (processing_video_frames_size + processing_audio_frames_size > 0) {
1691 std::this_thread::sleep_for(std::chrono::milliseconds(3));
1693 processing_video_frames_size = processing_video_frames.size();
1694 processing_audio_frames_size = processing_audio_frames.size();
// Drop everything cached or tracked — it is all pre-seek state.
1698 working_cache.
Clear();
1699 missing_frames.
Clear();
1704 processing_audio_frames.clear();
1705 processing_video_frames.clear();
1706 processed_video_frames.clear();
1707 processed_audio_frames.clear();
1708 missing_audio_frames.clear();
1709 missing_video_frames.clear();
1710 missing_audio_frames_source.clear();
1711 missing_video_frames_source.clear();
1712 checked_frames.clear();
// Reset frame counters / flags.
1717 current_video_frame = 0;
1718 largest_frame_processed = 0;
1719 num_checks_since_final = 0;
1720 num_packets_since_video_frame = 0;
1721 has_missing_frames =
false;
// Over-seek backwards by a buffer so decoding can catch a keyframe.
1729 int buffer_amount = std::max(max_concurrent_frames, 8);
1730 if (requested_frame - buffer_amount < 20) {
// Near the start of the file: treated specially (body elided).
1741 if (seek_count == 1) {
1744 seeking_pts = ConvertFrameToVideoPTS(1);
1746 seek_audio_frame_found = 0;
1747 seek_video_frame_found = 0;
1751 bool seek_worked =
false;
1752 int64_t seek_target = 0;
// Prefer seeking on the video stream.
1756 seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
1758 fprintf(stderr,
"%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
1761 is_video_seek =
true;
// Fall back to the audio stream.
1768 seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
1770 fprintf(stderr,
"%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
1773 is_video_seek =
false;
// Flush both decoders so no pre-seek packets leak through.
1782 avcodec_flush_buffers(aCodecCtx);
1786 avcodec_flush_buffers(pCodecCtx);
// Invalidate the remembered audio packet location.
1789 previous_packet_location.
frame = -1;
// Record the seek target (first attempt only; retries keep the original).
1794 if (seek_count == 1) {
1796 seeking_pts = seek_target;
1797 seeking_frame = requested_frame;
1799 seek_audio_frame_found = 0;
1800 seek_video_frame_found = 0;
// Current video packet's timestamp. Uses DTS (decode timestamp) rather than
// PTS — frames arrive in decode order here. Defaults to 0 when the packet has
// no timestamp. NOTE(review): the return statement is elided in this excerpt.
1824 int64_t FFmpegReader::GetVideoPTS() {
1825 int64_t current_pts = 0;
1826 if (packet->dts != AV_NOPTS_VALUE)
1827 current_pts = packet->dts;
// One-time capture of the stream start offset: the first observed timestamp is
// negated so that (pts + offset) starts at zero. 99999 is the "not yet set"
// sentinel (see constructor initializers). Offsets beyond max_offset are
// treated as bogus and reset to 0. NOTE(review): max_offset's definition is
// elided in this excerpt.
1834 void FFmpegReader::UpdatePTSOffset(
bool is_video) {
// Video offset: derived from the first video packet's timestamp.
1838 if (video_pts_offset == 99999)
1841 video_pts_offset = 0 - GetVideoPTS();
// Sanity clamp: implausibly large offsets are discarded.
1850 if (video_pts_offset < -max_offset || video_pts_offset > max_offset) {
1852 video_pts_offset = 0;
// Audio offset: derived from the first audio packet's pts.
1860 if (audio_pts_offset == 99999)
1868 audio_pts_offset = 0 - packet->pts;
1870 if (audio_pts_offset < -max_offset || audio_pts_offset > max_offset) {
1872 audio_pts_offset = 0;
// Map a video PTS to a 1-based frame number, tracking the running
// current_video_frame and recording any gap between the expected and
// calculated frame as "missing" frames (filled later from the previous
// frame's image). NOTE(review): the pts->frame arithmetic and return are
// elided in this excerpt.
1882 int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
// Normalize with the stream start offset.
1884 pts = pts + video_pts_offset;
1885 int64_t previous_video_frame = current_video_frame;
// First frame seen: sync the running counter to it.
1894 if (current_video_frame == 0)
1895 current_video_frame = frame;
// Same frame as last time (duplicate PTS) — handled in elided branch.
1899 if (frame == previous_video_frame) {
1904 current_video_frame++;
// Running counter lags the calculated frame: frames were skipped.
1907 if (current_video_frame < frame)
1909 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ConvertVideoPTStoFrame (detected missing frame)",
"calculated frame", frame,
"previous_video_frame", previous_video_frame,
"current_video_frame", current_video_frame);
// Record each missing frame and its source (the frame whose image
// will be duplicated to fill the gap).
1914 while (current_video_frame < frame) {
1915 if (!missing_video_frames.count(current_video_frame)) {
1916 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ConvertVideoPTStoFrame (tracking missing frame)",
"current_video_frame", current_video_frame,
"previous_video_frame", previous_video_frame);
1917 missing_video_frames.insert(std::pair<int64_t, int64_t>(current_video_frame, previous_video_frame));
1918 missing_video_frames_source.insert(std::pair<int64_t, int64_t>(previous_video_frame, current_video_frame));
1922 has_missing_frames =
true;
1925 current_video_frame++;
// Inverse of ConvertVideoPTStoFrame: frame number -> raw stream PTS,
// removing the normalization offset. NOTE(review): the frame->pts
// arithmetic is elided in this excerpt.
1934 int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
1942 return video_pts - video_pts_offset;
// Audio counterpart of ConvertFrameToVideoPTS: frame number -> raw audio
// stream PTS, removing the normalization offset. NOTE(review): the
// frame->pts arithmetic is elided in this excerpt.
1946 int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
1954 return audio_pts - audio_pts_offset;
// Map an audio PTS to an AudioLocation (frame number + sample offset within
// that frame). Snaps to the previous packet's location when "near" it to
// absorb PTS jitter, and records any audio frame gap as missing frames.
// NOTE(review): the pts->fractional-frame arithmetic, clamping bodies and
// return are elided in this excerpt.
1958 AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {
// Normalize with the stream start offset.
1960 pts = pts + audio_pts_offset;
// Split the fractional frame position into whole frame + sample offset.
1969 int64_t whole_frame = int64_t(frame);
1972 double sample_start_percentage = frame - double(whole_frame);
1978 int sample_start = round(
double(samples_per_frame) * sample_start_percentage);
// Clamp to valid frame/sample ranges (bodies elided).
1981 if (whole_frame < 1)
1983 if (sample_start < 0)
// Compare against the previous packet's landing position.
1990 if (previous_packet_location.
frame != -1) {
// Within one frame's worth of samples: treat as contiguous and
// reuse the previous location to avoid jitter-induced gaps.
1991 if (location.
is_near(previous_packet_location, samples_per_frame, samples_per_frame)) {
1992 int64_t orig_frame = location.
frame;
1997 location.
frame = previous_packet_location.
frame;
2000 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)",
"Source Frame", orig_frame,
"Source Audio Sample", orig_start,
"Target Frame", location.
frame,
"Target Audio Sample", location.
sample_start,
"pts", pts);
// A real gap: record every skipped audio frame as missing.
2007 for (int64_t audio_frame = previous_packet_location.
frame; audio_frame < location.
frame; audio_frame++) {
2008 if (!missing_audio_frames.count(audio_frame)) {
2009 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAudioPTSLocation (tracking missing frame)",
"missing_audio_frame", audio_frame,
"previous_audio_frame", previous_packet_location.
frame,
"new location frame", location.
frame);
2010 missing_audio_frames.insert(std::pair<int64_t, int64_t>(audio_frame, previous_packet_location.
frame - 1));
// Remember this location for the next packet's proximity check.
2017 previous_packet_location = location;
// Get-or-create a Frame in the working cache. Double-checks the cache
// (presumably after acquiring a lock — locking lines are elided; confirm)
// before constructing a new frame, and tracks the largest frame number seen.
2024 std::shared_ptr<Frame> FFmpegReader::CreateFrame(int64_t requested_frame) {
// Fast path: frame already in the working cache.
2026 std::shared_ptr<Frame> output = working_cache.
GetFrame(requested_frame);
// Second lookup guards against a race with another creator.
2033 output = working_cache.
GetFrame(requested_frame);
2034 if(output)
return output;
// Not found: construct (elided) and insert the new frame.
2042 working_cache.
Add(output);
// Track the high-water mark of created frames.
2045 if (requested_frame > largest_frame_processed)
2046 largest_frame_processed = requested_frame;
// Decide whether a frame decoded during a seek is pre-target "seek trash":
// true when the furthest frame found by either stream has already reached
// the requested frame. NOTE(review): the return and the seek_trash
// assignment inside the if-body are elided in this excerpt.
2053 bool FFmpegReader::IsPartialFrame(int64_t requested_frame) {
2056 bool seek_trash =
false;
// Furthest landing position across audio and video.
2057 int64_t max_seeked_frame = seek_audio_frame_found;
2058 if (seek_video_frame_found > max_seeked_frame) {
2059 max_seeked_frame = seek_video_frame_found;
2061 if ((
info.
has_audio && seek_audio_frame_found && max_seeked_frame >= requested_frame) ||
2062 (
info.
has_video && seek_video_frame_found && max_seeked_frame >= requested_frame)) {
// Repair pass for a single frame: if it was recorded as missing, synthesize
// it — video from the source frame's image (or album art), audio as silence.
// Each call bumps a per-frame check counter used elsewhere to give up on
// frames that never complete. Returns whether a missing frame was handled.
// NOTE(review): several interior lines (locks, found_missing_frame updates,
// the album-art condition context) are elided in this excerpt.
2070 bool FFmpegReader::CheckMissingFrame(int64_t requested_frame) {
// Count how many times this frame has been inspected.
2075 ++checked_frames[requested_frame];
2078 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckMissingFrame",
"requested_frame", requested_frame,
"has_missing_frames", has_missing_frames,
"missing_video_frames.size()", missing_video_frames.size(),
"checked_count", checked_frames[requested_frame]);
2081 std::map<int64_t, int64_t>::iterator itr;
2082 bool found_missing_frame =
false;
// Album-art special case: audio is done but no video will ever arrive,
// so register the last video frame (the cover art) as this frame's source.
2089 if (checked_frames[requested_frame] > 8 && !missing_video_frames.count(requested_frame) &&
2090 !processing_audio_frames.count(requested_frame) && processed_audio_frames.count(requested_frame) &&
2091 last_video_frame && last_video_frame->has_image_data && HasAlbumArt()) {
2092 missing_video_frames.insert(std::pair<int64_t, int64_t>(requested_frame, last_video_frame->number));
2093 missing_video_frames_source.insert(std::pair<int64_t, int64_t>(last_video_frame->number, requested_frame));
2094 missing_frames.
Add(last_video_frame);
// Missing video: copy the parent (source) frame's image into a new frame.
2099 if (missing_video_frames.count(requested_frame)) {
2100 int64_t missing_source_frame = missing_video_frames.find(requested_frame)->second;
// The source frame also counts as checked.
2103 ++checked_frames[missing_source_frame];
2106 std::shared_ptr<Frame> parent_frame = missing_frames.
GetFrame(missing_source_frame);
2107 if (parent_frame == NULL) {
// Re-fetched from elsewhere (elided); cache it for future fills.
2109 if (parent_frame != NULL) {
2111 missing_frames.
Add(parent_frame);
2116 std::shared_ptr<Frame> missing_frame = CreateFrame(requested_frame);
2119 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckMissingFrame (Is Previous Video Frame Final)",
"requested_frame", requested_frame,
"missing_frame->number", missing_frame->number,
"missing_source_frame", missing_source_frame);
2122 if (parent_frame != NULL) {
2124 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckMissingFrame (AddImage from Previous Video Frame)",
"requested_frame", requested_frame,
"missing_frame->number", missing_frame->number,
"missing_source_frame", missing_source_frame);
// Deep-copy the image so the two frames don't share pixel data.
2127 std::shared_ptr<QImage> parent_image = parent_frame->GetImage();
2129 missing_frame->AddImage(std::make_shared<QImage>(*parent_image));
2130 processed_video_frames[missing_frame->number] = missing_frame->number;
// Missing audio: fill with one frame's worth of silence.
2136 if (missing_audio_frames.count(requested_frame)) {
2139 std::shared_ptr<Frame> missing_frame = CreateFrame(requested_frame);
2145 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckMissingFrame (Add Silence for Missing Audio Frame)",
"requested_frame", requested_frame,
"missing_frame->number", missing_frame->number,
"samples_per_frame", samples_per_frame);
2148 missing_frame->AddAudioSilence(samples_per_frame);
2149 processed_audio_frames[missing_frame->number] = missing_frame->number;
2152 return found_missing_frame;
2156 void FFmpegReader::CheckWorkingFrames(
bool end_of_stream, int64_t requested_frame) {
2158 bool checked_count_tripped =
false;
2159 int max_checked_count = 80;
2162 CheckMissingFrame(requested_frame);
2174 if (f->number < (requested_frame - (max_concurrent_frames * 2))) {
2175 working_cache.
Remove(f->number);
2179 CheckMissingFrame(f->number);
2182 int checked_count = 0;
2183 int checked_frames_size = 0;
2185 bool is_video_ready =
false;
2186 bool is_audio_ready =
false;
2189 is_video_ready = processed_video_frames.count(f->number);
2190 is_audio_ready = processed_audio_frames.count(f->number);
2193 checked_frames_size = checked_frames.size();
2194 if (!checked_count_tripped || f->number >= requested_frame)
2195 checked_count = checked_frames[f->number];
2198 checked_count = max_checked_count;
2201 if (previous_packet_location.
frame == f->number && !end_of_stream)
2202 is_audio_ready =
false;
2203 bool is_seek_trash = IsPartialFrame(f->number);
2210 if (checked_count >= max_checked_count && (!is_video_ready || !is_audio_ready)) {
2212 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckWorkingFrames (exceeded checked_count)",
"requested_frame", requested_frame,
"frame_number", f->number,
"is_video_ready", is_video_ready,
"is_audio_ready", is_audio_ready,
"checked_count", checked_count,
"checked_frames_size", checked_frames_size);
2215 checked_count_tripped =
true;
2217 if (
info.
has_video && !is_video_ready && last_video_frame) {
2219 f->AddImage(std::make_shared<QImage>(*last_video_frame->GetImage()));
2220 is_video_ready =
true;
2225 is_audio_ready =
true;
2230 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckWorkingFrames",
"requested_frame", requested_frame,
"frame_number", f->number,
"is_video_ready", is_video_ready,
"is_audio_ready", is_audio_ready,
"checked_count", checked_count,
"checked_frames_size", checked_frames_size);
2233 if ((!end_of_stream && is_video_ready && is_audio_ready) || end_of_stream || is_seek_trash) {
2235 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckWorkingFrames (mark frame as final)",
"requested_frame", requested_frame,
"f->number", f->number,
"is_seek_trash", is_seek_trash,
"Working Cache Count", working_cache.
Count(),
"Final Cache Count",
final_cache.
Count(),
"end_of_stream", end_of_stream);
2237 if (!is_seek_trash) {
2241 f->AddImage(std::make_shared<QImage>(*last_video_frame->GetImage()));
2244 num_checks_since_final = 0;
2252 if (missing_video_frames_source.count(f->number)) {
2254 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckWorkingFrames (add frame to missing cache)",
"f->number", f->number,
"is_seek_trash", is_seek_trash,
"Missing Cache Count", missing_frames.
Count(),
"Working Cache Count", working_cache.
Count(),
"Final Cache Count",
final_cache.
Count());
2255 missing_frames.
Add(f);
2259 checked_frames.erase(f->number);
2263 working_cache.
Remove(f->number);
2266 last_frame = f->number;
2270 working_cache.
Remove(f->number);
2281 void FFmpegReader::CheckFPS() {
2285 int first_second_counter = 0;
2286 int second_second_counter = 0;
2287 int third_second_counter = 0;
2288 int forth_second_counter = 0;
2289 int fifth_second_counter = 0;
2290 int frames_detected = 0;
2296 if (GetNextPacket() < 0)
2301 if (packet->stream_index == videoStream) {
2305 UpdatePTSOffset(
true);
2308 pts = GetVideoPTS();
2311 RemoveAVFrame(pFrame);
2314 pts += video_pts_offset;
2320 if (video_seconds <= 1.0)
2321 first_second_counter++;
2322 else if (video_seconds > 1.0 && video_seconds <= 2.0)
2323 second_second_counter++;
2324 else if (video_seconds > 2.0 && video_seconds <= 3.0)
2325 third_second_counter++;
2326 else if (video_seconds > 3.0 && video_seconds <= 4.0)
2327 forth_second_counter++;
2328 else if (video_seconds > 4.0 && video_seconds <= 5.0)
2329 fifth_second_counter++;
2338 if (second_second_counter != 0 && third_second_counter != 0 && forth_second_counter != 0 && fifth_second_counter != 0) {
2340 int sum_fps = second_second_counter + third_second_counter + forth_second_counter + fifth_second_counter;
2341 int avg_fps = round(sum_fps / 4.0f);
2352 }
else if (second_second_counter != 0 && third_second_counter != 0) {
2354 int sum_fps = second_second_counter;
2378 void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {
2382 av_freep(&remove_frame->data[0]);
2390 void FFmpegReader::RemoveAVPacket(AVPacket *remove_packet) {
2395 delete remove_packet;
2399 int64_t FFmpegReader::GetSmallestVideoFrame() {
2401 std::map<int64_t, int64_t>::iterator itr;
2402 int64_t smallest_frame = -1;
2404 for (itr = processing_video_frames.begin(); itr != processing_video_frames.end(); ++itr) {
2405 if (itr->first < smallest_frame || smallest_frame == -1)
2406 smallest_frame = itr->first;
2410 return smallest_frame;
2414 int64_t FFmpegReader::GetSmallestAudioFrame() {
2416 std::map<int64_t, int64_t>::iterator itr;
2417 int64_t smallest_frame = -1;
2419 for (itr = processing_audio_frames.begin(); itr != processing_audio_frames.end(); ++itr) {
2420 if (itr->first < smallest_frame || smallest_frame == -1)
2421 smallest_frame = itr->first;
2425 return smallest_frame;
2440 root[
"type"] =
"FFmpegReader";
2441 root[
"path"] =
path;
2456 catch (
const std::exception& e) {
2458 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
2469 if (!root[
"path"].isNull())
2470 path = root[
"path"].asString();
Header file for all Exception classes.
AVPixelFormat hw_de_av_pix_fmt_global
AVHWDeviceType hw_de_av_device_type_global
Header file for FFmpegReader class.
#define AV_FREE_CONTEXT(av_context)
#define AV_FREE_FRAME(av_frame)
#define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count)
#define AV_GET_CODEC_TYPE(av_stream)
#define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context)
#define AV_GET_CODEC_CONTEXT(av_stream, av_codec)
#define AV_FIND_DECODER_CODEC_ID(av_stream)
#define AV_ALLOCATE_FRAME()
#define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height)
#define AV_FREE_PACKET(av_packet)
#define AVCODEC_REGISTER_ALL
#define AVCODEC_MAX_AUDIO_FRAME_SIZE
#define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context)
#define MY_INPUT_BUFFER_PADDING_SIZE
#define AV_GET_SAMPLE_FORMAT(av_stream, av_context)
#define AV_RESET_FRAME(av_frame)
#define FF_NUM_PROCESSORS
#define OPEN_MP_NUM_PROCESSORS
Header file for Timeline class.
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
int64_t Count()
Count the frames in the queue.
void Add(std::shared_ptr< openshot::Frame > frame)
Add a Frame to the cache.
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)
Get a frame from the cache.
void Remove(int64_t frame_number)
Remove a specific frame.
void Clear()
Clear the cache of all frames.
std::shared_ptr< openshot::Frame > GetSmallestFrame()
Get the smallest frame number.
openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
This class represents a clip (used to arrange readers on the timeline)
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
double Y
The Y value of the coordinate (usually representing the value of the property being animated)
void Open() override
Open File - which is called by the constructor automatically.
FFmpegReader(const std::string &path, bool inspect_reader=true)
Constructor for FFmpegReader.
Json::Value JsonValue() const override
Generate Json::Value for this object.
bool GetIsDurationKnown()
Return true if frame can be read with GetFrame()
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
CacheMemory final_cache
Final cache object used to hold final frames.
virtual ~FFmpegReader()
Destructor.
std::string Json() const override
Generate JSON string of this object.
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame) override
void Close() override
Close File.
void SetJson(const std::string value) override
Load JSON string into this object.
This class represents a fraction.
int num
Numerator for the fraction.
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
Fraction Reciprocal() const
Return the reciprocal as a Fraction.
int den
Denominator for the fraction.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Exception when no valid codec is found for a file.
Exception for files that can not be found or opened.
Exception for invalid JSON.
Point GetMaxPoint() const
Get max point (by Y coordinate)
Exception when no streams are found in the file.
Exception when memory could not be allocated.
Coordinate co
This is the primary coordinate.
openshot::ReaderInfo info
Information about the current media file.
juce::CriticalSection processingCriticalSection
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
Exception when a reader is closed, and a frame is requested.
int DE_LIMIT_WIDTH_MAX
Maximum columns that hardware decode can handle.
int HW_DE_DEVICE_SET
Which GPU to use to decode (0 is the first)
int DE_LIMIT_HEIGHT_MAX
Maximum rows that hardware decode can handle.
static Settings * Instance()
Create or get an instance of this settings singleton (invoke the class with this method)
int HARDWARE_DECODER
Use video codec for faster video decoding (if supported)
int preview_height
Optional preview height of timeline image. If your preview window is smaller than the timeline,...
int preview_width
Optional preview width of timeline image. If your preview window is smaller than the timeline,...
This class represents a timeline.
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
This namespace is the default namespace for all code in the openshot library.
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround,...
const Json::Value stringToJson(const std::string value)
This struct holds the associated video frame and starting sample # for an audio packet.
bool is_near(AudioLocation location, int samples_per_frame, int64_t amount)
int audio_bit_rate
The bit rate of the audio stream (in bits per second)
int video_bit_rate
The bit rate of the video stream (in bits per second)
float duration
Length of time (in seconds)
openshot::Fraction audio_timebase
The audio timebase determines how long each audio packet should be played.
int width
The width of the video (in pixels)
int channels
The number of audio channels used in the audio stream.
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
openshot::Fraction display_ratio
The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3)
int height
The height of the video (in pixels)
int pixel_format
The pixel format (i.e. YUV420P, RGB24, etc...)
int64_t video_length
The number of frames in the video stream.
std::string acodec
The name of the audio codec used to encode / decode the video stream.
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
std::string vcodec
The name of the video codec used to encode / decode the video stream.
openshot::Fraction pixel_ratio
The pixel ratio of the video stream as a fraction (i.e. some pixels are not square)
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
bool has_video
Determines if this file has a video stream.
bool has_audio
Determines if this file has an audio stream.
openshot::Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
int video_stream_index
The index of the video stream.
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
int audio_stream_index
The index of the audio stream.
int64_t file_size
Size of file (in bytes)