diff --git a/src/internal/audio/kitaudio.c b/src/internal/audio/kitaudio.c
index e9abfba..45ed347 100644
--- a/src/internal/audio/kitaudio.c
+++ b/src/internal/audio/kitaudio.c
@@ -13,7 +13,7 @@
 #include "kitchensink/kiterror.h"
 
 #define KIT_AUDIO_EARLY_FAIL 5
-#define KIT_AUDIO_EARLY_THRESHOLD 0.05
+#define KIT_AUDIO_EARLY_THRESHOLD 0.1
 #define KIT_AUDIO_LATE_THRESHOLD 0.05
 
 #define SAMPLE_BYTES(audio_decoder) (audio_decoder->output.channels * audio_decoder->output.bytes)
@@ -259,7 +259,7 @@ int Kit_GetAudioDecoderData(Kit_Decoder *decoder, size_t backend_buffer_size, un
 
     // If packet is far too early, the stream jumped or was seeked. Skip packets until we see something valid.
     while(pts > sync_ts + KIT_AUDIO_EARLY_FAIL) {
-        // LOG("[AUDIO] FAIL-EARLY: pts = %lf < %lf + %lf\n", pts, sync_ts, KIT_AUDIO_LATE_THRESHOLD);
+        LOG("[AUDIO] FAIL-EARLY: pts = %lf > %lf + %lf\n", pts, sync_ts, (double)KIT_AUDIO_EARLY_FAIL);
         av_frame_unref(audio_decoder->current);
         Kit_FinishPacketBufferRead(audio_decoder->buffer);
         if(!Kit_BeginPacketBufferRead(audio_decoder->buffer, audio_decoder->current, 0))
@@ -269,7 +269,7 @@ int Kit_GetAudioDecoderData(Kit_Decoder *decoder, size_t backend_buffer_size, un
 
     // Packet is too early, wait.
     if(pts > sync_ts + KIT_AUDIO_EARLY_THRESHOLD) {
-        // LOG("[AUDIO] EARLY pts = %lf > %lf + %lf\n", pts, sync_ts, KIT_AUDIO_EARLY_THRESHOLD);
+        LOG("[AUDIO] EARLY pts = %lf > %lf + %lf\n", pts, sync_ts, KIT_AUDIO_EARLY_THRESHOLD);
         av_frame_unref(audio_decoder->current);
         Kit_CancelPacketBufferRead(audio_decoder->buffer);
         return 0;
@@ -277,14 +277,14 @@ int Kit_GetAudioDecoderData(Kit_Decoder *decoder, size_t backend_buffer_size, un
 
     // Packet is too late, skip packets until we see something reasonable.
     while(pts < sync_ts - KIT_AUDIO_LATE_THRESHOLD) {
-        // LOG("[AUDIO] LATE: pts = %lf < %lf + %lf\n", pts, sync_ts, KIT_AUDIO_LATE_THRESHOLD);
+        LOG("[AUDIO] LATE: pts = %lf < %lf - %lf\n", pts, sync_ts, KIT_AUDIO_LATE_THRESHOLD);
         av_frame_unref(audio_decoder->current);
         Kit_FinishPacketBufferRead(audio_decoder->buffer);
         if(!Kit_BeginPacketBufferRead(audio_decoder->buffer, audio_decoder->current, 0))
             goto no_data;
         pts = Kit_GetCurrentPTS(decoder);
     }
-    // LOG("[AUDIO] >>> SYNC!: pts = %lf, sync = %lf\n", pts, sync_ts);
+    LOG("[AUDIO] >>> SYNC!: pts = %lf, sync = %lf\n", pts, sync_ts);
 
     size = &audio_decoder->current->crop_top;
     left = &audio_decoder->current->crop_bottom;
diff --git a/src/internal/kitdecoder.c b/src/internal/kitdecoder.c
index 1d33abe..425bab8 100644
--- a/src/internal/kitdecoder.c
+++ b/src/internal/kitdecoder.c
@@ -2,10 +2,138 @@
 #include <assert.h>
 #include <stdlib.h>
 
+#include <libavutil/hwcontext.h>
+#include <libavutil/pixdesc.h>
 #include "kitchensink/internal/kitdecoder.h"
+#include "kitchensink/internal/utils/kitlog.h"
 #include "kitchensink/kiterror.h"
 
+void Kit_PrintAVMethod(int methods) {
+    LOG(" * Methods:\n");
+    if(methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) {
+        LOG("   * AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX\n");
+    }
+    if(methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) {
+        LOG("   * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX\n");
+    }
+    if(methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
+        LOG("   * AV_CODEC_HW_CONFIG_METHOD_INTERNAL\n");
+    }
+    if(methods & AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
+        LOG("   * AV_CODEC_HW_CONFIG_METHOD_AD_HOC\n");
+    }
+}
+
+void Kit_PrintPixelFormatType(enum AVPixelFormat pix_fmt) {
+    LOG(" * Pixel format: %s\n", av_get_pix_fmt_name(pix_fmt));
+}
+
+void Kit_PrintAVType(enum AVHWDeviceType type) {
+    LOG(" * Type: ");
+    switch(type) {
+        case AV_HWDEVICE_TYPE_NONE:
+            LOG("AV_HWDEVICE_TYPE_NONE\n");
+            break;
+        case AV_HWDEVICE_TYPE_VDPAU:
+            LOG("AV_HWDEVICE_TYPE_VDPAU\n");
+            break;
+        case AV_HWDEVICE_TYPE_CUDA:
+            LOG("AV_HWDEVICE_TYPE_CUDA\n");
+            break;
+        case AV_HWDEVICE_TYPE_VAAPI:
+            LOG("AV_HWDEVICE_TYPE_VAAPI\n");
+            break;
+        case AV_HWDEVICE_TYPE_DXVA2:
+            LOG("AV_HWDEVICE_TYPE_DXVA2\n");
+            break;
+        case AV_HWDEVICE_TYPE_QSV:
+            LOG("AV_HWDEVICE_TYPE_QSV\n");
+            break;
+        case AV_HWDEVICE_TYPE_VIDEOTOOLBOX:
+            LOG("AV_HWDEVICE_TYPE_VIDEOTOOLBOX\n");
+            break;
+        case AV_HWDEVICE_TYPE_D3D11VA:
+            LOG("AV_HWDEVICE_TYPE_D3D11VA\n");
+            break;
+        case AV_HWDEVICE_TYPE_DRM:
+            LOG("AV_HWDEVICE_TYPE_DRM\n");
+            break;
+        case AV_HWDEVICE_TYPE_OPENCL:
+            LOG("AV_HWDEVICE_TYPE_OPENCL\n");
+            break;
+        case AV_HWDEVICE_TYPE_MEDIACODEC:
+            LOG("AV_HWDEVICE_TYPE_MEDIACODEC\n");
+            break;
+        case AV_HWDEVICE_TYPE_VULKAN:
+            LOG("AV_HWDEVICE_TYPE_VULKAN\n");
+            break;
+    }
+}
+
+static void Kit_PrintHardwareDecoders(const AVCodec *codec) {
+    const AVCodecHWConfig *config;
+    int index = 0;
+    while((config = avcodec_get_hw_config(codec, index)) != NULL) {
+        LOG("Device %d\n", index);
+        Kit_PrintAVMethod(config->methods);
+        Kit_PrintAVType(config->device_type);
+        Kit_PrintPixelFormatType(config->pix_fmt);
+        index++;
+    }
+}
+
+static bool Kit_FindHardwareDecoder(const AVCodec *codec, enum AVHWDeviceType *type) {
+    const AVCodecHWConfig *config;
+    int index = 0;
+    while((config = avcodec_get_hw_config(codec, index++)) != NULL) {
+        if(config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
+           config->device_type == AV_HWDEVICE_TYPE_D3D11VA) {
+            *type = config->device_type;
+            return true;
+        }
+    }
+    return false;
+}
+
+static enum AVPixelFormat Kit_GetHardwarePixelFormat(AVCodecContext *ctx, const enum AVPixelFormat *formats) {
+    while(*formats != AV_PIX_FMT_NONE) {
+        LOG("FMT %s\n", av_get_pix_fmt_name(*formats));
+        switch(*formats) {
+            case AV_PIX_FMT_YUV420P10:
+                return AV_PIX_FMT_YUV420P10;
+            case AV_PIX_FMT_D3D11VA_VLD:
+                return AV_PIX_FMT_D3D11;
+            case AV_PIX_FMT_YUV420P:
+            case AV_PIX_FMT_YUYV422:
+            case AV_PIX_FMT_UYVY422:
+            case AV_PIX_FMT_NV12:
+            case AV_PIX_FMT_NV21:
+            case AV_PIX_FMT_CUDA:
+            case AV_PIX_FMT_QSV:
+            case AV_PIX_FMT_DXVA2_VLD:
+            case AV_PIX_FMT_D3D11:
+            case AV_PIX_FMT_VAAPI:
+            case AV_PIX_FMT_VDPAU:
+                return *formats;
+            default:;
+        }
+        formats++;
+    }
+    return AV_PIX_FMT_NONE;
+}
+
+static bool Kit_InitHardwareDecoder(AVCodecContext *ctx, const enum AVHWDeviceType type) {
+    AVBufferRef *hw_device_ctx = NULL;
+    int err = av_hwdevice_ctx_create(&hw_device_ctx, type, NULL, NULL, 0);
+    if(err < 0) {
+        Kit_SetError("Unable to create hardware device -- %s", av_err2str(err));
+        return false;
+    }
+    ctx->hw_device_ctx = hw_device_ctx;
+    return true;
+}
+
 Kit_Decoder *Kit_CreateDecoder(
     AVStream *stream,
     Kit_Timer *sync_timer,
@@ -20,6 +148,7 @@ Kit_Decoder *Kit_CreateDecoder(
     assert(stream != NULL);
     assert(thread_count >= 0);
 
+    enum AVHWDeviceType hw_type;
     Kit_Decoder *decoder = NULL;
     AVCodecContext *codec_ctx = NULL;
     AVDictionary *codec_opts = NULL;
@@ -52,6 +181,13 @@ Kit_Decoder *Kit_CreateDecoder(
         codec_ctx->thread_count = 1; // Disable threading
     }
 
+    Kit_PrintHardwareDecoders(codec);
+    if(Kit_FindHardwareDecoder(codec, &hw_type)) {
+        codec_ctx->get_format = Kit_GetHardwarePixelFormat;
+        if(!Kit_InitHardwareDecoder(codec_ctx, hw_type))
+            goto exit_2;
+    }
+
     // Open the stream with selected options. Note that av_dict_set will allocate the dict!
     // This is required for ass_process_chunk()
     av_dict_set(&codec_opts, "sub_text_format", "ass", 0);
diff --git a/src/internal/video/kitvideo.c b/src/internal/video/kitvideo.c
index 2338892..e3924e0 100644
--- a/src/internal/video/kitvideo.c
+++ b/src/internal/video/kitvideo.c
@@ -61,7 +61,7 @@ static void dec_signal_video_cb(Kit_Decoder *decoder) {
 
 static void dec_read_video(const Kit_Decoder *decoder) {
     Kit_VideoDecoder *video_decoder = decoder->userdata;
-    enum AVPixelFormat in_fmt = decoder->codec_ctx->pix_fmt;
+    enum AVPixelFormat in_fmt = video_decoder->in_frame->format;
     enum AVPixelFormat out_fmt = Kit_FindAVPixelFormat(video_decoder->output.format);
     int w = video_decoder->in_frame->width;
     int h = video_decoder->in_frame->height;
@@ -94,13 +94,29 @@ static Kit_DecoderInputResult dec_input_video_cb(const Kit_Decoder *decoder, con
 
 static bool dec_decode_video_cb(const Kit_Decoder *decoder, double *pts) {
     assert(decoder);
+    int ret;
+    AVFrame *tmp_frame = av_frame_alloc();
     Kit_VideoDecoder *video_decoder = decoder->userdata;
-    if(avcodec_receive_frame(decoder->codec_ctx, video_decoder->in_frame) == 0) {
-        *pts = video_decoder->in_frame->best_effort_timestamp * av_q2d(decoder->stream->time_base);
+    if(avcodec_receive_frame(decoder->codec_ctx, tmp_frame) == 0) {
+
+        if(tmp_frame->format == AV_PIX_FMT_D3D11) {
+            if((ret = av_hwframe_transfer_data(video_decoder->in_frame, tmp_frame, 0)) < 0) {
+                LOG("Error in data transfer: %s", av_err2str(ret));
+                av_frame_free(&tmp_frame);
+                return false;
+            }
+            av_frame_copy_props(video_decoder->in_frame, tmp_frame);
+        } else {
+            av_frame_move_ref(video_decoder->in_frame, tmp_frame);
+        }
+
+        *pts = video_decoder->in_frame->best_effort_timestamp * av_q2d(decoder->stream->time_base);
         dec_read_video(decoder);
         av_frame_unref(video_decoder->in_frame);
+        av_frame_free(&tmp_frame);
         return true;
     }
+    av_frame_free(&tmp_frame);
     return false;
 }
@@ -191,11 +207,7 @@ Kit_Decoder *Kit_CreateVideoDecoder(const Kit_Source *src, Kit_Timer *sync_timer
 
     // Create scaler for handling format changes
     if((sws = Kit_GetSwsContext(
-            sws,
-            decoder->codec_ctx->width,
-            decoder->codec_ctx->height,
-            decoder->codec_ctx->pix_fmt,
-            output_format
+            sws, decoder->codec_ctx->width, decoder->codec_ctx->height, decoder->codec_ctx->pix_fmt, output_format
         )) == NULL) {
         goto exit_6;
     }