diff --git a/FFmpeg.AutoGen.Examples.Encode/FFmpeg.AutoGen.Examples.Encode.csproj b/FFmpeg.AutoGen.Examples.Encode/FFmpeg.AutoGen.Examples.Encode.csproj
deleted file mode 100644
index 1dc89c2..0000000
--- a/FFmpeg.AutoGen.Examples.Encode/FFmpeg.AutoGen.Examples.Encode.csproj
+++ /dev/null
@@ -1,14 +0,0 @@
-<Project Sdk="Microsoft.NET.Sdk">
-
-  <PropertyGroup>
-    <OutputType>Exe</OutputType>
-    <TargetFramework>net6.0</TargetFramework>
-    <AllowUnsafeBlocks>True</AllowUnsafeBlocks>
-    <Platforms>x64</Platforms>
-  </PropertyGroup>
-
-  <ItemGroup>
-    <ProjectReference Include="..\FFmpeg.AutoGen\FFmpeg.AutoGen.csproj" />
-  </ItemGroup>
-
-</Project>
diff --git a/FFmpeg.AutoGen.Examples.Encode/Program.cs b/FFmpeg.AutoGen.Examples.Encode/Program.cs
deleted file mode 100644
index 5c9f7d4..0000000
--- a/FFmpeg.AutoGen.Examples.Encode/Program.cs
+++ /dev/null
@@ -1,213 +0,0 @@
-using System;
-using System.IO;
-
-namespace FFmpeg.AutoGen.Examples.Encode
-{
-    internal unsafe class Program
-    {
-        static int Main(string[] args)
-        {
-            AVFrame* frame;
-            AVPacket* pkt;
-            int i, j, k, ret;
-            short* samples;
-            float t, tincr;
-
-            if (args.Length <= 0)
-            {
-                string executable = Environment.GetCommandLineArgs()[0];
-                Console.Error.WriteLine($"Usage: {executable} <output file>");
-                return 0;
-            }
-
-            var filename = args[0];
-
-            /* find the MP2 encoder */
-            AVCodec* codec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_MP2);
-            if (codec == null)
-            {
-                Console.Error.WriteLine("Codec not found");
-                return 1;
-            }
-
-            AVCodecContext* c = ffmpeg.avcodec_alloc_context3(codec);
-            if (c == null)
-            {
-                Console.Error.WriteLine("Could not allocate audio codec context");
-                return 1;
-            }
-
-            /* put sample parameters */
-            c->bit_rate = 64000;
-
-            /* check that the encoder supports s16 pcm input */
-            c->sample_fmt = AVSampleFormat.AV_SAMPLE_FMT_S16;
-            if (!check_sample_fmt(codec, c->sample_fmt))
-            {
-                string sampleFormat = ffmpeg.av_get_sample_fmt_name(c->sample_fmt);
-                Console.Error.WriteLine($"Encoder does not support sample format {sampleFormat}");
-                return 1;
-            }
-
-            /* select other audio parameters supported by the encoder */
-            c->sample_rate = select_sample_rate(codec);
-            c->channel_layout = select_channel_layout(codec);
-            c->channels = ffmpeg.av_get_channel_layout_nb_channels(c->channel_layout);
-
-            /* open it */
-            if (ffmpeg.avcodec_open2(c, codec, null) < 0)
-            {
-                Console.Error.WriteLine("Could not open codec");
-                return 1;
-            }
-
-            using BinaryWriter output = new BinaryWriter(new FileStream(filename, FileMode.Create));
-
-            /* packet for holding encoded output */
-            pkt = ffmpeg.av_packet_alloc();
-            if (pkt == null)
-            {
-                Console.Error.WriteLine("Could not allocate the packet");
-                return 1;
-            }
-
-            /* frame containing input raw audio */
-            frame = ffmpeg.av_frame_alloc();
-            if (frame == null)
-            {
-                Console.Error.WriteLine("Could not allocate audio frame");
-                return 1;
-            }
-
-            frame->nb_samples = c->frame_size;
-            frame->format = (int)c->sample_fmt;
-            frame->channel_layout = c->channel_layout;
-
-            /* allocate the data buffers */
-            ret = ffmpeg.av_frame_get_buffer(frame, 0);
-            if (ret < 0)
-            {
-                Console.Error.WriteLine("Could not allocate audio data buffers");
-                return 1;
-            }
-
-            /* encode a single tone sound */
-            t = 0.0f;
-            tincr = (float)(2 * Math.PI * 440.0 / c->sample_rate);
-            for (i = 0; i < 200; i++)
-            {
-                /* make sure the frame is writable -- makes a copy if the encoder
-                 * kept a reference internally */
-                ret = ffmpeg.av_frame_make_writable(frame);
-                if (ret < 0)
-                    return 1;
-                samples = (short*)frame->data[0];
-
-                for (j = 0; j < c->frame_size; j++)
-                {
-                    samples[2 * j] = (short)(Math.Sin(t) * 10000);
-
-                    for (k = 1; k < c->channels; k++)
-                        samples[2 * j + k] = samples[2 * j];
-                    t += tincr;
-                }
-                encode(c, frame, pkt, output);
-            }
-
-            /* flush the encoder */
-            encode(c, null, pkt, output);
-
-            ffmpeg.av_frame_free(&frame);
-            ffmpeg.av_packet_free(&pkt);
-            ffmpeg.avcodec_free_context(&c);
-
-            return 0;
-        }
-
-        static void encode(AVCodecContext* ctx, AVFrame* frame, AVPacket* pkt, BinaryWriter output)
-        {
-            int ret;
-
-            /* send the frame for encoding */
-            ret = ffmpeg.avcodec_send_frame(ctx, frame);
-            if (ret < 0)
-            {
-                Console.Error.WriteLine("Error sending the frame to the encoder");
-                Environment.Exit(1);
-            }
-
-            /* read all the available output packets (in general there may be any
-             * number of them) */
-            while (ret >= 0)
-            {
-                ret = ffmpeg.avcodec_receive_packet(ctx, pkt);
-                if (ret == ffmpeg.AVERROR(ffmpeg.EAGAIN) || ret == ffmpeg.AVERROR_EOF)
-                    return;
-                else if (ret < 0)
-                {
-                    Console.Error.WriteLine("Error encoding audio frame");
-                    Environment.Exit(1);
-                }
-
-                output.Write(new ReadOnlySpan<byte>(pkt->data, pkt->size));
-                ffmpeg.av_packet_unref(pkt);
-            }
-        }
-
-        /* check that a given sample format is supported by the encoder */
-        static bool check_sample_fmt(AVCodec* codec, AVSampleFormat sample_fmt)
-        {
-            AVSampleFormat* p = codec->sample_fmts;
-
-            while (*p != AVSampleFormat.AV_SAMPLE_FMT_NONE)
-            {
-                if (*p == sample_fmt)
-                    return true;
-                p++;
-            }
-            return false;
-        }
-
-        /* just pick the highest supported samplerate */
-        static int select_sample_rate(AVCodec* codec)
-        {
-            int best_samplerate = 0;
-
-            if (codec->supported_samplerates == null)
-                return 44100;
-
-            int* p = codec->supported_samplerates;
-            while (*p != 0)
-            {
-                if (best_samplerate == 0 || Math.Abs(44100 - *p) < Math.Abs(44100 - best_samplerate))
-                    best_samplerate = *p;
-                p++;
-            }
-            return best_samplerate;
-        }
-
-        /* select layout with the highest channel count */
-        static ulong select_channel_layout(AVCodec* codec)
-        {
-            ulong* p;
-            ulong best_ch_layout = 0;
-            int best_nb_channels = 0;
-
-            if (codec->channel_layouts == null)
-                return ffmpeg.AV_CH_LAYOUT_STEREO;
-
-            p = codec->channel_layouts;
-            while (*p != 0)
-            {
-                int nb_channels = ffmpeg.av_get_channel_layout_nb_channels(*p);
-
-                if (nb_channels > best_nb_channels)
-                {
-                    best_ch_layout = *p;
-                    best_nb_channels = nb_channels;
-                }
-                p++;
-            }
-
-            return best_ch_layout;
-        }
-    }
-}
diff --git a/FFmpeg.AutoGen.Examples.ResamplingAudio/FFmpeg.AutoGen.Examples.ResamplingAudio.csproj b/FFmpeg.AutoGen.Examples.ResamplingAudio/FFmpeg.AutoGen.Examples.ResamplingAudio.csproj
deleted file mode 100644
index 1dc89c2..0000000
--- a/FFmpeg.AutoGen.Examples.ResamplingAudio/FFmpeg.AutoGen.Examples.ResamplingAudio.csproj
+++ /dev/null
@@ -1,14 +0,0 @@
-<Project Sdk="Microsoft.NET.Sdk">
-
-  <PropertyGroup>
-    <OutputType>Exe</OutputType>
-    <TargetFramework>net6.0</TargetFramework>
-    <AllowUnsafeBlocks>True</AllowUnsafeBlocks>
-    <Platforms>x64</Platforms>
-  </PropertyGroup>
-
-  <ItemGroup>
-    <ProjectReference Include="..\FFmpeg.AutoGen\FFmpeg.AutoGen.csproj" />
-  </ItemGroup>
-
-</Project>
diff --git a/FFmpeg.AutoGen.Examples.ResamplingAudio/Program.cs b/FFmpeg.AutoGen.Examples.ResamplingAudio/Program.cs
deleted file mode 100644
index 22c9fc4..0000000
--- a/FFmpeg.AutoGen.Examples.ResamplingAudio/Program.cs
+++ /dev/null
@@ -1,202 +0,0 @@
-using System;
-using System.IO;
-
-namespace FFmpeg.AutoGen.Examples.ResamplingAudio
-{
-    internal unsafe class Program
-    {
-        static int Main(string[] args)
-        {
-            long sourceChannelLayout = (long)ffmpeg.AV_CH_LAYOUT_STEREO;
-            long destinationChannelLayout = (long)ffmpeg.AV_CH_LAYOUT_SURROUND;
-            int sourceSampleRate = 48000;
-            int destinationSampleRate = 44100;
-            byte** sourceData = null;
-            byte** destinationData = null;
-            int sourceSamplesCount = 1024;
-            AVSampleFormat sourceSampleFormat = AVSampleFormat.AV_SAMPLE_FMT_DBL;
-            AVSampleFormat destinationSampleFormat = AVSampleFormat.AV_SAMPLE_FMT_S16;
-            int ret;
-
-            if (args.Length != 1)
-            {
-                string executable = Environment.GetCommandLineArgs()[0];
-                Console.Error.Write($"Usage: {executable} output_file\n"
"API example program to show how to resample an audio stream with libswresample.\n" - + "This program generates a series of audio frames, resamples them to a specified " - + "output format and rate and saves them to an output file named output_file.\n"); - return 1; - } - - var filename = args[0]; - using var stream = File.Open(filename, FileMode.Create); - - /* Create resampler context */ - var resampleContext = ffmpeg.swr_alloc(); - if (resampleContext == null) - { - Console.Error.Write("Could not allocate resampler context\n"); - ret = ffmpeg.AVERROR(ffmpeg.ENOMEM); - goto end; - } - - /* Set options */ - ffmpeg.av_opt_set_int(resampleContext, "in_channel_layout", sourceChannelLayout, 0); - ffmpeg.av_opt_set_int(resampleContext, "in_sample_rate", sourceSampleRate, 0); - ffmpeg.av_opt_set_sample_fmt(resampleContext, "in_sample_fmt", sourceSampleFormat, 0); - - ffmpeg.av_opt_set_int(resampleContext, "out_channel_layout", destinationChannelLayout, 0); - ffmpeg.av_opt_set_int(resampleContext, "out_sample_rate", destinationSampleRate, 0); - ffmpeg.av_opt_set_sample_fmt(resampleContext, "out_sample_fmt", destinationSampleFormat, 0); - - /* Initialize the resampling context */ - ret = ffmpeg.swr_init(resampleContext); - if (ret < 0) - { - Console.Error.Write("Failed to initialize the resampling context\n"); - goto end; - } - - /* Allocate source and destination samples buffers */ - int sourceLinesize; - int sourceChannelsCount = ffmpeg.av_get_channel_layout_nb_channels((ulong)sourceChannelLayout); - ret = ffmpeg.av_samples_alloc_array_and_samples(&sourceData, &sourceLinesize, sourceChannelsCount, - sourceSamplesCount, sourceSampleFormat, 0); - if (ret < 0) - { - Console.Error.Write("Could not allocate source samples\n"); - goto end; - } - - /* Compute the number of converted samples: buffering is avoided - * ensuring that the output buffer will contain at least all the - * converted input samples */ - int destinationSampleCount = - (int)ffmpeg.av_rescale_rnd(sourceSamplesCount, destinationSampleRate, sourceSampleRate, AVRounding.AV_ROUND_UP); - int maxDestinationSampleCount = destinationSampleCount; - - /* Buffer is going to be directly written to a rawaudio file, no alignment */ - int destinationLinesize; - int destinationChanelsCount = ffmpeg.av_get_channel_layout_nb_channels((ulong)destinationChannelLayout); - ret = ffmpeg.av_samples_alloc_array_and_samples(&destinationData, &destinationLinesize, destinationChanelsCount, - destinationSampleCount, destinationSampleFormat, 0); - if (ret < 0) - { - Console.Error.Write("Could not allocate destination samples\n"); - goto end; - } - - double toneLevel = 0; - do - { - /* Generate synthetic audio */ - FillSamples((double *)sourceData[0], sourceSamplesCount, sourceChannelsCount, sourceSampleRate, &toneLevel); - - /* Compute destination number of samples */ - destinationSampleCount = (int)ffmpeg.av_rescale_rnd(ffmpeg.swr_get_delay(resampleContext, sourceSampleRate) + - sourceSamplesCount, destinationSampleRate, sourceSampleRate, AVRounding.AV_ROUND_UP); - if (destinationSampleCount > maxDestinationSampleCount) - { - ffmpeg.av_freep(&destinationData[0]); - ret = ffmpeg.av_samples_alloc(destinationData, &destinationLinesize, destinationChanelsCount, - destinationSampleCount, destinationSampleFormat, 1); - if (ret < 0) - { - break; - } - - maxDestinationSampleCount = destinationSampleCount; - } - - /* Convert to destination format */ - ret = ffmpeg.swr_convert(resampleContext, destinationData, destinationSampleCount, sourceData, sourceSamplesCount); - 
-                if (ret < 0)
-                {
-                    Console.Error.Write("Error while converting\n");
-                    goto end;
-                }
-
-                int destinationBufferSize = ffmpeg.av_samples_get_buffer_size(&destinationLinesize, destinationChannelsCount,
-                    ret, destinationSampleFormat, 1);
-                if (destinationBufferSize < 0)
-                {
-                    Console.Error.Write("Could not get sample buffer size\n");
-                    goto end;
-                }
-
-                Console.WriteLine($"t:{toneLevel} in:{sourceSamplesCount} out:{ret}");
-                stream.Write(new Span<byte>(destinationData[0], destinationBufferSize));
-            }
-            while (toneLevel < 10);
-
-            if ((ret = getFormatFromSampleFormat(out var fmt, destinationSampleFormat)) < 0)
-                goto end;
-            Console.Error.Write("Resampling succeeded. Play the output file with the command:\n"
-                + $"ffplay -f {fmt} -channel_layout {destinationChannelLayout} -channels {destinationChannelsCount} -ar {destinationSampleRate} {filename}\n");
-
-        end:
-            if (sourceData != null)
-                ffmpeg.av_freep(&sourceData[0]);
-            ffmpeg.av_freep(&sourceData);
-
-            if (destinationData != null)
-                ffmpeg.av_freep(&destinationData[0]);
-            ffmpeg.av_freep(&destinationData);
-
-            ffmpeg.swr_free(&resampleContext);
-            return ret;
-        }
-
-        struct sample_fmt_entry
-        {
-            public AVSampleFormat sample_fmt;
-            public string fmt_be, fmt_le;
-        }
-
-        static int getFormatFromSampleFormat(out string fmt, AVSampleFormat sample_fmt)
-        {
-            var sample_fmt_entries = new[]
-            {
-                new sample_fmt_entry { sample_fmt = AVSampleFormat.AV_SAMPLE_FMT_U8, fmt_be = "u8", fmt_le = "u8" },
-                new sample_fmt_entry { sample_fmt = AVSampleFormat.AV_SAMPLE_FMT_S16, fmt_be = "s16be", fmt_le = "s16le" },
-                new sample_fmt_entry { sample_fmt = AVSampleFormat.AV_SAMPLE_FMT_S32, fmt_be = "s32be", fmt_le = "s32le" },
-                new sample_fmt_entry { sample_fmt = AVSampleFormat.AV_SAMPLE_FMT_FLT, fmt_be = "f32be", fmt_le = "f32le" },
-                new sample_fmt_entry { sample_fmt = AVSampleFormat.AV_SAMPLE_FMT_DBL, fmt_be = "f64be", fmt_le = "f64le" },
-            };
-            fmt = null;
-            for (var i = 0; i < sample_fmt_entries.Length; i++)
-            {
-                var entry = sample_fmt_entries[i];
-                if (sample_fmt == entry.sample_fmt)
-                {
-                    fmt = ffmpeg.AV_HAVE_BIGENDIAN != 0 ? entry.fmt_be : entry.fmt_le;
-                    return 0;
-                }
-            }
-
-            Console.Error.WriteLine($"Sample format {ffmpeg.av_get_sample_fmt_name(sample_fmt)} not supported as output format");
-            return ffmpeg.AVERROR(ffmpeg.EINVAL);
-        }
-
-        /**
-         * Fill dst buffer with samplesCount samples, generated starting from *toneLevel.
-         */
-        static void FillSamples(double* dst, int samplesCount, int channelsCount, int sampleRate, double* toneLevel)
-        {
-            int i, j;
-            double toneIncrement = 1.0 / sampleRate;
-            double* dstp = dst;
-            const double c = 2 * Math.PI * 440.0;
-
-            /* generate sin tone with 440Hz frequency and duplicated channels */
-            for (i = 0; i < samplesCount; i++)
-            {
-                *dstp = Math.Sin(c * *toneLevel);
-                for (j = 1; j < channelsCount; j++)
-                    dstp[j] = dstp[0];
-                dstp += channelsCount;
-                *toneLevel += toneIncrement;
-            }
-        }
-    }
-}
diff --git a/FFmpeg.AutoGen.sln b/FFmpeg.AutoGen.sln
index f0822d6..470eb4b 100644
--- a/FFmpeg.AutoGen.sln
+++ b/FFmpeg.AutoGen.sln
@@ -16,8 +16,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "0. Solution Items", "0. Sol
 EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "FFmpeg.AutoGen.CppSharpUnsafeGenerator", "FFmpeg.AutoGen.CppSharpUnsafeGenerator\FFmpeg.AutoGen.CppSharpUnsafeGenerator.csproj", "{2A8E06C6-5A68-4FB4-AE0C-F43B644E3737}"
 EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "FFmpeg.AutoGen.Examples.Encode", "FFmpeg.AutoGen.Examples.Encode\FFmpeg.AutoGen.Examples.Encode.csproj", "{136668DD-ECE0-4153-B21E-511882F358F9}"
-EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "FFmpeg.AutoGen.Examples.ResamplingAudio", "FFmpeg.AutoGen.Examples.ResamplingAudio\FFmpeg.AutoGen.Examples.ResamplingAudio.csproj", "{A192914F-DAA8-400B-B5CA-BA188AEBB42B}"
 EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "FFmpeg.AutoGen.ClangMacroParser", "FFmpeg.AutoGen.ClangMacroParser\FFmpeg.AutoGen.ClangMacroParser.csproj", "{4557AF4F-4680-4764-A7A0-F739664B4DA1}"
@@ -60,10 +58,6 @@ Global
 		{2A8E06C6-5A68-4FB4-AE0C-F43B644E3737}.Debug|Any CPU.Build.0 = Debug|Any CPU
 		{2A8E06C6-5A68-4FB4-AE0C-F43B644E3737}.Release|Any CPU.ActiveCfg = Release|Any CPU
 		{2A8E06C6-5A68-4FB4-AE0C-F43B644E3737}.Release|Any CPU.Build.0 = Release|Any CPU
-		{136668DD-ECE0-4153-B21E-511882F358F9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
-		{136668DD-ECE0-4153-B21E-511882F358F9}.Debug|Any CPU.Build.0 = Debug|Any CPU
-		{136668DD-ECE0-4153-B21E-511882F358F9}.Release|Any CPU.ActiveCfg = Release|Any CPU
-		{136668DD-ECE0-4153-B21E-511882F358F9}.Release|Any CPU.Build.0 = Release|Any CPU
 		{A192914F-DAA8-400B-B5CA-BA188AEBB42B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
 		{A192914F-DAA8-400B-B5CA-BA188AEBB42B}.Debug|Any CPU.Build.0 = Debug|Any CPU
 		{A192914F-DAA8-400B-B5CA-BA188AEBB42B}.Release|Any CPU.ActiveCfg = Release|Any CPU
@@ -100,7 +94,6 @@ Global
 		{A536B02A-B0B1-4753-8328-17369EF09976} = {0771FEE5-9700-4F09-99C4-EC36FD5AFEA9}
 		{5BD870D6-27B4-4208-ACBF-496F2809326A} = {660FBA30-C9C0-44A6-B4C5-1C5030ED271D}
 		{2A8E06C6-5A68-4FB4-AE0C-F43B644E3737} = {E9CC1A7F-ADD1-486B-BEB3-B6E388B35731}
-		{136668DD-ECE0-4153-B21E-511882F358F9} = {660FBA30-C9C0-44A6-B4C5-1C5030ED271D}
 		{A192914F-DAA8-400B-B5CA-BA188AEBB42B} = {660FBA30-C9C0-44A6-B4C5-1C5030ED271D}
 		{4557AF4F-4680-4764-A7A0-F739664B4DA1} = {E9CC1A7F-ADD1-486B-BEB3-B6E388B35731}
 		{07F3A6D4-5599-4D77-9197-A666E8FC8EEC} = {E9CC1A7F-ADD1-486B-BEB3-B6E388B35731}