diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 667012441..12365c0da 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -66,6 +66,7 @@ windows-builder-x86: - Expand-Archive -Path artifacts.zip -DestinationPath . - $env:LIBOPENSHOT_AUDIO_DIR = "$CI_PROJECT_DIR\build\install-x86" - $env:UNITTEST_DIR = "C:\msys32\usr" + - $env:RESVGDIR = "C:\msys32\usr\local" - $env:ZMQDIR = "C:\msys32\usr" - $env:Path = "C:\msys32\mingw32\bin;C:\msys32\mingw32\lib;C:\msys32\usr\lib\cmake\UnitTest++;C:\msys32\home\jonathan\depot_tools;C:\msys32\usr;C:\msys32\usr\lib;" + $env:Path; - New-Item -ItemType Directory -Force -Path build diff --git a/cmake/Modules/FindRESVG.cmake b/cmake/Modules/FindRESVG.cmake new file mode 100644 index 000000000..4a201747e --- /dev/null +++ b/cmake/Modules/FindRESVG.cmake @@ -0,0 +1,34 @@ +# - Try to find RESVG +# Once done this will define +# RESVG_FOUND - System has RESVG +# RESVG_INCLUDE_DIRS - The RESVG include directories +# RESVG_LIBRARIES - The libraries needed to use RESVG +# RESVG_DEFINITIONS - Compiler switches required for using RESVG +find_path ( RESVG_INCLUDE_DIR ResvgQt.h + PATHS ${RESVGDIR}/include/resvg + $ENV{RESVGDIR}/include/resvg + $ENV{RESVGDIR}/include + /usr/include/resvg + /usr/include + /usr/local/include/resvg + /usr/local/include ) + +find_library ( RESVG_LIBRARY NAMES resvg + PATHS /usr/lib + /usr/local/lib + $ENV{RESVGDIR}/lib ) + +set ( RESVG_LIBRARIES ${RESVG_LIBRARY} ) +set ( RESVG_INCLUDE_DIRS ${RESVG_INCLUDE_DIR} ) + +SET( RESVG_FOUND FALSE ) + +IF ( RESVG_INCLUDE_DIR AND RESVG_LIBRARY ) + SET ( RESVG_FOUND TRUE ) + + include ( FindPackageHandleStandardArgs ) + # handle the QUIETLY and REQUIRED arguments and set RESVG_FOUND to TRUE + # if all listed variables are TRUE + find_package_handle_standard_args ( RESVG DEFAULT_MSG RESVG_LIBRARY RESVG_INCLUDE_DIR ) +ENDIF ( RESVG_INCLUDE_DIR AND RESVG_LIBRARY ) + diff --git a/include/ChunkReader.h b/include/ChunkReader.h index aa1510934..b780602ba 100644 --- 
a/include/ChunkReader.h +++ b/include/ChunkReader.h @@ -29,8 +29,6 @@ #define OPENSHOT_CHUNK_READER_H #include "ReaderBase.h" -#include "FFmpegReader.h" - #include #include #include @@ -107,7 +105,7 @@ namespace openshot string path; bool is_open; int64_t chunk_size; - FFmpegReader *local_reader; + ReaderBase *local_reader; ChunkLocation previous_location; ChunkVersion version; std::shared_ptr last_frame; diff --git a/include/Clip.h b/include/Clip.h index f30844b29..26cdd211a 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -44,18 +44,9 @@ #include "EffectBase.h" #include "Effects.h" #include "EffectInfo.h" -#include "FFmpegReader.h" #include "Fraction.h" -#include "FrameMapper.h" -#ifdef USE_IMAGEMAGICK - #include "ImageReader.h" - #include "TextReader.h" -#endif -#include "QtImageReader.h" -#include "ChunkReader.h" #include "KeyFrame.h" #include "ReaderBase.h" -#include "DummyReader.h" using namespace std; using namespace openshot; diff --git a/include/ClipBase.h b/include/ClipBase.h index 063416409..3dae8a536 100644 --- a/include/ClipBase.h +++ b/include/ClipBase.h @@ -58,8 +58,6 @@ namespace openshot { float start; ///< The position in seconds to start playing (used to trim the beginning of a clip) float end; ///< The position in seconds to end playing (used to trim the ending of a clip) string previous_properties; ///< This string contains the previous JSON properties - int max_width; ///< The maximum image width needed by this clip (used for optimizations) - int max_height; ///< The maximium image height needed by this clip (used for optimizations) /// Generate JSON for a property Json::Value add_property_json(string name, float value, string type, string memo, Keyframe* keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame); @@ -70,7 +68,7 @@ namespace openshot { public: /// Constructor for the base clip - ClipBase() { max_width = 0; max_height = 0; }; + ClipBase() { }; // Compare a clip using the Position() property bool 
operator< ( ClipBase& a) { return (Position() < a.Position()); } @@ -93,9 +91,6 @@ namespace openshot { void Start(float value) { start = value; } ///< Set start position (in seconds) of clip (trim start of video) void End(float value) { end = value; } ///< Set end position (in seconds) of clip (trim end of video) - /// Set Max Image Size (used for performance optimization) - void SetMaxSize(int width, int height) { max_width = width; max_height = height; }; - /// Get and Set JSON methods virtual string Json() = 0; ///< Generate JSON string of this object virtual void SetJson(string value) = 0; ///< Load JSON string into this object diff --git a/include/FFmpegReader.h b/include/FFmpegReader.h index 6e88fc7b8..eaa459438 100644 --- a/include/FFmpegReader.h +++ b/include/FFmpegReader.h @@ -42,6 +42,7 @@ #include #include #include "CacheMemory.h" +#include "Clip.h" #include "Exceptions.h" #include "OpenMPUtilities.h" #include "Settings.h" diff --git a/include/FFmpegUtilities.h b/include/FFmpegUtilities.h index 0c0865857..346da5418 100644 --- a/include/FFmpegUtilities.h +++ b/include/FFmpegUtilities.h @@ -141,7 +141,7 @@ #define AV_ALLOCATE_FRAME() av_frame_alloc() #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1) #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) - #define AV_FREE_FRAME(av_frame) if (av_frame) av_frame_free(av_frame) + #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context) #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type @@ -176,7 +176,7 @@ #define AV_ALLOCATE_FRAME() av_frame_alloc() #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1) #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) - #define AV_FREE_FRAME(av_frame) 
if(av_frame) av_frame_free(av_frame) + #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context) #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type @@ -211,7 +211,7 @@ #define AV_ALLOCATE_FRAME() av_frame_alloc() #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height) #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) - #define AV_FREE_FRAME(av_frame) if (av_frame) av_frame_free(av_frame) + #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context) #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type @@ -241,7 +241,7 @@ #define AV_ALLOCATE_FRAME() avcodec_alloc_frame() #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height) #define AV_RESET_FRAME(av_frame) avcodec_get_frame_defaults(av_frame) - #define AV_FREE_FRAME(av_frame) if(av_frame) avcodec_free_frame(av_frame) + #define AV_FREE_FRAME(av_frame) avcodec_free_frame(av_frame) #define AV_FREE_PACKET(av_packet) av_free_packet(av_packet) #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context) #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type diff --git a/include/QtImageReader.h b/include/QtImageReader.h index 772a879ec..6b260f159 100644 --- a/include/QtImageReader.h +++ b/include/QtImageReader.h @@ -28,19 +28,14 @@ #ifndef OPENSHOT_QIMAGE_READER_H #define OPENSHOT_QIMAGE_READER_H -#include "ReaderBase.h" - #include #include #include #include #include #include -#include -#include -#include -#include "CacheMemory.h" #include "Exceptions.h" +#include "ReaderBase.h" using namespace std; @@ -110,9 +105,6 @@ namespace openshot Json::Value JsonValue(); ///< Generate Json::JsonValue for 
this object void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object - /// Set Max Image Size (used for performance optimization) - void SetMaxSize(int width, int height); - /// Open File - which is called by the constructor automatically void Open(); }; diff --git a/include/ReaderBase.h b/include/ReaderBase.h index 2b3ee917c..b0a1b3dbd 100644 --- a/include/ReaderBase.h +++ b/include/ReaderBase.h @@ -35,6 +35,7 @@ #include #include "CacheMemory.h" #include "ChannelLayouts.h" +#include "ClipBase.h" #include "Fraction.h" #include "Frame.h" #include "Json.h" @@ -99,9 +100,7 @@ namespace openshot /// Section lock for multiple threads CriticalSection getFrameCriticalSection; CriticalSection processingCriticalSection; - - int max_width; ///< The maximum image width needed by this clip (used for optimizations) - int max_height; ///< The maximium image height needed by this clip (used for optimizations) + ClipBase* parent; public: @@ -111,6 +110,12 @@ namespace openshot /// Information about the current media file ReaderInfo info; + /// Parent clip object of this reader (which can be unparented and NULL) + ClipBase* GetClip(); + + /// Set parent clip object of this reader + void SetClip(ClipBase* clip); + /// Close the reader (and any resources it was consuming) virtual void Close() = 0; @@ -140,9 +145,6 @@ namespace openshot virtual Json::Value JsonValue() = 0; ///< Generate Json::JsonValue for this object virtual void SetJsonValue(Json::Value root) = 0; ///< Load Json::JsonValue into this object - /// Set Max Image Size (used for performance optimization) - void SetMaxSize(int width, int height) { max_width = width; max_height = height; }; - /// Open the reader (and start consuming resources, such as images or video files) virtual void Open() = 0; }; diff --git a/include/Settings.h b/include/Settings.h index 6a7940eb0..ec26338bd 100644 --- a/include/Settings.h +++ b/include/Settings.h @@ -85,6 +85,12 @@ namespace openshot { /// Scale mode used in 
FFmpeg decoding and encoding (used as an optimization for faster previews) bool HIGH_QUALITY_SCALING = false; + /// Maximum width for image data (useful for optimizing for a smaller preview or render) + int MAX_WIDTH = 0; + + /// Maximum height for image data (useful for optimizing for a smaller preview or render) + int MAX_HEIGHT = 0; + /// Wait for OpenMP task to finish before continuing (used to limit threads on slower systems) bool WAIT_FOR_VIDEO_PROCESSING_TASK = false; diff --git a/include/Timeline.h b/include/Timeline.h index ed5c2ab3f..312add2e1 100644 --- a/include/Timeline.h +++ b/include/Timeline.h @@ -48,6 +48,7 @@ #include "KeyFrame.h" #include "OpenMPUtilities.h" #include "ReaderBase.h" +#include "Settings.h" using namespace std; using namespace openshot; @@ -265,6 +266,10 @@ namespace openshot { Json::Value JsonValue(); ///< Generate Json::JsonValue for this object void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object + /// Set Max Image Size (used for performance optimization). Convenience function for setting + /// Settings::Instance()->MAX_WIDTH and Settings::Instance()->MAX_HEIGHT. + void SetMaxSize(int width, int height); + /// @brief Apply a special formatted JSON object, which represents a change to the timeline (add, update, delete) /// This is primarily designed to keep the timeline (and its child objects... such as clips and effects) in sync /// with another application... such as OpenShot Video Editor (http://www.openshot.org). 
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 7752df152..6f08b81f5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -79,11 +79,6 @@ ENDIF (ImageMagick_FOUND) # Find FFmpeg libraries (used for video encoding / decoding) FIND_PACKAGE(FFmpeg REQUIRED) -# Include FFmpeg headers (needed for compile) -message('AVCODEC_FOUND: ${AVCODEC_FOUND}') -message('AVCODEC_INCLUDE_DIRS: ${AVCODEC_INCLUDE_DIRS}') -message('AVCODEC_LIBRARIES: ${AVCODEC_LIBRARIES}') - IF (AVCODEC_FOUND) include_directories(${AVCODEC_INCLUDE_DIRS}) ENDIF (AVCODEC_FOUND) @@ -116,8 +111,6 @@ ENDIF (AVRESAMPLE_FOUND) # Find JUCE-based openshot Audio libraries FIND_PACKAGE(OpenShotAudio REQUIRED) -message('LIBOPENSHOT_AUDIO_INCLUDE_DIRS: ${LIBOPENSHOT_AUDIO_INCLUDE_DIRS}') - # Include Juce headers (needed for compile) include_directories(${LIBOPENSHOT_AUDIO_INCLUDE_DIRS}) @@ -190,13 +183,28 @@ FIND_PACKAGE(ZMQ REQUIRED) # Include ZeroMQ headers (needed for compile) include_directories(${ZMQ_INCLUDE_DIRS}) +################### RESVG ##################### +# Find resvg library (used for rendering svg files) +FIND_PACKAGE(RESVG) + +# Include resvg headers (optional SVG library) +if (RESVG_FOUND) + include_directories(${RESVG_INCLUDE_DIRS}) + + # define a global var (used in the C++) + add_definitions( -DUSE_RESVG=1 ) + SET(CMAKE_SWIG_FLAGS "-DUSE_RESVG=1") +else(RESVG_FOUND) + message("-- Could NOT find libresvg (using Qt SVG parsing instead)") +endif(RESVG_FOUND) + ################### JSONCPP ##################### # Include jsoncpp headers (needed for JSON parsing) if (USE_SYSTEM_JSONCPP) find_package(JsonCpp REQUIRED) include_directories(${JSONCPP_INCLUDE_DIRS}) else() - message("Using embedded JsonCpp") + message("-- Could NOT find JsonCpp library (Using embedded JsonCpp instead)") include_directories("../thirdparty/jsoncpp/include") endif(USE_SYSTEM_JSONCPP) @@ -334,6 +342,10 @@ IF (AVRESAMPLE_FOUND) SET ( REQUIRED_LIBRARIES ${REQUIRED_LIBRARIES} ${AVRESAMPLE_LIBRARIES} ) ENDIF 
(AVRESAMPLE_FOUND) +IF (RESVG_FOUND) + SET ( REQUIRED_LIBRARIES ${REQUIRED_LIBRARIES} ${RESVG_LIBRARIES} ) +ENDIF(RESVG_FOUND) + IF (OPENMP_FOUND) SET ( REQUIRED_LIBRARIES ${REQUIRED_LIBRARIES} ${OpenMP_CXX_FLAGS} ) ENDIF (OPENMP_FOUND) diff --git a/src/ChunkReader.cpp b/src/ChunkReader.cpp index 8308a0c96..fe5522436 100644 --- a/src/ChunkReader.cpp +++ b/src/ChunkReader.cpp @@ -26,6 +26,7 @@ */ #include "../include/ChunkReader.h" +#include "../include/FFmpegReader.h" using namespace openshot; @@ -227,7 +228,6 @@ std::shared_ptr ChunkReader::GetFrame(int64_t requested_frame) cout << "Load READER: " << chunk_video_path << endl; // Load new FFmpegReader local_reader = new FFmpegReader(chunk_video_path); - local_reader->enable_seek = false; // disable seeking local_reader->Open(); // open reader } catch (InvalidFile) diff --git a/src/Clip.cpp b/src/Clip.cpp index 8e33f84c4..bd85d3401 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -26,6 +26,15 @@ */ #include "../include/Clip.h" +#include "../include/FFmpegReader.h" +#include "../include/FrameMapper.h" +#ifdef USE_IMAGEMAGICK + #include "../include/ImageReader.h" + #include "../include/TextReader.h" +#endif +#include "../include/QtImageReader.h" +#include "../include/ChunkReader.h" +#include "../include/DummyReader.h" using namespace openshot; @@ -212,6 +221,9 @@ void Clip::Reader(ReaderBase* new_reader) // set reader pointer reader = new_reader; + // set parent + reader->SetClip(this); + // Init rotation (if any) init_reader_rotation(); } @@ -620,35 +632,6 @@ std::shared_ptr Clip::GetOrCreateFrame(int64_t number) // Debug output ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1); - // Determine the max size of this clips source image (based on the timeline's size, the scaling mode, - // and the scaling keyframes). 
This is a performance improvement, to keep the images as small as possible, - // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline - // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in - // the future. - if (scale == SCALE_FIT || scale == SCALE_STRETCH) { - // Best fit or Stretch scaling (based on max timeline size * scaling keyframes) - float max_scale_x = scale_x.GetMaxPoint().co.Y; - float max_scale_y = scale_y.GetMaxPoint().co.Y; - reader->SetMaxSize(max(float(max_width), max_width * max_scale_x), max(float(max_height), max_height * max_scale_y)); - - } else if (scale == SCALE_CROP) { - // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes) - float max_scale_x = scale_x.GetMaxPoint().co.Y; - float max_scale_y = scale_y.GetMaxPoint().co.Y; - QSize width_size(max_width * max_scale_x, round(max_width / (float(reader->info.width) / float(reader->info.height)))); - QSize height_size(round(max_height / (float(reader->info.height) / float(reader->info.width))), max_height * max_scale_y); - - // respect aspect ratio - if (width_size.width() >= max_width && width_size.height() >= max_height) - reader->SetMaxSize(max(max_width, width_size.width()), max(max_height, width_size.height())); - else - reader->SetMaxSize(max(max_width, height_size.width()), max(max_height, height_size.height())); - - } else { - // No scaling, use original image size (slower) - reader->SetMaxSize(0, 0); - } - // Attempt to get a frame (but this could fail if a reader has just been closed) new_frame = reader->GetFrame(number); @@ -996,9 +979,11 @@ void Clip::SetJsonValue(Json::Value root) { reader->SetJsonValue(root["reader"]); } - // mark as managed reader - if (reader) + // mark as managed reader and set parent + if (reader) { + reader->SetClip(this); manage_reader = true; + } // Re-Open reader (if needed) if (already_open) diff --git a/src/FFmpegReader.cpp 
b/src/FFmpegReader.cpp index 434b1ae4e..547fbd998 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -886,9 +886,49 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) if (pFrameRGB == NULL) throw OutOfBoundsFrame("Convert Image Broke!", current_frame, video_length); - // Determine if video needs to be scaled down (for performance reasons) - // Timelines pass their size to the clips, which pass their size to the readers (as max size) - // If a clip is being scaled larger, it will set max_width and max_height = 0 (which means don't down scale) + // Determine the max size of this source image (based on the timeline's size, the scaling mode, + // and the scaling keyframes). This is a performance improvement, to keep the images as small as possible, + // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline + // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in + // the future. 
+ int max_width = Settings::Instance()->MAX_WIDTH; + int max_height = Settings::Instance()->MAX_HEIGHT; + + Clip* parent = (Clip*) GetClip(); + if (parent) { + if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) { + // Best fit or Stretch scaling (based on max timeline size * scaling keyframes) + float max_scale_x = parent->scale_x.GetMaxPoint().co.Y; + float max_scale_y = parent->scale_y.GetMaxPoint().co.Y; + max_width = max(float(max_width), max_width * max_scale_x); + max_height = max(float(max_height), max_height * max_scale_y); + + } else if (parent->scale == SCALE_CROP) { + // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes) + float max_scale_x = parent->scale_x.GetMaxPoint().co.Y; + float max_scale_y = parent->scale_y.GetMaxPoint().co.Y; + QSize width_size(max_width * max_scale_x, + round(max_width / (float(info.width) / float(info.height)))); + QSize height_size(round(max_height / (float(info.height) / float(info.width))), + max_height * max_scale_y); + // respect aspect ratio + if (width_size.width() >= max_width && width_size.height() >= max_height) { + max_width = max(max_width, width_size.width()); + max_height = max(max_height, width_size.height()); + } + else { + max_width = max(max_width, height_size.width()); + max_height = max(max_height, height_size.height()); + } + + } else { + // No scaling, use original image size (slower) + max_width = info.width; + max_height = info.height; + } + } + + // Determine if image needs to be scaled (for performance reasons) int original_height = height; if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) { // Override width and height (but maintain aspect ratio) @@ -1973,8 +2013,13 @@ void FFmpegReader::RemoveAVFrame(AVFrame* remove_frame) if (remove_frame) { // Free memory - av_freep(&remove_frame->data[0]); - AV_FREE_FRAME(&remove_frame); + #pragma omp critical (packet_cache) + { + av_freep(&remove_frame->data[0]); +#ifndef WIN32 + 
AV_FREE_FRAME(&remove_frame); +#endif + } } } diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index 1817c0491..2a5d42761 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -352,9 +352,6 @@ std::shared_ptr FrameMapper::GetOrCreateFrame(int64_t number) // Debug output ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1); - // Set max image size (used for performance optimization) - reader->SetMaxSize(max_width, max_height); - // Attempt to get a frame (but this could fail if a reader has just been closed) new_frame = reader->GetFrame(number); diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp index 764ef6ed1..80a8237d2 100644 --- a/src/QtImageReader.cpp +++ b/src/QtImageReader.cpp @@ -26,6 +26,18 @@ */ #include "../include/QtImageReader.h" +#include "../include/Settings.h" +#include "../include/Clip.h" +#include "../include/CacheMemory.h" +#include +#include +#include + +#if USE_RESVG == 1 + // If defined and found in CMake, utilize the libresvg for parsing + // SVG files and rasterizing them to QImages. + #include "ResvgQt.h" +#endif using namespace openshot; @@ -51,17 +63,46 @@ void QtImageReader::Open() // Open reader if not already open if (!is_open) { - // Attempt to open file + bool success = true; image = std::shared_ptr(new QImage()); - bool success = image->load(QString::fromStdString(path)); + +#if USE_RESVG == 1 + // If defined and found in CMake, utilize the libresvg for parsing + // SVG files and rasterizing them to QImages. 
+ // Only use resvg for files ending in '.svg' or '.svgz' + if (path.find(".svg") != std::string::npos || + path.find(".svgz") != std::string::npos) { + + ResvgRenderer renderer(QString::fromStdString(path)); + if (!renderer.isValid()) { + success = false; + } else { + + image = std::shared_ptr(new QImage(renderer.defaultSize(), QImage::Format_RGBA8888)); + image->fill(Qt::transparent); + + QPainter p(image.get()); + renderer.render(&p); + p.end(); + } + + } else { + // Attempt to open file (old method) + success = image->load(QString::fromStdString(path)); + if (success) + image = std::shared_ptr(new QImage(image->convertToFormat(QImage::Format_RGBA8888))); + } +#else + // Attempt to open file using Qt's build in image processing capabilities + success = image->load(QString::fromStdString(path)); + if (success) + image = std::shared_ptr(new QImage(image->convertToFormat(QImage::Format_RGBA8888))); +#endif if (!success) // raise exception throw InvalidFile("File could not be opened.", path); - // Set pixel format - image = std::shared_ptr(new QImage(image->convertToFormat(QImage::Format_RGBA8888))); - // Update image properties info.has_audio = false; info.has_video = true; @@ -111,21 +152,6 @@ void QtImageReader::Close() } } -void QtImageReader::SetMaxSize(int width, int height) -{ - // Determine if we need to scale the image (for performance reasons) - // The timeline passes its size to the clips, which pass their size to the readers, and eventually here - // A max_width/max_height = 0 means do not scale (probably because we are scaling the image larger than 100%) - - // Remove cache that is no longer valid (if needed) - if (cached_image && (cached_image->width() != width && cached_image->height() != height)) - // Expire this cache - cached_image.reset(); - - max_width = width; - max_height = height; -} - // Get an openshot::Frame object for a specific frame number of this reader. 
std::shared_ptr QtImageReader::GetFrame(int64_t requested_frame) { @@ -133,39 +159,92 @@ std::shared_ptr QtImageReader::GetFrame(int64_t requested_frame) if (!is_open) throw ReaderClosed("The Image is closed. Call Open() before calling this method.", path); - if (max_width != 0 && max_height != 0 && max_width < info.width && max_height < info.height) - { - // Scale image smaller (or use a previous scaled image) - if (!cached_image) { - // Create a scoped lock, allowing only a single thread to run the following code at one time - const GenericScopedLock lock(getFrameCriticalSection); + // Create a scoped lock, allowing only a single thread to run the following code at one time + const GenericScopedLock lock(getFrameCriticalSection); + + // Determine the max size of this source image (based on the timeline's size, the scaling mode, + // and the scaling keyframes). This is a performance improvement, to keep the images as small as possible, + // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline + // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in + // the future. 
+ int max_width = Settings::Instance()->MAX_WIDTH; + int max_height = Settings::Instance()->MAX_HEIGHT; + + Clip* parent = (Clip*) GetClip(); + if (parent) { + if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) { + // Best fit or Stretch scaling (based on max timeline size * scaling keyframes) + float max_scale_x = parent->scale_x.GetMaxPoint().co.Y; + float max_scale_y = parent->scale_y.GetMaxPoint().co.Y; + max_width = max(float(max_width), max_width * max_scale_x); + max_height = max(float(max_height), max_height * max_scale_y); + + } else if (parent->scale == SCALE_CROP) { + // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes) + float max_scale_x = parent->scale_x.GetMaxPoint().co.Y; + float max_scale_y = parent->scale_y.GetMaxPoint().co.Y; + QSize width_size(max_width * max_scale_x, + round(max_width / (float(info.width) / float(info.height)))); + QSize height_size(round(max_height / (float(info.height) / float(info.width))), + max_height * max_scale_y); + // respect aspect ratio + if (width_size.width() >= max_width && width_size.height() >= max_height) { + max_width = max(max_width, width_size.width()); + max_height = max(max_height, width_size.height()); + } + else { + max_width = max(max_width, height_size.width()); + max_height = max(max_height, height_size.height()); + } + + } else { + // No scaling, use original image size (slower) + max_width = info.width; + max_height = info.height; + } + } + // Scale image smaller (or use a previous scaled image) + if (!cached_image || cached_image->width() != max_width || cached_image->height() != max_height) { + +#if USE_RESVG == 1 + // If defined and found in CMake, utilize the libresvg for parsing + // SVG files and rasterizing them to QImages. 
+ // Only use resvg for files ending in '.svg' or '.svgz' + if (path.find(".svg") != std::string::npos || + path.find(".svgz") != std::string::npos) { + ResvgRenderer renderer(QString::fromStdString(path)); + if (renderer.isValid()) { + + cached_image = std::shared_ptr(new QImage(QSize(max_width, max_height), QImage::Format_RGBA8888)); + cached_image->fill(Qt::transparent); + + QPainter p(cached_image.get()); + renderer.render(&p); + p.end(); + } + } else { // We need to resize the original image to a smaller image (for performance reasons) // Only do this once, to prevent tons of unneeded scaling operations cached_image = std::shared_ptr(new QImage(image->scaled(max_width, max_height, Qt::KeepAspectRatio, Qt::SmoothTransformation))); cached_image = std::shared_ptr(new QImage(cached_image->convertToFormat(QImage::Format_RGBA8888))); } +#else + // We need to resize the original image to a smaller image (for performance reasons) + // Only do this once, to prevent tons of unneeded scaling operations + cached_image = std::shared_ptr(new QImage(image->scaled(max_width, max_height, Qt::KeepAspectRatio, Qt::SmoothTransformation))); + cached_image = std::shared_ptr(new QImage(cached_image->convertToFormat(QImage::Format_RGBA8888))); +#endif + } - // Create or get frame object - std::shared_ptr image_frame(new Frame(requested_frame, cached_image->width(), cached_image->height(), "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels), info.channels)); - - // Add Image data to frame - image_frame->AddImage(cached_image); - - // return frame object - return image_frame; - - } else { - // Use original image (higher quality but slower) - // Create or get frame object - std::shared_ptr image_frame(new Frame(requested_frame, info.width, info.height, "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels), info.channels)); + // Create or get frame object + std::shared_ptr image_frame(new 
Frame(requested_frame, cached_image->width(), cached_image->height(), "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels), info.channels)); - // Add Image data to frame - image_frame->AddImage(image); + // Add Image data to frame + image_frame->AddImage(cached_image); - // return frame object - return image_frame; - } + // return frame object + return image_frame; } // Generate JSON string of this object diff --git a/src/ReaderBase.cpp b/src/ReaderBase.cpp index 5de6fdff1..f2607cfd5 100644 --- a/src/ReaderBase.cpp +++ b/src/ReaderBase.cpp @@ -58,8 +58,9 @@ ReaderBase::ReaderBase() info.channel_layout = LAYOUT_MONO; info.audio_stream_index = -1; info.audio_timebase = Fraction(); - max_width = 0; - max_height = 0; + + // Init parent clip + parent = NULL; } // Display file information @@ -246,3 +247,13 @@ void ReaderBase::SetJsonValue(Json::Value root) { } } } + +/// Parent clip object of this reader (which can be unparented and NULL) +ClipBase* ReaderBase::GetClip() { + return parent; +} + +/// Set parent clip object of this reader +void ReaderBase::SetClip(ClipBase* clip) { + parent = clip; +} diff --git a/src/Settings.cpp b/src/Settings.cpp index e6749d5cb..b13f0f5a1 100644 --- a/src/Settings.cpp +++ b/src/Settings.cpp @@ -43,6 +43,8 @@ Settings *Settings::Instance() m_pInstance->HARDWARE_DECODE = false; m_pInstance->HARDWARE_ENCODE = false; m_pInstance->HIGH_QUALITY_SCALING = false; + m_pInstance->MAX_WIDTH = 0; + m_pInstance->MAX_HEIGHT = 0; m_pInstance->WAIT_FOR_VIDEO_PROCESSING_TASK = false; } diff --git a/src/Timeline.cpp b/src/Timeline.cpp index d97b13e4c..28c8956a0 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -60,7 +60,7 @@ Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int cha info.video_length = info.fps.ToFloat() * info.duration; // Init max image size - SetMaxSize(info.width, info.height); + SetMaxSize(info.width, info.height); // Init cache final_cache = new CacheMemory(); 
@@ -213,9 +213,6 @@ std::shared_ptr Timeline::GetOrCreateFrame(Clip* clip, int64_t number) // Debug output ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1); - // Set max image size (used for performance optimization) - clip->SetMaxSize(info.width, info.height); - // Attempt to get a frame (but this could fail if a reader has just been closed) #pragma omp critical (T_GetOtCreateFrame) new_frame = std::shared_ptr(clip->GetFrame(number)); @@ -235,7 +232,7 @@ std::shared_ptr Timeline::GetOrCreateFrame(Clip* clip, int64_t number) ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1); // Create blank frame - new_frame = std::make_shared(number, max_width, max_height, "#000000", samples_in_frame, info.channels); + new_frame = std::make_shared(number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels); #pragma omp critical (T_GetOtCreateFrame) { new_frame->SampleRate(info.sample_rate); @@ -274,7 +271,7 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in // Generate Waveform Dynamically (the size of the timeline) std::shared_ptr source_image; #pragma omp critical (T_addLayer) - source_image = source_frame->GetWaveform(max_width, max_height, red, green, blue, alpha); + source_image = source_frame->GetWaveform(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, red, green, blue, alpha); source_frame->AddImage(std::shared_ptr(source_image)); } @@ -389,7 +386,7 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in { case (SCALE_FIT): { // keep aspect ratio - source_size.scale(max_width, max_height, Qt::KeepAspectRatio); + source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, 
Qt::KeepAspectRatio); // Debug output ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1); @@ -397,18 +394,18 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in } case (SCALE_STRETCH): { // ignore aspect ratio - source_size.scale(max_width, max_height, Qt::IgnoreAspectRatio); + source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::IgnoreAspectRatio); // Debug output ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1); break; } case (SCALE_CROP): { - QSize width_size(max_width, round(max_width / (float(source_size.width()) / float(source_size.height())))); - QSize height_size(round(max_height / (float(source_size.height()) / float(source_size.width()))), max_height); + QSize width_size(Settings::Instance()->MAX_WIDTH, round(Settings::Instance()->MAX_WIDTH / (float(source_size.width()) / float(source_size.height())))); + QSize height_size(round(Settings::Instance()->MAX_HEIGHT / (float(source_size.height()) / float(source_size.width()))), Settings::Instance()->MAX_HEIGHT); // respect aspect ratio - if (width_size.width() >= max_width && width_size.height() >= max_height) + if (width_size.width() >= Settings::Instance()->MAX_WIDTH && width_size.height() >= Settings::Instance()->MAX_HEIGHT) source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio); else source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio); @@ -423,7 +420,7 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in // (otherwise NONE scaling draws the frame image outside of the preview) float source_width_ratio = 
source_size.width() / float(info.width); float source_height_ratio = source_size.height() / float(info.height); - source_size.scale(max_width * source_width_ratio, max_height * source_height_ratio, Qt::KeepAspectRatio); + source_size.scale(Settings::Instance()->MAX_WIDTH * source_width_ratio, Settings::Instance()->MAX_HEIGHT * source_height_ratio, Qt::KeepAspectRatio); // Debug output ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_NONE)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1); @@ -444,32 +441,32 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in switch (source_clip->gravity) { case (GRAVITY_TOP): - x = (max_width - scaled_source_width) / 2.0; // center + x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center break; case (GRAVITY_TOP_RIGHT): - x = max_width - scaled_source_width; // right + x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right break; case (GRAVITY_LEFT): - y = (max_height - scaled_source_height) / 2.0; // center + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center break; case (GRAVITY_CENTER): - x = (max_width - scaled_source_width) / 2.0; // center - y = (max_height - scaled_source_height) / 2.0; // center + x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center break; case (GRAVITY_RIGHT): - x = max_width - scaled_source_width; // right - y = (max_height - scaled_source_height) / 2.0; // center + x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center break; case (GRAVITY_BOTTOM_LEFT): - y = (max_height - scaled_source_height); // bottom + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom break; case 
(GRAVITY_BOTTOM): - x = (max_width - scaled_source_width) / 2.0; // center - y = (max_height - scaled_source_height); // bottom + x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom break; case (GRAVITY_BOTTOM_RIGHT): - x = max_width - scaled_source_width; // right - y = (max_height - scaled_source_height); // bottom + x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom break; } @@ -478,8 +475,8 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in /* LOCATION, ROTATION, AND SCALE */ float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees - x += (max_width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width - y += (max_height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height + x += (Settings::Instance()->MAX_WIDTH * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width + y += (Settings::Instance()->MAX_HEIGHT * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height float shear_x = source_clip->shear_x.GetValue(clip_frame_number); float shear_y = source_clip->shear_y.GetValue(clip_frame_number); @@ -746,7 +743,7 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame) int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels); // Create blank frame (which will become the requested frame) - std::shared_ptr new_frame(std::make_shared(frame_number, max_width, max_height, "#000000", samples_in_frame, info.channels)); + std::shared_ptr new_frame(std::make_shared(frame_number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels)); #pragma omp critical (T_GetFrame) { 
new_frame->AddAudioSilence(samples_in_frame); @@ -760,7 +757,7 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame) // Add Background Color to 1st layer (if animated or not black) if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) || (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0)) - new_frame->AddColor(max_width, max_height, color.GetColorHex(frame_number)); + new_frame->AddColor(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, color.GetColorHex(frame_number)); // Debug output ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1, "", -1); @@ -1195,17 +1192,6 @@ void Timeline::apply_json_to_clips(Json::Value change) { // Apply framemapper (or update existing framemapper) apply_mapper_to_clip(existing_clip); - - // Clear any cached image sizes (since size might have changed) - existing_clip->SetMaxSize(0, 0); // force clearing of cached image size - if (existing_clip->Reader()) { - existing_clip->Reader()->SetMaxSize(0, 0); - if (existing_clip->Reader()->Name() == "FrameMapper") { - FrameMapper *nested_reader = (FrameMapper *) existing_clip->Reader(); - if (nested_reader->Reader()) - nested_reader->Reader()->SetMaxSize(0, 0); - } - } } } else if (change_type == "delete") { @@ -1452,3 +1438,11 @@ void Timeline::ClearAllCache() { } } + +// Set Max Image Size (used for performance optimization). Convenience function for setting +// Settings::Instance()->MAX_WIDTH and Settings::Instance()->MAX_HEIGHT. 
+void Timeline::SetMaxSize(int width, int height) { + // Init max image size + Settings::Instance()->MAX_WIDTH = width; + Settings::Instance()->MAX_HEIGHT = height; +} \ No newline at end of file diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index bb2f8c2a1..ed6b25f38 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -179,13 +179,21 @@ FIND_PACKAGE(ZMQ REQUIRED) # Include ZeroMQ headers (needed for compile) include_directories(${ZMQ_INCLUDE_DIRS}) +################### RESVG ##################### +# Find resvg library (used for rendering svg files) +FIND_PACKAGE(RESVG REQUIRED) + +# Include resvg headers (optional SVG library) +if (RESVG_FOUND) + include_directories(${RESVG_INCLUDE_DIRS}) +endif(RESVG_FOUND) + ################### JSONCPP ##################### # Include jsoncpp headers (needed for JSON parsing) if (USE_SYSTEM_JSONCPP) find_package(JsonCpp REQUIRED) include_directories(${JSONCPP_INCLUDE_DIRS}) else() - message("Using embedded JsonCpp") include_directories("../thirdparty/jsoncpp/include") endif(USE_SYSTEM_JSONCPP)