From e412c1af3a250528936b48a9237de30f8c25a018 Mon Sep 17 00:00:00 2001 From: Alexander Piskun Date: Sat, 14 Sep 2024 01:04:13 +0300 Subject: [PATCH 1/3] moved docs from Sphinx to MkDocs Signed-off-by: Alexander Piskun --- .github/workflows/docs-check.yml | 5 +- .github/workflows/docs.yml | 4 +- .gitignore | 2 + Makefile | 13 - README.md | 17 +- docs/ComfyUI2VixMigration.rst | 105 -- docs/CommandLineOptions.rst | 105 -- docs/FAQ.rst | 14 - docs/Flows/Colorful_XL.md | 33 + docs/Flows/ComicuPortrait.md | 21 + docs/Flows/Flux_1.md | 38 + docs/Flows/GhibliPortrait.md | 19 + docs/Flows/HumanFaceDetailer.md | 17 + docs/Flows/HunyuanDiT.md | 29 + docs/Flows/Inpaint.md | 48 + docs/Flows/Juggernaut_Lite.md | 25 + docs/Flows/Juggernaut_XL.md | 29 + docs/Flows/MadScientist.md | 31 + docs/Flows/MemojiPortrait.md | 24 + docs/Flows/Mobius_XL.md | 33 + docs/Flows/PhotoStickers.md | 17 + docs/Flows/PhotoStickers2.md | 24 + docs/Flows/Photomaker_1.md | 40 + docs/Flows/Playground_2_5_aesthetic.md | 36 + docs/Flows/Playground_2_5_prometheus.md | 35 + docs/Flows/SD3_Medium.md | 33 + docs/Flows/SDXL_Lighting.md | 21 + docs/Flows/SketchPortrait.md | 19 + docs/Flows/Stable_Cascade.md | 31 + docs/Flows/SupirUpscaler.md | 54 + docs/Flows/VintagePortrait.md | 21 + docs/Flows/index.md | 29 + docs/GatedModels.rst | 29 - docs/HardwareFAQ.rst | 38 - docs/Installation.rst | 90 - docs/Makefile | 29 - docs/TechnicalInformation.rst | 97 -- docs/VixWorkflows.rst | 207 --- docs/WorkingModes.rst | 89 - docs/comfyui_vix_migration.md | 128 ++ docs/command_line_options.md | 224 +++ docs/conf.py | 95 -- docs/faq.md | 20 + docs/gated_models.md | 42 + docs/hardware_faq.md | 47 + docs/hardware_results.md | 179 ++ docs/index.md | 17 + docs/index.rst | 24 - docs/installation.md | 83 + docs/resources/css/dark.css | 1996 ----------------------- docs/resources/css/light.css | 8 - docs/resources/css/styles.css | 12 - docs/resources/js/script.js | 58 - docs/resources/logo.svg | 1493 ----------------- 
docs/technical_information.md | 115 ++ docs/vix_workflows.md | 228 +++ docs/working_modes.md | 119 ++ generate_hardware_results.py | 64 + mkdocs.yml | 50 + requirements.txt | 9 +- 60 files changed, 2041 insertions(+), 4521 deletions(-) delete mode 100644 Makefile delete mode 100644 docs/ComfyUI2VixMigration.rst delete mode 100644 docs/CommandLineOptions.rst delete mode 100644 docs/FAQ.rst create mode 100644 docs/Flows/Colorful_XL.md create mode 100644 docs/Flows/ComicuPortrait.md create mode 100644 docs/Flows/Flux_1.md create mode 100644 docs/Flows/GhibliPortrait.md create mode 100644 docs/Flows/HumanFaceDetailer.md create mode 100644 docs/Flows/HunyuanDiT.md create mode 100644 docs/Flows/Inpaint.md create mode 100644 docs/Flows/Juggernaut_Lite.md create mode 100644 docs/Flows/Juggernaut_XL.md create mode 100644 docs/Flows/MadScientist.md create mode 100644 docs/Flows/MemojiPortrait.md create mode 100644 docs/Flows/Mobius_XL.md create mode 100644 docs/Flows/PhotoStickers.md create mode 100644 docs/Flows/PhotoStickers2.md create mode 100644 docs/Flows/Photomaker_1.md create mode 100644 docs/Flows/Playground_2_5_aesthetic.md create mode 100644 docs/Flows/Playground_2_5_prometheus.md create mode 100644 docs/Flows/SD3_Medium.md create mode 100644 docs/Flows/SDXL_Lighting.md create mode 100644 docs/Flows/SketchPortrait.md create mode 100644 docs/Flows/Stable_Cascade.md create mode 100644 docs/Flows/SupirUpscaler.md create mode 100644 docs/Flows/VintagePortrait.md create mode 100644 docs/Flows/index.md delete mode 100644 docs/GatedModels.rst delete mode 100644 docs/HardwareFAQ.rst delete mode 100644 docs/Installation.rst delete mode 100644 docs/Makefile delete mode 100644 docs/TechnicalInformation.rst delete mode 100644 docs/VixWorkflows.rst delete mode 100644 docs/WorkingModes.rst create mode 100644 docs/comfyui_vix_migration.md create mode 100644 docs/command_line_options.md delete mode 100644 docs/conf.py create mode 100644 docs/faq.md create mode 100644 
docs/gated_models.md create mode 100644 docs/hardware_faq.md create mode 100644 docs/hardware_results.md create mode 100644 docs/index.md delete mode 100644 docs/index.rst create mode 100644 docs/installation.md delete mode 100644 docs/resources/css/dark.css delete mode 100644 docs/resources/css/light.css delete mode 100644 docs/resources/css/styles.css delete mode 100644 docs/resources/js/script.js delete mode 100644 docs/resources/logo.svg create mode 100644 docs/technical_information.md create mode 100644 docs/vix_workflows.md create mode 100644 docs/working_modes.md create mode 100644 generate_hardware_results.py create mode 100644 mkdocs.yml diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml index 9901978..d6307fd 100644 --- a/.github/workflows/docs-check.yml +++ b/.github/workflows/docs-check.yml @@ -18,6 +18,5 @@ jobs: - name: Install Docs dependencies run: python3 -m pip install -r requirements.txt - - name: Build and push Docs - run: | - make html SPHINXOPTS="-W" + - name: Build Docs + run: mkdocs build --strict diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index e044551..d8be07c 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -50,11 +50,11 @@ jobs: - name: Build and push Docs run: | export CHANGES_DATE=`date -d"@$(git log -1 --pretty=%ct)" --iso-8601=seconds` - make html + mkdocs build git config --global user.name bigcat88 git config --global user.email "bigcat88@users.noreply.github.com" docroot=`mktemp -d` - rsync -av "docs/_build/html/" "${docroot}/" + rsync -av "site/" "${docroot}/" rsync -av "flows_publish/" "${docroot}/" rsync -av "models_catalog.json" "${docroot}/" rsync -av "docs/swagger-ui/" "${docroot}/swagger-ui/" diff --git a/.gitignore b/.gitignore index cb46ae9..14d0378 100644 --- a/.gitignore +++ b/.gitignore @@ -103,3 +103,5 @@ scripts/visionatrix tasks_history.db /flows.zip flows_publish +hardware_results/ +site/ diff --git a/Makefile b/Makefile deleted file 
mode 100644 index ef30791..0000000 --- a/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -.DEFAULT_GOAL := help - -.PHONY: docs -.PHONY: html -docs html: - rm -rf docs/_build - $(MAKE) -C docs html - -.PHONY: help -help: - @echo "Welcome to Visionatrix development. Please use \`make \` where is one of" - @echo " docs make HTML docs" - @echo " " diff --git a/README.md b/README.md index 04c19b5..f5fd0f4 100644 --- a/README.md +++ b/README.md @@ -8,12 +8,13 @@ For any problems with Visionatrix or suggestions for improvement, go to the [mai - [Documentation](https://visionatrix.github.io/VixFlowsDocs/) - [Available Flows](https://visionatrix.github.io/VixFlowsDocs/Flows/index.html) - - [Manual Installation](https://visionatrix.github.io/VixFlowsDocs/Installation.html) - - [Command Line Options](https://visionatrix.github.io/VixFlowsDocs/CommandLineOptions.html) - - [Working modes](https://visionatrix.github.io/VixFlowsDocs/WorkingModes.html) - - [Vix Workflows](https://visionatrix.github.io/VixFlowsDocs/VixWorkflows.html) - - [Creating Workflows](https://visionatrix.github.io/VixFlowsDocs/ComfyUI2VixMigration.html) - - [Technical information](https://visionatrix.github.io/VixFlowsDocs/TechnicalInformation.html) - - [FAQ](https://visionatrix.github.io/VixFlowsDocs/FAQ.html) - - [Hardware FAQ](https://visionatrix.github.io/VixFlowsDocs/HardwareFAQ.html) + - [Manual Installation](https://visionatrix.github.io/VixFlowsDocs/installation.html) + - [Command Line Options](https://visionatrix.github.io/VixFlowsDocs/command_line_options.html) + - [Working modes](https://visionatrix.github.io/VixFlowsDocs/working_modes.html) + - [Vix Workflows](https://visionatrix.github.io/VixFlowsDocs/vix_workflows.html) + - [Creating Workflows](https://visionatrix.github.io/VixFlowsDocs/comfyui_vix_migration.html) + - [Technical information](https://visionatrix.github.io/VixFlowsDocs/technical_information.html) + - [FAQ](https://visionatrix.github.io/VixFlowsDocs/faq.html) + - [Hardware 
FAQ](https://visionatrix.github.io/VixFlowsDocs/hardware_faq.html) + - [Hardware Results](https://visionatrix.github.io/VixFlowsDocs/hardware_results.html) - [OpenAPI](https://visionatrix.github.io/VixFlowsDocs/swagger.html) diff --git a/docs/ComfyUI2VixMigration.rst b/docs/ComfyUI2VixMigration.rst deleted file mode 100644 index 9acee51..0000000 --- a/docs/ComfyUI2VixMigration.rst +++ /dev/null @@ -1,105 +0,0 @@ -ComfyUI to Visionatrix migration -================================ - -If you want to adopt your ComfyUI workflow to use in Visionatrix, -you can use this guide to help you do so. There are a few steps you need to follow. - - -1. Install ComfyUI-Visionatrix custom nodes -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -First, it is recommended to install our custom `ComfyUI-Visionatrix `_ nodes. -Otherwise you will have to use custom nodes titles which are will be parsed by Visionatrix. - -.. code-block:: bash - - git clone https://github.com/Visionatrix/ComfyUI-Visionatrix.git - -.. note:: - - You can do the required migration via nodes titles, which is less convenient. - The node title must be like this: ``input;Display Name;optional;advanced;order=1;custom_id=custom_name``. - - -2. Define the input params -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Visionatrix UI aims simplicity and clarity. -Define the most important input params of your ComfyUI workflow to extract -them to the Visionatrix UI as inputs, for example: - -- prompt (textarea) -- negative prompt (textarea) -- prompt strength (range) -- some logic toggles (checkbox) -- input files (file) - -For that you will need to attach our custom nodes as adapters to your nodes receiving these inputs -that are will be filled by the user from the Visionatrix UI. - -As example, you can have a look at our `list of worklows `_ adopted to the new format. - -.. note:: - - The list of available nodes can be found in the readme of the `ComfyUI-Visionatrix `_ repository. 
- - -2.1 Node to Input mapping via title string -****************************************** - -Alternatively, Visionatrix supports other Nodes mapping as an input param -via node title string separated by semicolon. - - The nodes titles starting with ``input;`` keyword are considered as - Visionatrix input param. - -The parameters list: - -- ``input`` - keyword to define the input param -- ``Display Name`` - positional parameter, the name of the input field displayed in the UI -- ``optional`` - if present, the optional field is set to True -- ``advanced`` - if present, the advanced field is set to True -- ``order=1`` - the order of the input param in the UI -- ``custom_id=custom_name`` - the custom id of the input param - -.. note:: - - ``order`` is important if workflow accepts more than 1 file. - - -2.2 External nodes used via Node to Input mapping -************************************************* - -In our workflows, we use some external nodes mapped as input params, that you can use as example: - -- ``SDXLAspectRatioSelector`` - select input field used from `comfyui-art-venture `_ for Aspect Ratio select. Usually it's an optional and hidden to the advanced prompt options: ``input;Aspect Ratio;optional;advanced;custom_id=aspect_ratio``; -- ``LoadImage`` - default ComfyUI image loader node as image file input field. As required title: ``input;Input image;order=1``, or optional advanced: ``input;Optional helper image;optional;advanced;order=20``; - - -3. Map the models for automatic download -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Visionatrix simplifies and automates the process of downloading the models. -As the third step of the migration, you need to map the models that are used in your workflow (see :ref:`models-mapping`). - - -4. Build the list of available flows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The last step is to build the list of available flows in the Visionatrix UI. 
-Follow the steps described in `options.py `_ file -for ``FLOWS_URL`` and ``MODELS_CATALOG_URL`` to enable Visionatrix local workflows development mode: - -Create a zip with adjusted/new flows: - -.. code-block:: bash - - cd ../VixFlowsDocs && zip -r ../Visionatrix/flows.zip flows && cd ../Visionatrix - -And uncomment appropriate code lines in `options.py file `_ to use local versions of the flows. - - -5. Verify and test the workflow -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Last step is to run Visionatrix and setup your workflow to verify that everything works as expected. diff --git a/docs/CommandLineOptions.rst b/docs/CommandLineOptions.rst deleted file mode 100644 index 58e0a58..0000000 --- a/docs/CommandLineOptions.rst +++ /dev/null @@ -1,105 +0,0 @@ -Command Line Options -==================== - -Most of the options supported by `ComfyUI` are supported. - -They can be specified when starting manually: - -.. code-block:: shell - - python3 -m visionatrix run --ui --use-split-cross-attention --disable-smart-memory - -Here are the list of the supported command line options related to `Visionatrix` for **run** command: - - --backend_dir=BACKEND_DIR - Directory for the backend(folder with ComfyUI) - Default: ``vix_backend`` - --flows_dir=FLOWS_DIR - Directory for the flows - Default: ``vix_flows`` - --models_dir=MODELS_DIR - Directory for the models - Default: ```vix_models`` - --tasks_files_dir=TASKS_FILES_DIR - Directory for input/output files - Default: ``vix_task_files`` - --host=HOST Host to be used by Visionatrix - --port=PORT Port to be used by Visionatrix - --ui Flag to enable User interface(JS frontend). - -Supported **ComfyUI** options ------------------------------ - - --cuda-device DEVICE_ID - Set the id of the cuda device this instance will use. - --cuda-malloc Enable cudaMallocAsync (enabled by default for torch - 2.0 and up). - --disable-cuda-malloc - Disable cudaMallocAsync. - --force-fp32 Force fp32 (If this makes your GPU work better please report it). 
- --force-fp16 Force fp16. - --bf16-unet Run the UNET in bf16. This should only be used for - testing stuff. - --fp16-unet Store unet weights in fp16. - --fp8_e4m3fn-unet Store unet weights in fp8_e4m3fn. - --fp8_e5m2-unet Store unet weights in fp8_e5m2. - --fp16-vae Run the VAE in fp16, might cause black images. - --fp32-vae Run the VAE in full precision fp32. - --bf16-vae Run the VAE in bf16. - --cpu-vae Run the VAE on the CPU. - --fp8_e4m3fn-text-enc - Store text encoder weights in fp8 (e4m3fn variant). - --fp8_e5m2-text-enc Store text encoder weights in fp8 (e5m2 variant). - --fp16-text-enc Store text encoder weights in fp16. - --fp32-text-enc Store text encoder weights in fp32. - --cache-classic Use the old style (aggressive) caching. (Default) - --cache-lru Use LRU caching with a maximum of N node results cached. May use more RAM/VRAM. - --disable-ipex-optimize - Disables ipex.optimize when loading models with Intel GPUs. - --use-split-cross-attention - Use the split cross attention optimization. Ignored when xformers is used. - --use-quad-cross-attention - Use the sub-quadratic cross attention optimization. Ignored when xformers is used. - --use-pytorch-cross-attention - Use the new pytorch 2.0 cross attention function. - --disable-xformers Disable xformers. - - --force-upcast-attention - Force enable attention upcasting, please report if it fixes black images. - --dont-upcast-attention - Disable all upcasting of attention. Should be unnecessary except for debugging. - - --gpu-only Store and run everything (text encoders/CLIP models, - etc... on the GPU). - --highvram By default models will be unloaded to CPU memory after - being used. This option keeps them in GPU memory. - --normalvram Used to force normal vram use if lowvram gets - automatically enabled. - --lowvram Split the unet in parts to use less vram. - --novram When lowvram isn't enough. - --cpu To use the CPU for everything (slow). 
- --reserve-vram Set the amount of VRAM in GB you want to reserve for use by your OS/other software. - --disable-smart-memory - Force ComfyUI to aggressively offload to regular ram - instead of keeping models in vram when it can. - --fast Enable some untested and potentially quality deteriorating optimizations. - -Additional commands -------------------- - -install-flow -'''''''''''' - -Can be used for Workers that do not have a user interface. - -.. code-block:: shell - - python3 -m visionatrix install-flow --file=path_to_json - -File should contain ComfyUI workflow with the `metadata `_ needed for Visionatrix. - -.. code-block:: shell - - python3 -m visionatrix install-flow --name=photo_stickers - -This will install flow by it's ``ID`` which is equal to it's folder name `here `_ diff --git a/docs/FAQ.rst b/docs/FAQ.rst deleted file mode 100644 index 10da941..0000000 --- a/docs/FAQ.rst +++ /dev/null @@ -1,14 +0,0 @@ -FAQ -=== - -Q: Can I use ComfyUI which is included in Visionatrix? - -A: Yes, you can install your Nodes there and run ComfyUI separately. You can also tell Visionatrix to use ComfyUI installed in a different path. See the `--backend_dir `_ parameter. - ---- - -Q: Can I run it on multiple GPU? - -A: You can run one worker on one GPU and process tasks in parallel, take a look at `Server and Worker modes `_. - ---- diff --git a/docs/Flows/Colorful_XL.md b/docs/Flows/Colorful_XL.md new file mode 100644 index 0000000..4b8e795 --- /dev/null +++ b/docs/Flows/Colorful_XL.md @@ -0,0 +1,33 @@ +# Colorful XL + +A fairly simple flow at the moment, simply using the latest **Colorful XL** model without any post-processing. + +!!! 
warning + + **Not Safe for Work (NSFW) version.** + +**Supports various aspect ratios.** + +**Supports fast generation using the Align Steps technique** + +## Examples + +![Image](../FlowsResults/Colorful_XL_1.png) + + portrait, half-robot woman, in the dark, contrasting light, realistic, masterpiece + +--- + +![Image](../FlowsResults/Colorful_XL_2.png) + + half-cat woman, in the forest, vivid lights, realistic, masterpiece + +- Fast Run: true, Vibrancy: 3 + +--- + +![Image](../FlowsResults/Colorful_XL_3.png) + + portrait, young man, angel, sky, sun, high contrast + +- Fast Run: true, Vibrancy: 2, Steps number to generate: 60 diff --git a/docs/Flows/ComicuPortrait.md b/docs/Flows/ComicuPortrait.md new file mode 100644 index 0000000..a2190b4 --- /dev/null +++ b/docs/Flows/ComicuPortrait.md @@ -0,0 +1,21 @@ +# ComicU Anime Portrait + +Create an anime(sketch by default) image from a photo of a person. + +**Prompt** is optional, something like emotions can be used there: *smile, sad, serious, etc*. + +> *If you ticked the "Disable Simple style" you can try to add something like* **line-sketch** *to the prompt.* + +Part of the flow runs on the CPU, part on the GPU, the flow is quite fast and convenient for everyday use. + +## Examples + +> As input files, the photos of `Bruce Lee` and `Shakira` were taken from the Internet and used. + +![Image](../FlowsResults/ComicuPortrait_1.png) + +![Image](../FlowsResults/ComicuPortrait_2.png) + +![Image](../FlowsResults/ComicuPortrait_3.png) + +![Image](../FlowsResults/ComicuPortrait_4.png) diff --git a/docs/Flows/Flux_1.md b/docs/Flows/Flux_1.md new file mode 100644 index 0000000..e412673 --- /dev/null +++ b/docs/Flows/Flux_1.md @@ -0,0 +1,38 @@ +# Flux 1 + +**FLUX.1** is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions. 
For more information, please read [blog post](https://blackforestlabs.ai/announcing-black-forest-labs/) + +*The model is extremely demanding on hardware.* + +Even on 24 Gigabytes, the speed of the model in the full version leaves much to be desired, since this basic flow does not fit completely into the video card cache. + +Lightning versions are quite good and generate quite good pictures in 4 steps. + +Totally with these models there are 4 different flows: + +* Flux +* Flux (Small) +* Flux Lighting +* Flux Lighting (Small) + +**Supports various aspect ratios.** + +**Supports different number of steps for non-Lighting versions.** + +## Flux Small Example + +![Image](../FlowsResults/Flux_1_1.png) + + photo-realistic portrait of a cute kitten in cyberpunk style holding sign "Visionatrix" in ultra quality with high details + +## Flux Lighting Example + +![Image](../FlowsResults/Flux_1_2.png) + + A cool man is driving a luxury car through a night city. The scene captures the vibrant nightlife with glowing neon signs, tall skyscrapers, and bustling streets. The dad is stylishly dressed, exuding confidence and charisma. The luxury car, sleek and modern, reflects the city lights, enhancing the atmosphere of urban sophistication and adventure. + +## Flux Example (50 steps) + +![Image](../FlowsResults/Flux_1_3.png) + + Portrait of beautiful woman in a swimsuit is lounging under a palm tree on a tropical beach. The scene is photorealistic, capturing the serene and picturesque setting with clear blue skies, gentle waves, and white sandy shores. The palm tree provides shade, and the overall atmosphere is one of leisure and tropical paradise. diff --git a/docs/Flows/GhibliPortrait.md b/docs/Flows/GhibliPortrait.md new file mode 100644 index 0000000..392deb1 --- /dev/null +++ b/docs/Flows/GhibliPortrait.md @@ -0,0 +1,19 @@ +# Ghibli Studio Portrait + +Create an anime image from a person photo. 
+ +**Prompt** is optional, something like emotions can be used there: *smile, sad, serious, etc*. + +Part of the flow runs on the CPU, part on the GPU, the flow is quite fast and convenient for everyday use. + +## Examples + +> As input files, the photos of `Bruce Lee` and `Shakira` were taken from the Internet and used. + +![Image](../FlowsResults/GhibliPortrait_1.png) + +![Image](../FlowsResults/GhibliPortrait_2.png) + +![Image](../FlowsResults/GhibliPortrait_3.png) + +![Image](../FlowsResults/GhibliPortrait_4.png) diff --git a/docs/Flows/HumanFaceDetailer.md b/docs/Flows/HumanFaceDetailer.md new file mode 100644 index 0000000..479f7ee --- /dev/null +++ b/docs/Flows/HumanFaceDetailer.md @@ -0,0 +1,17 @@ +# Human Face Detailer + +Initially this was part of Playground 2.5 Aesthetics Flow **until Visionatrix 1.0** version. + +It was decided to move it to a separate flow, so that it would be convenient to send the result of any flow here. + +This flow works by default at the moment on the CPU. + +The maximum number of faces for redrawing on a portrait is limited to **3**. + +If no faces are found in the input image, nothing will be redrawn. + +## Examples + +![Image](../FlowsResults/HumanFaceDetailer_original_1.png) + +![Image](../FlowsResults/HumanFaceDetailer_modified_1.png) diff --git a/docs/Flows/HunyuanDiT.md b/docs/Flows/HunyuanDiT.md new file mode 100644 index 0000000..bc8a962 --- /dev/null +++ b/docs/Flows/HunyuanDiT.md @@ -0,0 +1,29 @@ +# HunyuanDiT + +Powerful Multi-Resolution Diffusion Transformer with Fine-Grained Chinese Understanding. + +A solid model that supports natively very high **Vibrancy**. 
+ +## Examples + +![Image](../FlowsResults/HunyuanDiT_1.png) + + portrait of a majestic insect + +- Vibrancy: 7, Steps: 60 + +--- + +![Image](../FlowsResults/HunyuanDiT_2.png) + + close portrait of happy girl on the great wall + +- Vibrancy: 7, Steps: 60 + +--- + +![Image](../FlowsResults/HunyuanDiT_3.png) + + portrait of a black pug on the yellow grass + +- Vibrancy: 6, Steps: 30 diff --git a/docs/Flows/Inpaint.md b/docs/Flows/Inpaint.md new file mode 100644 index 0000000..2451655 --- /dev/null +++ b/docs/Flows/Inpaint.md @@ -0,0 +1,48 @@ +# Inpaint + +Currently only a few inpaint flows are available: + +* Flux Redraw +* Flux Redraw (Small) +* Flux Redraw Lighting (Small) +* ColorfulXL + +Inpainting is hard, you always need to try to select the correct **Replacing factor** parameter, and it strongly depends on the image and the flow. + +As examples, we will use real photos, it is harder to inpaint them, since they have a high resolution, and current models draw in 1024x1024 resolution. + +It is easier to "repaint" photos generated with AI, as they have usually low resolutions. 
+ +## Examples + +Original: + +![Image](../FlowsResults/Inpaint_1_original1.png) + +Results of painting long and lush luxurious hair: + +![Image](../FlowsResults/Inpaint_1_result1.png) +![Image](../FlowsResults/Inpaint_1_result2.png) + +--- + +Results of painting on the left of the man: + +![Image](../FlowsResults/Inpaint_1_result3.png) +![Image](../FlowsResults/Inpaint_1_result4.png) + +Results of painting Moon on the right of the man: + +![Image](../FlowsResults/Inpaint_1_result5.png) + +--- + +Original with baby photo: + +![Image](../FlowsResults/Inpaint_1_original2.png) + +Results of redrawing the upper part of the image: + +![Image](../FlowsResults/Inpaint_1_result6.png) +![Image](../FlowsResults/Inpaint_1_result7.png) +![Image](../FlowsResults/Inpaint_1_result8.png) diff --git a/docs/Flows/Juggernaut_Lite.md b/docs/Flows/Juggernaut_Lite.md new file mode 100644 index 0000000..23a0632 --- /dev/null +++ b/docs/Flows/Juggernaut_Lite.md @@ -0,0 +1,25 @@ +# Juggernaut Lite + +This flow is most suitable for generating people quickly and realistically. + +Although it sometimes has problems with eyes or hands, in most cases the quality is quite acceptable. + +**Supports various aspect ratios.** + +## Examples + +![Image](../FlowsResults/Juggernaut_Lite_1.png) + + portrait of hero wearing cuirass sitting on the chair, high details, photo realistic + +--- + +![Image](../FlowsResults/Juggernaut_Lite_2.png) + + portrait of elf man in obsidian armor looking at viewer from the dark, contrast, high details + +--- + +![Image](../FlowsResults/Juggernaut_Lite_3.png) + + portrait rage tiger diff --git a/docs/Flows/Juggernaut_XL.md b/docs/Flows/Juggernaut_XL.md new file mode 100644 index 0000000..55406e7 --- /dev/null +++ b/docs/Flows/Juggernaut_XL.md @@ -0,0 +1,29 @@ +# Juggernaut XL + +A fairly simple flow at the moment, simply using the latest **Juggernaut X** model without any post-processing. 
+ +> **Note**: **Not Safe for Work (NSFW) version.** + +Prompting information can be found here: [Juggernaut-X prompting](https://storage.googleapis.com/run-diffusion-public-assets/Prompting_Juggernaut_X.pdf) + +**Supports various aspect ratios.** + +**Supports fast generation using the Align Steps technique** + +## Examples + +![Image](../FlowsResults/Juggernaut_XL_1.png) + + close portrait of hero wearing cuirass sitting on the chair, high details + +--- + +![Image](../FlowsResults/Juggernaut_XL_2.png) + + portrait of elf man in obsidian armor looking at viewer from the dark, contrast, high details + +--- + +![Image](../FlowsResults/Juggernaut_XL_3.png) + + portrait rage tiger, high resolution diff --git a/docs/Flows/MadScientist.md b/docs/Flows/MadScientist.md new file mode 100644 index 0000000..5939d78 --- /dev/null +++ b/docs/Flows/MadScientist.md @@ -0,0 +1,31 @@ +# Mad Scientist + +> This feature requires vision capabilities. +> +> You must either have the **Ollama** server running with the **llava:7b-v1.6-vicuna-q8_0 model**, +> or provide a `Gemini API key` in the settings. + +There are only two required arguments: + +1. Source file with a person's face. +2. The file from which style will be created and applied to the source file. + +The results of this flow are amazing. + +## Hardware requirements + +Depends on whether the Ollama server is running locally or remotely. + +It can run on Macbook 32GB (including Ollama running locally on the same device) + +Since the Ollama model used here requires 7GB models and uses SDXL models from the workflow, it will likely require a 16GB memory card to run it on the GPU. + +But you can always run Ollama on a CPU or other device and after that a 10GB graphics card will be enough. 
+ +## Examples + +![Image](../FlowsResults/MadScientist_1.png) + +![Image](../FlowsResults/MadScientist_2.png) + +![Image](../FlowsResults/MadScientist_3.png) diff --git a/docs/Flows/MemojiPortrait.md b/docs/Flows/MemojiPortrait.md new file mode 100644 index 0000000..6a0eb9b --- /dev/null +++ b/docs/Flows/MemojiPortrait.md @@ -0,0 +1,24 @@ +# Memoji Portrait + +Create cute Memoji from a photo of a person. + +**Prompt** is required; the simplest example is: `girl, portrait, close up` + + To make it look more like Memoji you can add **sico style** words: + `**sico style**, girl, portrait, close up` + +**Person's face pose** is optional. + +Part of the flow runs on the CPU, part on the GPU, the flow is quite fast and convenient for everyday use. + +## Examples + +> As input files, the photos of `Bruce Lee` and `Einstein` were taken from the Internet and used. + +![Image](../FlowsResults/MemojiPortrait_1.png) + +![Image](../FlowsResults/MemojiPortrait_2.png) + +![Image](../FlowsResults/MemojiPortrait_3.png) + +![Image](../FlowsResults/MemojiPortrait_4.png) diff --git a/docs/Flows/Mobius_XL.md b/docs/Flows/Mobius_XL.md new file mode 100644 index 0000000..32e68d1 --- /dev/null +++ b/docs/Flows/Mobius_XL.md @@ -0,0 +1,33 @@ +# Mobius XL + +A fairly simple flow at the moment, simply using the latest [Mobius](https://huggingface.co/Corcelio/mobius) model without any post-processing. + +> *This is a very unusual model, although it is part of the SDXL family of models - its results in some areas are simply amazing.* + +It has better text drawing capabilities than other SDXL models. + +Since the author of this model is constantly improving it, we will update it with new versions when they are published. + +Here is a link to [civitai](https://civitai.com/models/490622/mobius) to learn more about the model. 
+ +Link to the author of the model on [Twitter](https://x.com/DataPlusEngine) + +**Supports fast generation using the Align Steps technique** + +## Examples + +![Image](../FlowsResults/Mobius_XL_1.png) + + emotional owl looks at the viewer in surprise, masterpiece, cinematic, best quality + +--- + +![Image](../FlowsResults/Mobius_XL_2.png) + + very angry emotional pug, future, best quality, masterpiece, cinematic, ("VIX" text logo) + +--- + +![Image](../FlowsResults/Mobius_XL_3.png) + + portrait of male paratrooper, explosions background, masterpiece, cinematic, best quality diff --git a/docs/Flows/PhotoStickers.md b/docs/Flows/PhotoStickers.md new file mode 100644 index 0000000..b3f1df8 --- /dev/null +++ b/docs/Flows/PhotoStickers.md @@ -0,0 +1,17 @@ +# Photo Stickers + +Turns a photo into 4 anime stickers with different emotions. + +Part of the flow runs on the CPU, part on the GPU, the flow is quite fast and convenient for everyday use. + +## Examples + +> As input file, the photo of `Bruce Lee` was taken from the Internet and used. + +![Image](../FlowsResults/PhotoStickers_1.png) + +![Image](../FlowsResults/PhotoStickers_2.png) + +![Image](../FlowsResults/PhotoStickers_3.png) + +![Image](../FlowsResults/PhotoStickers_4.png) diff --git a/docs/Flows/PhotoStickers2.md b/docs/Flows/PhotoStickers2.md new file mode 100644 index 0000000..43387b4 --- /dev/null +++ b/docs/Flows/PhotoStickers2.md @@ -0,0 +1,24 @@ +# Photo Stickers 2 + +> This feature requires vision capabilities. +> +> You must either have the **Ollama** server running with the **llava:7b-v1.6-vicuna-q8_0 model**, +> or provide a `Gemini API key` in the settings. + +Turns a photo into 4 stickers using different prompts. + +Part of the flow runs on the CPU, part on the GPU, the flow is quite fast and convenient for everyday use. 
+ +Original flow/idea examples: [StickerYou - 1 photo for stickers](https://openart.ai/workflows/rui400/stickeryou---1-photo-for-stickers/e8TPNxcEGKdNJ40bQXlU) + +## Examples + +> As input file, the photo of `Bruce Lee` was taken from the Internet and used with default prompts. + +![Image](../FlowsResults/PhotoStickers2_1.png) + +![Image](../FlowsResults/PhotoStickers2_2.png) + +![Image](../FlowsResults/PhotoStickers2_3.png) + +![Image](../FlowsResults/PhotoStickers2_4.png) diff --git a/docs/Flows/Photomaker_1.md b/docs/Flows/Photomaker_1.md new file mode 100644 index 0000000..92c0d3f --- /dev/null +++ b/docs/Flows/Photomaker_1.md @@ -0,0 +1,40 @@ +# Photomaker + +Creates fairly good and realistic images of a person in different styles based on one photo. +The person's face in the incoming image should preferably occupy most of the screen. + +> Advanced parameter **Accuracy** currently applies only to one result image. + +!!! note + + **Recommended words to be present in the prompt**: woman/girl/man/boy + +## Examples + +> The image of `Bruce Lee` was taken from the Internet and used as a basis for first two prompts, + for the third prompt `Erin Starlight` photo was used. 
+ +![Image](../FlowsResults/Photomaker_1_1_1.png) +![Image](../FlowsResults/Photomaker_1_1_2.png) + + portrait of man photomaker in green suite with dragons + +- Style: Cinematic + +--- + +![Image](../FlowsResults/Photomaker_1_2_1.png) +![Image](../FlowsResults/Photomaker_1_2_2.png) + + portrait of man photomaker looking at viewer from the dark, fire and flames + +- Style: Neonpunk + +--- + +![Image](../FlowsResults/Photomaker_1_3_1.png) +![Image](../FlowsResults/Photomaker_1_3_2.png) + + portrait of woman photomaker wearing suite in the forest looking at viewer + +- Style: Comic book diff --git a/docs/Flows/Playground_2_5_aesthetic.md b/docs/Flows/Playground_2_5_aesthetic.md new file mode 100644 index 0000000..e4bd845 --- /dev/null +++ b/docs/Flows/Playground_2_5_aesthetic.md @@ -0,0 +1,36 @@ +# Aesthetic images (Playground 2.5) + +The flow focuses on three key improvements: enhancing color and contrast, generating images across multiple aspect ratios, and aligning outputs with human aesthetic preferences. + + +It demonstrates superior performance over previous models and commercial systems in terms of aesthetic quality, especially in generating vibrant colors, accommodating different aspect ratios, and capturing fine details in human-centric images. + + +Playground v2.5 outperforms widely-used models and even some closed-source systems in user studies focusing on aesthetic preferences. 
+ +**Supports various aspect ratios.** + +**Supports fast generation using the Align Steps technique** + +## Examples + +> *The second is an image with the "fast run" option* + +![Image](../FlowsResults/Playground_2_5_aesthetic_1.png) +![Image](../FlowsResults/Playground_2_5_aesthetic_1-fast.png) + + girl in suite looking at viewer, high quality, 8k, bright colors + +--- + +![Image](../FlowsResults/Playground_2_5_aesthetic_2.png) +![Image](../FlowsResults/Playground_2_5_aesthetic_2-fast.png) + + cat in suite looking at viewer, high quality, 8k, bright colors + +--- + +![Image](../FlowsResults/Playground_2_5_aesthetic_3.png) +![Image](../FlowsResults/Playground_2_5_aesthetic_3-fast.png) + + Dragon in forest, vivid colors diff --git a/docs/Flows/Playground_2_5_prometheus.md b/docs/Flows/Playground_2_5_prometheus.md new file mode 100644 index 0000000..69f8240 --- /dev/null +++ b/docs/Flows/Playground_2_5_prometheus.md @@ -0,0 +1,35 @@ +# Prometheus (Playground 2.5) + +PrometheusV is presumed to be the first full rank finetune of Playground v2.5, developed by the creator of the Proteus model. This text-to-image generation model has been specifically adapted to enhance accessibility for the open-source community. + +PrometheusV1 represents a significant effort to make advanced text-to-image generation more accessible to the open-source community. +Built upon the Playground v2.5 architecture, it has undergone a full rank finetune using an extensive dataset of over 400,000 images from the Proteus collection. + +A key aspect of its development was the removal of custom sampling methods through brute force techniques at scale, allowing the model to work more seamlessly with standard open-source tools and pipelines. +Additionally, PrometheusV1 has been made backwards compatible with most SDXL LoRAs and tools. + +This approach aims to balance the model's performance capabilities with wider compatibility and ease of use. 
Users can expect outputs that reflect the model's intensive training on the large Proteus dataset while benefiting from improved interoperability with common open-source frameworks and existing SDXL ecosystem. + +**Supports various aspect ratios.** + +## Examples + +![Image](../FlowsResults/Playground_2_5_prometheus_1.png) + + portrait of gothic girl in suite looking at viewer, darkness, high quality + +--- + +![Image](../FlowsResults/Playground_2_5_prometheus_2.png) + + close up portrait of devil in rage, high detail, ultra quality + +- steps: 40 + +--- + +![Image](../FlowsResults/Playground_2_5_prometheus_3.png) + + the kindest kitten with wings, oil painting, high detail, soft colors + +- steps: 40 diff --git a/docs/Flows/SD3_Medium.md b/docs/Flows/SD3_Medium.md new file mode 100644 index 0000000..b8015d2 --- /dev/null +++ b/docs/Flows/SD3_Medium.md @@ -0,0 +1,33 @@ +# StableDiffusion3-Medium + +Flow using the gated model, requires a HuggingFace token to setup. + +*This flow is made more for development and verification that we have successfully added the ability to use gated/closed models.* + +> In the future this Flow will either be modernized or removed when something better appears to replace it based on the feature-tuned SD3, if there are any. 
+ +**Supports various aspect ratios.** + +## Examples + +![Image](../FlowsResults/SD3_Medium_1.png) + + Black kitten with white wings sitting on a blue cloud, cinematic + +- prompt_strength: 5.1 + +--- + +![Image](../FlowsResults/SD3_Medium_2.png) + + poster, cyborg girl against an alien, black baground, high contrast, high details, cinematic + +- prompt_strength: 5.5 + +--- + +![Image](../FlowsResults/SD3_Medium_3.png) + + an oil line art painting of the angel impressive neon shadows, warm colors + +- prompt_strength: 4.1 diff --git a/docs/Flows/SDXL_Lighting.md b/docs/Flows/SDXL_Lighting.md new file mode 100644 index 0000000..5cac398 --- /dev/null +++ b/docs/Flows/SDXL_Lighting.md @@ -0,0 +1,21 @@ +# SDXL Lighting + +SDXL-Lightning is a fast text-to-image generation model. It can generate high-quality 1024px images in a few steps. + +## Examples + +![Image](../FlowsResults/SDXL_Lighting_8_1.png) + + A girl smiling + +--- + +![Image](../FlowsResults/SDXL_Lighting_8_2.png) + + lighting hero, anime + +--- + +![Image](../FlowsResults/SDXL_Lighting_8_3.png) + + portrait angry bear looking at viewer, vivid colours diff --git a/docs/Flows/SketchPortrait.md b/docs/Flows/SketchPortrait.md new file mode 100644 index 0000000..41f42f8 --- /dev/null +++ b/docs/Flows/SketchPortrait.md @@ -0,0 +1,19 @@ +# Sketch Portrait + +Quick creation of an anime sketch from a photograph of a person. + +**Prompt** is optional, something like emotions can be used there: *smile, sad, serious, etc*. + +Part of the flow runs on the CPU, part on the GPU, the flow is very fast and convenient for everyday use. + +## Examples + +> The input files were taken from the Internet and used photographs of `Shakira`, `Steve Jobs` and `Eminem`. 
+ +![Image](../FlowsResults/SketchPortrait_1.png) + +![Image](../FlowsResults/SketchPortrait_2.png) + +![Image](../FlowsResults/SketchPortrait_3.png) + +![Image](../FlowsResults/SketchPortrait_4.png) diff --git a/docs/Flows/Stable_Cascade.md b/docs/Flows/Stable_Cascade.md new file mode 100644 index 0000000..001846e --- /dev/null +++ b/docs/Flows/Stable_Cascade.md @@ -0,0 +1,31 @@ +# Stable Cascade + +This flow works much better with text rendering, and supports repeated rendering to generate images in increased resolution with more detail. + +Suitable for various fairy-tale or cartoon images or for generating postcards. + +- One pass image resolution: **1024x576** + +- Two pass image resolution: **1536x864** + +- Three pass image resolution: **2048x1152** + +## Examples + +![Image](../FlowsResults/Stable_Cascade_1.png) + + portrait of bee, high details, 8k, vivid colors, contrast light + +--- + +![Image](../FlowsResults/Stable_Cascade_2.png) + + dolphin at sea, dawn, high details, 8k, vivid colors, contrast light + +--- + +- Second Pass: false + +![Image](../FlowsResults/Stable_Cascade_3.png) + + girl with sign 'Cascade', high details, 8k, cinematic diff --git a/docs/Flows/SupirUpscaler.md b/docs/Flows/SupirUpscaler.md new file mode 100644 index 0000000..331586c --- /dev/null +++ b/docs/Flows/SupirUpscaler.md @@ -0,0 +1,54 @@ +# SUPIR Upscaler + +*This workflow is added mostly for research purposes, it is still in development.* + +**Memory requirements(both VRAM and RAM) are directly related to the input image resolution.** + +> Currently, for **macOS runners** `Diffusion type` must be set to **fp32**. + +- `Low memory mode`: reduces the size of processed tiles to **256**. + +!!! note + + If you have a very small input image and the result is **less than 1024** (512 for low memory mode) pixels in width **or** height, **tiles should be disabled**. 
+
+From [ComfyUI-SUPIR repo](https://github.com/kijai/ComfyUI-SUPIR):
+
+    Memory requirements are directly related to the input image resolution.
+    In my testing I was able to run 512x512 to 1024x1024 with a 10GB 3080 GPU,
+    and other tests on 24GB GPU to up 3072x3072.
+
+    System RAM requirements are also hefty, don't know numbers
+    but I would guess under 32GB is going to have issues, tested with 64GB.
+
+## Examples
+
+*This Upscaler is still in the development stage, results may get better.*
+
+> We specifically place one portrait example where the result is not perfect.
+
+But for many tests we performed - portrait scaling is shiny compared to older scaling methods.
+
+---
+
+Image of a classic car:
+
+![Image](../FlowsResults/SupirUpscaler-classic-car-1024x683.jpg)
+
+![Image](../FlowsResults/SupirUpscaler-classic-car-result.png)
+
+---
+
+Jackie Chan portrait:
+
+![Image](../FlowsResults/SupirUpscaler-jackie-chan-787x761.jpg)
+
+![Image](../FlowsResults/SupirUpscaler-jackie-chan-result.png)
+
+---
+
+Shakira:
+
+![Image](../FlowsResults/SupirUpscaler-shakira-711x474.jpeg)
+
+![Image](../FlowsResults/SupirUpscaler-shakira-result.png)
diff --git a/docs/Flows/VintagePortrait.md b/docs/Flows/VintagePortrait.md
new file mode 100644
index 0000000..ee95cea
--- /dev/null
+++ b/docs/Flows/VintagePortrait.md
@@ -0,0 +1,21 @@
+# Vintage Portrait
+
+Create a vintage 20th century portrait from a photo of a person.
+
+**Prompt** is required, the simplest example is: [portrait of a girl, cinematic, masterpiece]
+
+**Person's face pose** is optional.
+
+Part of the flow runs on the CPU, part on the GPU, the flow is quite fast and convenient for everyday use.
+
+## Examples
+
+> As input files, the photos of `Bruce Lee` and `Shakira` were taken from the Internet and used.
+
+![Image](../FlowsResults/VintagePortrait_1.png)
+
+![Image](../FlowsResults/VintagePortrait_2.png)
+
+![Image](../FlowsResults/VintagePortrait_3.png)
+
+![Image](../FlowsResults/VintagePortrait_4.png)
diff --git a/docs/Flows/index.md b/docs/Flows/index.md
new file mode 100644
index 0000000..0a93bb0
--- /dev/null
+++ b/docs/Flows/index.md
@@ -0,0 +1,29 @@
+# Available Flows
+
+!!! note
+
+    Flow execution time results are now located at [Hardware Test Results](https://visionatrix.github.io/VixFlowsDocs/hardware_results/)
+
+- [Colorful XL](Colorful_XL.md)
+- [Juggernaut Lite](Juggernaut_Lite.md)
+- [Juggernaut XL](Juggernaut_XL.md)
+- [Mobius XL](Mobius_XL.md)
+- [SDXL Lighting](SDXL_Lighting.md)
+- [Hunyuan DiT](HunyuanDiT.md)
+- [SD3 Medium](SD3_Medium.md)
+- [Stable Cascade](Stable_Cascade.md)
+- [Playground 2.5 Prometheus](Playground_2_5_prometheus.md)
+- [Playground 2.5 Aesthetic](Playground_2_5_aesthetic.md)
+- [Ghibli Portrait](GhibliPortrait.md)
+- [Comicu Portrait](ComicuPortrait.md)
+- [Vintage Portrait](VintagePortrait.md)
+- [Memoji Portrait](MemojiPortrait.md)
+- [Sketch Portrait](SketchPortrait.md)
+- [Photomaker 1](Photomaker_1.md)
+- [Photo Stickers](PhotoStickers.md)
+- [Photo Stickers 2](PhotoStickers2.md)
+- [Mad Scientist](MadScientist.md)
+- [Supir Upscaler](SupirUpscaler.md)
+- [Human Face Detailer](HumanFaceDetailer.md)
+- [Flux 1](Flux_1.md)
+- [Inpaint](Inpaint.md)
diff --git a/docs/GatedModels.rst b/docs/GatedModels.rst
deleted file mode 100644
index 8e690a2..0000000
--- a/docs/GatedModels.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-Gated Models
-============
-
-It often happens that the model you are using is not available for download without authentication. These are referred to as `Gated Models `_.
-
-Flows with such models have a separate mark in the Visionatrix UI.
-
-To be able to install such a flow, you need to specify an ``Access Token``
-
-.. note::
-    Currently, only HuggingFace Access Tokens are supported.
- -Steps to Access Gated Models: - -1. Register on `HuggingFace `_ if you are not already registered -2. Gain access to the model on your account by going to its page (you can click on the model from Visionatrix UI) and filling out the form -3. Generate an access token in the settings of HuggingFace (click on your icon -> settings -> access tokens) -4. Click on ``Set Permissions`` of the token after generation and select ``Read access to contents of all public gated repos you can access`` -5. Go to the Visionatrix settings and enter this access token - -Alternatively, you can set an environment variable named ``HF_AUTH_TOKEN`` with the token value, but this requires setting up the environment variable for each worker if you have many of them. - -I'm a user and want to connect my own worker to process flows with closed models. ---------------------------------------------------------------------------------- - -As user's workers cannot receive global access tokens from the server to avoid leaks, you have two options: - -1. Download the model yourself and place it in the folder specified in ``models_catalog.json`` under the ``save_path`` key. -2. Set the ``HF_AUTH_TOKEN`` environment variable with your own public access token, and the worker will be able to install flows with gated models. diff --git a/docs/HardwareFAQ.rst b/docs/HardwareFAQ.rst deleted file mode 100644 index d7cf6a5..0000000 --- a/docs/HardwareFAQ.rst +++ /dev/null @@ -1,38 +0,0 @@ -Hardware FAQ -============ - -First, you can take a look at the information in the `ComfyUI repository `_. - -.. note:: If you are using Windows and want to avoid hassles, currently, there are no alternatives to Nvidia. PyTorch is expected to release a native version for AMD for Windows soon, but until then, Nvidia is the only option. - -List of GPUs by usefulness: - -1. Nvidia 4090 ``24 GB`` -2. AMD 7900 XTX ``24 GB`` -3. Nvidia 3090 ``24 GB`` -4. Nvidia 4080 Super ``16 GB`` -5. Nvidia 4070 Ti Super ``16 GB`` -6. 
AMD RX 7900 XT ``20 GB`` -7. AMD RX 7900 GRE ``16 GB`` -8. Nvidia 4060 Ti ``16 GB`` -9. Nvidia 3060 ``12 GB`` - -.. note:: You can also look at any performance tests of hardware for ComfyUI as a reference. - ---- - -Q: Why are there no AMD cards other than *AMD 7900 series* on the list? - -A: *ROCM (Radeon Open Compute) "officially" supports only* `these cards `_. - ---- - -Q: How much RAM is needed in the system? - -A: *For normal operation, 32 GB is sufficient, but if you want to handle large resolutions with Supir Scaler Workflow, then 64 GB is recommended.* - ---- - -Q: How to use 2 GPUs? - -A: *The simplest way is to run 2 workers, each assigned to its own GPU, so they can process tasks in parallel.* diff --git a/docs/Installation.rst b/docs/Installation.rst deleted file mode 100644 index f838eb2..0000000 --- a/docs/Installation.rst +++ /dev/null @@ -1,90 +0,0 @@ -Manual Installation -=================== - -In most cases, we recommend using automatic installation via an ``easy-install`` script. - -For those who want to install everything manually, here you will find step-by-step instructions on what the script does. - -Virtual Environment creation -"""""""""""""""""""""""""""" - -First clone the repository with :command:`git`:: - - git clone https://github.com/Visionatrix/Visionatrix.git && cd Visionatrix - - -Setup the virtual environment with :command:`python`:: - - python -m venv venv - - -Activate Virtual Environment(**Linux/macOS**) with :command:`source`:: - - source venv/bin/activate - - -Activate Virtual Environment(**Windows**) with :command:`powershell`:: - - .\venv\Scripts\Activate.ps1 - - -**PyTorch** installation -"""""""""""""""""""""""" - -.. note:: - On macOS with Apple Silicon currently no action is needed. 
- -For AMD graphic cards on **Linux** install **ROCM** version of PyTorch using :command:`pip`:: - - pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.1 - - ---------- - -For AMD graphics cards on **Windows** install PyTorch with DirectML support using :command:`pip`:: - - pip install torch-directml - - -.. note:: - **Python3.10** is the only currently supported version by **torch-directml**. - ---------- - -For NVIDIA graphics cards on **both Linux or Windows** install PyTorch using :command:`pip`:: - - pip install torch torchvision torchaudio - - -Install Visionatrix -""""""""""""""""""" - -Install Visionatrix from the previously cloned sources using :command:`pip`:: - - pip install . - - -Run **Visionatrix** initialization command using :command:`python`:: - - python -m visionatrix install - - -Run Visionatrix -""""""""""""""" - -Execute from the activated virtual environment **run** command using :command:`python`:: - - python -m visionatrix run --ui - - -Manual Update -""""""""""""" - -1. Pull last changes from repository with :command:`git`:: - - git pull - - -2. Execute **update** command from **activated** virtual environment with :command:`python`:: - - python -m visionatrix update diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index c078295..0000000 --- a/docs/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -PYTHON = python3 -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SOURCEDIR = . -BUILDDIR = _build -LINKCHECKDIR = _build/linkcheck - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -github: - @make html - @cp -a build/html/. ./docs - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: links -links: - @$(SPHINXBUILD) -b linkcheck "$(SOURCEDIR)" "$(LINKCHECKDIR)" $(ALLSPHINXOPTS) diff --git a/docs/TechnicalInformation.rst b/docs/TechnicalInformation.rst deleted file mode 100644 index 99a18ee..0000000 --- a/docs/TechnicalInformation.rst +++ /dev/null @@ -1,97 +0,0 @@ -Technical Information -===================== - -Bundled ComfyUI nodes ---------------------- - -Visionatrix by default install and update these nodes: - - * `comfyui-art-venture `_ - * `ComfyUI-AutoCropFaces `_ - * `ComfyUI-BRIA_AI-RMBG `_ - * `ComfyUI-BiRefNet `_ - * `ComfyUI-Custom-Scripts `_ - * `ComfyUI-Impact-Pack `_ - * `comfyui-ollama `_ - * `ComfyUI-SUPIR `_ - * `ComfyUI-Visionatrix `_ - * `ComfyUI-WD14-Tagger `_ - * `comfyui_controlnet_aux `_ - * `ComfyUI_essentials `_ - * `ComfyUI_FizzNodes `_ - * `ComfyUI_Gemini_Flash `_ - * `ComfyUI_InstantID `_ - * `ComfyUI_IPAdapter_plus `_ - * `ComfyUI_UltimateSDUpscale `_ - * `efficiency-nodes-comfyui `_ - * `PuLID_ComfyUI `_ - * `rgthree-comfy `_ - * `Skimmed_CFG `_ - * `style_aligned_comfy `_ - * `was-node-suite-comfyui `_ - -We are gradually expanding the list. - -The main reason many components are missing is that they are quite difficult to install, and we believe that an easy installation process is more important in most cases. - - -Workflows storage ------------------ - -All public flows are located in `VixFlowsDocs `_ repository. - -The repository consists of a development branch **main** and a set of branches **version-X.Y**: - -* version-0.5 -* version-0.6 -* ... -* version-1.0 -* version-1.1 -* main - -Sets of public workflows are packaged in the root of the documentation and have the following form: - -* flows-0.5.zip -* flows-0.6.zip -* ... -* flows-1.0.zip -* flows-1.1.zip -* flows.zip - -The development version of Visionatrix fetches the ``flows.zip`` archive by default. 
- -Release versions of Visionatrix fetch sets of flows for their version. - -The ``FLOWS_URL`` variable in Visionatrix has the default value of ``https://visionatrix.github.io/VixFlowsDocs/`` - -When **FLOWS_URL** ends with "/", the Visionatrix fetches an archive with flows for its version. - -.. note:: - You can also specify a specific path/URL to the archive file with flows, and only that will be used. - - -Update algorithm of Visionatrix -------------------------------- - -Developer versions are updated only to development versions, release versions only to release ones. - -.. note:: - If you are not a developer, you are better off using the release version, as they should be more stable. - -It is recommended to update Vix with the ``easy_install.py`` script. - -The update scheme in `easy_install.py `_ is quite simple; everything is done with ordinary Git commands. - -* If the current version is a dev release or the current branch is ``main`` then: - - 1. Check out the ``main`` branch. - 2. Pull the latest changes from the remote repository. - -* If the current version is a tagged release version: - - 1. Determine the latest tag for the current major version, and if a newer version tag is found, check out the latest version tag within the current major version. - 2. If no newer version is found within the current major version, check for the next major version. - 3. If a newer major version tag is found, prompt the user to update to this newer major version. - -* After checking out the appropriate version, run a ``pip install`` command to update the Python packages. -* Finally, run the ``python3 -m visionatrix update`` command to ensure that any additional necessary updates are applied (ComfyUI, custom nodes, flows). 
diff --git a/docs/VixWorkflows.rst b/docs/VixWorkflows.rst deleted file mode 100644 index 46f78dd..0000000 --- a/docs/VixWorkflows.rst +++ /dev/null @@ -1,207 +0,0 @@ -Vix Workflows -============= - -Introduction -"""""""""""" - -ComfyUI workflows are designed for developers and those interested in diffusion processes. - -Visionatrix workflows are created on top of ComfyUI workflows for easy deployment and straightforward use. - -Currently, there are two main issues with using ComfyUI flows for the general public: - - 1. It's unclear where to get the model from and how to deploy/install it – a **deployment/installation issue** - 2. Without some experience, it's unclear how to just provide inputs to simple get results - a **usability issue** - - -.. _models-mapping: - -Automatic models mapping -"""""""""""""""""""""""" - -To address the first issue with model mapping, Visionatrix includes a `models_catalog.json `_ file. - -By default, it is taken and updated from the Visionatrix repository on GitHub, in case you add a new flow and need to add new model mappings you can change its path using an environment variable to a local file path. - -.. note:: - - We hope that after you add something locally, you will open a pull request so that the community can benefit from it. - -The file structure consists of a set of objects, each describing a ComfyUI Node class that loads or uses a model. - -.. 
code-block:: python - - "InstantID-ControlNet": { - "regexes": [ - { - "class_name": "ControlNetLoader", - "input_value": "^(?=.*(?i:instantid)).*" - } - ], - "save_path": "controlnet/instantid-controlnet.safetensors", - "url": "https://huggingface.co/InstantX/InstantID/resolve/main/ControlNetModel/diffusion_pytorch_model.safetensors", - "homepage": "https://huggingface.co/InstantX/InstantID", - "hash": "c8127be9f174101ebdafee9964d856b49b634435cf6daa396d3f593cf0bbbb05" - } - -"regexes" -''''''''' - -Regexes are used to understand the if this record related to the specified model from the ComfyUI workflow. - -``"input_name"``, ``"class_name"``, and ``"input_value"`` are supported, both together and separately. - -.. note:: - - If these conditions prove insufficient, please create an issue and we will find a solution together. - -"save_path" -''''''''''' - -Specifies where the model will be saved. -Default paths are relative to the root of the external models folder specified in the ComfyUI file `"extra_model_paths.yaml"` - -By default in Visionatrix, this is the path to the `"vix_models"` folder. - -If a Node does not support ComfyUI's model placement configurations and requires them to be located only in the ComfyUI folder, the entry may take the form: - -.. code-block:: - - save_path="{root}models/insightface/models/antelopev2.zip" - -"url" -''''' - -Indicates where to download the model from if it is not already present. - -It is preferable for the model to be hosted on Hugging Face, but "civitai.com" is also supported. - -"homepage" -'''''''''' - -An optional field with a link to the model's home page where you can view the license. - -"hash" -'''''' - -The SHA256 hash of the model. Used to verify the integrity of the model and check for download errors. - -Vix workflow overview -""""""""""""""""""""" - -Starting from the Visionatrix **0.6.0**, the workflow consists of a single file: ``flow_name.json``, -which is a ComfyUI workflow file adopted to Visionatrix. 
- -.. note:: - - The main difference between Visionatrix and ComfyUI: - - **A task is created with a single request, which includes both incoming text parameters and input files.** - -The flow metadata fields described below are filled in the `VixUi-WorkflowMetadata` node. - -"name" -'''''' - -The name of the workflow. It usually matches the name of the file with workflow. - -"display_name" -'''''''''''''' - -Used in the UI to display the name of the flow. - -"description" -''''''''''''' - -A brief description of the flow for user display. - -"author" -'''''''' - -The name of the ComfyUI flow author or the Visionatrix flow author. - -"homepage" -'''''''''' - -A link that will open when clicking on the flow author's name. - -"license" -''''''''' - -The general license under which the flow can be used (to simplify understanding whether it can be used behind the API service, whether it can be used commercially, etc.) - -"documentation" -''''''''''''''' - -Link to additional information about the flow. - -"tags" -'''''' - -A list of string tags that can be used to label the categories of the flow. - -"input_params" -'''''''''''''' - -.. note:: Starting with Visionatrix 0.6.0, the input params are parsed automatically from the adopted ComfyUI workflow. - Based on the information from this field, the Visionatrix UI dynamically displays the interface. - -Technically, this is a list of objects, where each object is one input parameter, which includes: - - * "name" - the key(used only when `"type"` is equal to `"text"`) - * "display_name" - the name of the parameter displayed in the UI - * "type" - a string that can have values: `"text"` or `"image"` - - .. note:: - `"video"` and `"audio"` types will be added as soon as there is the first Workflow requiring it. - - * "optional" - indicates whether the parameter is optional. If an optional field is not provided, the backend will fill it in automatically. 
- * "advanced" - used only in the UI, shows whether the field should be hidden by default (we do not want to overload the interface for regular users) - * "default" - the field value to initiate. - - .. note:: - Used for both UI and backend, but not mandatory even for optional fields (as in the ComfyUI flow, the Node value is still set) - - * "comfy_node_id" - **a field only for the backend**, which defines what to do with this value (where to use it in the ComfyUI Flow) - - -Create task based on Flow -""""""""""""""""""""""""" - - -.. code-block:: python - - @ROUTER.post("/create") - async def create_task( - request: Request, - name: str = Form(description="Name of the flow from which the task should be created"), - count: int = Form(1, description="Number of tasks to be created"), - input_params: str = Form(None, description="List of input parameters as an encoded json string"), - webhook_url: str | None = Form(None, description="URL to call when task state changes"), - webhook_headers: str | None = Form(None, description="Headers for webhook url as an encoded json string"), - files: list[UploadFile | str] = Form(None, description="List of input files for flow"), # noqa - ) -> TaskRunResults: - """ - Endpoint to initiate the creation and execution of tasks within the Vix workflow environment, - handling both file inputs and task-related parameters. - """ - pass - - -.. warning:: - - It's important to note that text parameters and files are passed in different parameters: - - * input_params - input parameters with "type" == "text" - * files - list of input files (files should be in the order they are defined in the Vix Flow) - -When this endpoint is called, a task will be created and queued for execution by one of available workers. - -You can generate Python client with the help of `openapi-python-client `_ and -an example code for creating a Task will look like this: - -.. 
code-block:: python - - client_base = visionatrix_client.Client(base_url="http://127.0.0.1:8288") - params = BodyCreateTask(name="sdxl_lighting", input_params=json.dumps({"prompt": "bottle"})) - created_tasks_id_list = api.tasks.create_task.sync(client=client_base, body=params) diff --git a/docs/WorkingModes.rst b/docs/WorkingModes.rst deleted file mode 100644 index 3852ddc..0000000 --- a/docs/WorkingModes.rst +++ /dev/null @@ -1,89 +0,0 @@ -Working modes -============= - -DEFAULT -""""""" - -Visionatrix(**Vix**) consists of: - -1. A server component, namely, the backend `(in short - Server)` -2. A component responsible for processing tasks `(in short - Worker)` -3. TaskQueue - a database (SQLite *(default)*, PgSQL) -4. A simple and understandable User Interface - -By default, Vix launches with all components integrated (Server + Worker + UI) for quick and easy use on a single computer. - -This is **DEFAULT** mode, in which everything is executed within a single process. - -Easy installation, no need to configure, just launch and use. - -.. note:: There is no support for multiple users or authentication in this case, as this mode uses **SQLite** as a database, which is limiting. - -SERVER -"""""" - -In most scenarios, including home use, you likely have more than one device capable of handling AI tasks. -In such cases, it is allowed and recommended to run the server part and the AI processing part of the task separately. - -.. warning:: **SQLite is not supported as a database in this mode.** - -Steps to run `Vix` in a Server mode: - -1. Install both `psycopg` and `greenlet` python libraries: `pip install psycopg greenlet` -2. Set ``VIX_MODE`` environment variable to ``SERVER`` -3. Setup **PostgreSQL** database and set correct ``DATABASE_URI`` environment variable to point on it. - - .. note:: `PgSQL example `_: ``DATABASE_URI="postgresql+psycopg://vix_user:vix_password@localhost:5432/vix_db"`` - -4. 
Use ``python3 -m visionatrix create-user`` command to create a user in the database. -5. Connect at least one Worker to handle task processing. - - -*We will provide a docker-compose file soon, with full server setup to deploy it in one click.* - -WORKER -"""""" - -Each worker can have a different set of tasks (Flows) installed, which is useful to avoid installing a task on a worker instance that cannot handle it. -A worker will only request the tasks installed for it. - -There is two worker modes, both will be described, we ourselves most use Vix in `Worker to Server` mode. - -Worker to Database-FS -''''''''''''''''''''' - -.. note:: Requirements: - - 1. The database used by the **Server** should be accessible for the worker. - 2. There should be the ability to map the **Server**'s ``vix_tasks_files`` folder to the worker. - -Set the environment variable ``VIX_MODE`` to **WORKER** and leave ``VIX_SERVER`` with an empty value; do not set it. - -In this scenario, the worker must be configured with the correct database path using the ``DATABASE_URI`` environment variable. -The format can be viewed here: `SqlAlchemy Database URLs `_ - -By using the ``TASKS_FILES_DIR`` environment variable or the ``--tasks_files_dir`` argument, you can change the location of the ``vix_tasks_files`` folder. -The worker must have access to the Server's ``vix_tasks_files`` folder. - -With this scaling method, workers independently retrieve tasks from the database and directly write the execution results to the servers *TASKS_FILES_DIR*. - -In this setup, you can imagine workers as Server threads operating remotely. - -Worker to Server -'''''''''''''''' - -This method implies that the workers do not have direct access to the database or the server file system. - -All communication occurs through the network, with workers accessing the server backend directly. 
- -Set the environment variable ``VIX_MODE`` to **WORKER** and set ``VIX_SERVER`` with the full address of the Server(including port number). - -.. note:: ``VIX_HOST``, ``VIX_PORT``, ``DATABASE_URI`` will be ignored, as the worker in this mode does not need it. - -In this use case, the **vix_tasks_files** directory will contain only temporary files; after uploading results to the Server, the results from the worker instance will be cleared. - -For authentication on the server worker will use ``WORKER_AUTH`` environment variable, which must contain "**USER_ID:PASSWORD**". - -.. note:: - - Workers with an administrator account can process all tasks of all users, workers assigned to a user account can only process tasks created by that user. diff --git a/docs/comfyui_vix_migration.md b/docs/comfyui_vix_migration.md new file mode 100644 index 0000000..8178e1b --- /dev/null +++ b/docs/comfyui_vix_migration.md @@ -0,0 +1,128 @@ +--- +title: ComfyUI to Visionatrix migration +--- + +If you want to adopt your ComfyUI workflow to use in Visionatrix, you +can use this guide to help you do so. There are a few steps you need to +follow. + +--- + +## 1. Install ComfyUI-Visionatrix custom nodes + +First, it is recommended to install our custom +[ComfyUI-Visionatrix](https://github.com/Visionatrix/ComfyUI-Visionatrix) +nodes. Otherwise, you will have to use custom nodes titles which are will +be parsed by Visionatrix. + +``` bash +git clone https://github.com/Visionatrix/ComfyUI-Visionatrix.git +``` + +!!! note + + You can do the required migration via nodes titles, which is less + convenient. The node title must be like this: + `input;Display Name;optional;advanced;order=1;custom_id=custom_name`. + +--- + +## 2. Define the input params + +Visionatrix UI aims simplicity and clarity. 
Define the most important
+input params of your ComfyUI workflow to extract them to the Visionatrix
+UI as inputs, for example:
+
+- prompt (textarea)
+- negative prompt (textarea)
+- prompt strength (range)
+- some logic toggles (checkbox)
+- input files (file)
+
+For that you will need to attach our custom nodes as adapters to your
+nodes receiving these inputs that will be filled by the user from
+the Visionatrix UI.
+
+As an example, you can have a look at our [list of workflows](https://github.com/Visionatrix/VixFlowsDocs/tree/main/flows)
+adopted to the new format.
+
+!!! note
+
+    The list of available nodes can be found in the readme of the
+    [ComfyUI-Visionatrix](https://github.com/Visionatrix/ComfyUI-Visionatrix)
+    repository.
+
+---
+
+### 2.1 Node to Input mapping via title string
+
+Alternatively, Visionatrix supports mapping other nodes as input
+params via the node title string, with fields separated by semicolons.
+
+> Node titles starting with the `input;` keyword are considered as
+> Visionatrix input params.
+
+The parameters list:
+
+- `input` - keyword to define the input param
+- `Display Name` - positional parameter, the name of the input field
+  displayed in the UI
+- `optional` - if present, the optional field is set to True
+- `advanced` - if present, the advanced field is set to True
+- `order=1` - the order of the input param in the UI
+- `custom_id=custom_name` - the custom id of the input param
+
+!!! note
+
+    `order` is very important for files if the workflow accepts more than 1 file.
+
+---
+
+### 2.2 External nodes used via Node to Input mapping
+
+In our workflows, we use some external nodes mapped as input params,
+which you can use as examples:
+
+- `SDXLAspectRatioSelector` - select input field used from
+  [comfyui-art-venture](https://github.com/Visionatrix/comfyui-art-venture)
+  for Aspect Ratio select. 
Usually it's an optional and hidden to the + advanced prompt options: + `input;Aspect Ratio;optional;advanced;custom_id=aspect_ratio`; +- `LoadImage` - default ComfyUI image loader node as image file input + field. As required title: `input;Input image;order=1`, or optional + advanced: `input;Optional helper image;optional;advanced;order=20`; + +--- + +## 3. Map the models for automatic download + +Visionatrix simplifies and automates the process of downloading the +models. + +As the third step of the migration, you need to map the models that are used in your workflow (see `models-mapping`). + +--- + +## 4. Build the list of available flows + +The last step is to build the list of available flows in the Visionatrix +UI. Follow the steps described in +[options.py](https://github.com/Visionatrix/Visionatrix/blob/main/visionatrix/options.py#L56-L59) +file for `FLOWS_URL` and `MODELS_CATALOG_URL` to enable Visionatrix +local workflows development mode: + +Create a zip with adjusted/new flows: + +``` bash +cd ../VixFlowsDocs && zip -r ../Visionatrix/flows.zip flows && cd ../Visionatrix +``` + +And uncomment appropriate code lines in [options.py file](https://github.com/Visionatrix/Visionatrix/blob/main/visionatrix/options.py) +to use local versions of the flows. + +--- + +## 5. Verify and test the workflow + +Last step is to run Visionatrix and set up your workflow to verify that +everything works as expected. diff --git a/docs/command_line_options.md b/docs/command_line_options.md new file mode 100644 index 0000000..9f8359a --- /dev/null +++ b/docs/command_line_options.md @@ -0,0 +1,224 @@ +--- +title: Command Line Options +--- + +## Command Line Options + +Most of the options supported by `ComfyUI` are supported. 
+ +They can be specified when starting manually: + +``` shell +python3 -m visionatrix run --ui --use-split-cross-attention --disable-smart-memory +``` + +Here are the list of the supported command line options related to [Visionatrix] for **run** command: + + +#### --backend_dir=BACKEND_DIR + +> Directory for the backend(folder with ComfyUI) +> +> Default: `vix_backend` + + +#### --flows_dir=FLOWS_DIR + +> Directory for the flows +> +> Default: `vix_flows` + + +#### --models_dir=MODELS_DIR + +> Directory for the models +> +> Default: `vix_models` + + +#### --tasks_files_dir=FILES_DIR + +> Directory for input/output files +> +> Default: `vix_task_files` + +#### --host=HOST + +> Host to be used by Visionatrix + +#### --port=PORT + +> Port to be used by Visionatrix + +#### --ui + +> Flag to enable User interface(JS frontend). + +--- + +## Additional Vix commands + +### install-flow + +Can be used for Workers that do not have a user interface. + +``` shell +python3 -m visionatrix install-flow --file=path_to_json +``` + +File should contain ComfyUI workflow with the +[metadata](https://visionatrix.github.io/VixFlowsDocs/vix_workflows.html#vix-workflow-overview) +needed for Visionatrix. + +``` shell +python3 -m visionatrix install-flow --name=photo_stickers +``` + +This will install flow by it's `ID` which is equal to it's folder name +[here](https://github.com/Visionatrix/VixFlows/tree/main/flows) + +--- + +## Supported **ComfyUI** options + +#### --cuda-device DEVICE_ID + +> Set the id of the cuda device this instance will use. + +#### --cuda-malloc + +> Enable cudaMallocAsync (enabled by default for torch 2.0 and up). + + +#### --disable-cuda-malloc + +> Disable cudaMallocAsync. + +#### --force-fp32 + +> Force fp32 (If this makes your GPU work better please report it). + +#### --force-fp16 + +> Force fp16. + +#### --bf16-unet + +> Run the UNET in bf16. This should only be used for testing stuff. + +#### --fp16-unet + +> Store unet weights in fp16. 
+ +#### --fp8_e4m3fn-unet + +> Store unet weights in fp8_e4m3fn. + +#### --fp8_e5m2-unet + +> Store unet weights in fp8_e5m2. + +#### --fp16-vae + +> Run the VAE in fp16, might cause black images. + +#### --fp32-vae + +> Run the VAE in full precision fp32. + +#### --bf16-vae + +> Run the VAE in bf16. + +#### --cpu-vae + +> Run the VAE on the CPU. + +#### --fp8_e4m3fn-text-enc + +> Store text encoder weights in fp8 (e4m3fn variant). + +#### --fp8_e5m2-text-enc + +> Store text encoder weights in fp8 (e5m2 variant). + +#### --fp16-text-enc + +> Store text encoder weights in fp16. + +#### --fp32-text-enc + +> Store text encoder weights in fp32. +> + +#### --cache-classic + +> Use the old style (aggressive) caching. **(Default)** + +#### --cache-lru + +> Use LRU caching with a maximum of N node results cached. May use more RAM/VRAM. + +#### --disable-ipex-optimize + +> Disables ipex.optimize when loading models with Intel GPUs. + +#### --use-split-cross-attention + +> Use the split cross attention optimization. **Ignored when xformers is used.** + +#### --use-quad-cross-attention + +> Use the sub-quadratic cross attention optimization. **Ignored when xformers is used.** + +#### --use-pytorch-cross-attention + +> Use the new pytorch 2.0 cross attention function. + +#### --disable-xformers + +> Disable xformers. + +#### --force-upcast-attention + +> Force enable attention upcasting, please report if it fixes black images. + +#### --dont-upcast-attention + +> Disable all upcasting of attention. **Should be unnecessary except for debugging.** + +#### --gpu-only + +> Store and run everything (text encoders/CLIP models, etc... on the GPU). + +#### --highvram + +> By default, models will be unloaded to CPU memory after being used. This option keeps them in GPU memory. + +#### --normalvram + +> Used to force normal vram use if lowvram gets automatically enabled. + +#### --lowvram + +> Split the unet in parts to use less vram. + +#### --novram + +> When lowvram isn't enough. 
+ +#### --cpu + +> To use the CPU for everything (slow). + +#### --reserve-vram + +> Set the amount of VRAM in GB you want to reserve for use by your OS/other software. + +#### --disable-smart-memory + +> Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can. + +#### --fast + +> Enable some untested and potentially quality deteriorating optimizations. diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index cc1a2c9..0000000 --- a/docs/conf.py +++ /dev/null @@ -1,95 +0,0 @@ -import os -import sys -import requests -import re -from datetime import datetime - -import sphinx_rtd_theme - -dir_path = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.abspath(dir_path + "/_ext")) -sys.path.insert(0, os.path.abspath("../.")) - -now = datetime.now() - -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.extlinks", - "sphinx.ext.intersphinx", - "sphinx.ext.viewcode", - "sphinx_copybutton", - "sphinx_inline_tabs", - "sphinx_issues", - "sphinx_rtd_theme", - "sphinxcontrib.autodoc_pydantic", -] - -intersphinx_mapping = { - "python": ("https://docs.python.org/3", None), -} - -autodoc_pydantic_model_show_json = False - -# General information about the project. -project = "Visionatrix" -copyright = str(now.year) + f" {project} Authors" # noqa - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. 
- -url = "https://raw.githubusercontent.com/Visionatrix/Visionatrix/main/visionatrix/_version.py" -response = requests.get(url) -response.raise_for_status() -match = re.search(r'__version__\s*=\s*"(.*?)"', response.text) -if not match: - raise ValueError("Version string not found in _version.py") - -version = match.group(1) -release = version - -html_theme = "sphinx_rtd_theme" -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - -html_logo = "resources/logo.svg" - -html_theme_options = { - "display_version": True, - "logo_only": True, -} - -# If true, `todos` produce output. Else they produce nothing. -todo_include_todos = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# If true, Sphinx will warn about all references where the target cannot be found. -# Default is False. You can activate this mode temporarily using the -n command-line -# switch. -nitpicky = True -nitpick_ignore_regex = [ - (r"py:class", r"starlette\.requests\.Request"), - (r"py:class", r"starlette\.requests\.HTTPConnection"), - (r"py:class", r"ComputedFieldInfo"), - (r"py:.*", r"httpx.*"), -] - -autodoc_member_order = "bysource" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["resources"] - - -def setup(app): - app.add_js_file("js/script.js") - app.add_css_file("css/styles.css") - app.add_css_file("css/dark.css") - app.add_css_file("css/light.css") - - -issues_github_path = "Visionatrix/Visionatrix" diff --git a/docs/faq.md b/docs/faq.md new file mode 100644 index 0000000..627ced4 --- /dev/null +++ b/docs/faq.md @@ -0,0 +1,20 @@ +--- +title: FAQ +--- + +#### Can I use ComfyUI which is included in Visionatrix? + +Yes, you can install your Nodes there and run ComfyUI separately. 
You +can also tell Visionatrix to use ComfyUI installed in a different path. +See the +[\--backend_dir](https://visionatrix.github.io/VixFlowsDocs/command_line_options/) +parameter. + +--- + +#### Can I run it on multiple GPU? + +You can run one worker on one GPU and process tasks in parallel, take +a look at [Server and Worker modes](https://visionatrix.github.io/VixFlowsDocs/working_modes#server). + +--- diff --git a/docs/gated_models.md b/docs/gated_models.md new file mode 100644 index 0000000..7e6add5 --- /dev/null +++ b/docs/gated_models.md @@ -0,0 +1,42 @@ +--- +title: Gated Models +--- + +It often happens that the model you are using is not available for +download without authentication. These are referred to as [Gated Models](https://huggingface.co/docs/hub/models-gated). + +Flows with such models have a separate mark in the Visionatrix UI. + +To be able to install such a flow, you need to specify an `Access Token` + +!!! note + + Currently, only HuggingFace Access Tokens are supported. + + +Steps to Access Gated Models: + +1. Register on [HuggingFace](https://huggingface.co) if you are not + already registered +2. Gain access to the model on your account by going to its page (you + can click on the model from Visionatrix UI) and filling out the form +3. Generate an access token in the settings of HuggingFace (click on + your icon -\> settings -\> access tokens) +4. Click on `Set Permissions` of the token after generation and select + `Read access to contents of all public gated repos you can access` +5. Go to the Visionatrix settings and enter this access token + +Alternatively, you can set an environment variable named `HF_AUTH_TOKEN` +with the token value, but this requires setting up the environment +variable for each worker if you have many of them. + +#### I'm a user and want to connect my own worker to process flows with closed models. 
+ +As user's workers cannot receive global access tokens from the server +to avoid leaks, you have two options: + +1. Download the model yourself and place it in the folder specified in + `models_catalog.json` under the `save_path` key. +2. Set the `HF_AUTH_TOKEN` environment variable with your own public + access token, and the worker will be able to install flows with + gated models. diff --git a/docs/hardware_faq.md b/docs/hardware_faq.md new file mode 100644 index 0000000..cff25f2 --- /dev/null +++ b/docs/hardware_faq.md @@ -0,0 +1,47 @@ +--- +title: Hardware FAQ +--- + +First, you can take a look at the information in the [ComfyUI repository](https://github.com/comfyanonymous/ComfyUI/wiki/Which-GPU-should-I-buy-for-ComfyUI). + +!!! note + + If you are using Windows and want to avoid hassles, currently, there are + no alternatives to Nvidia. PyTorch is expected to release a native + version for AMD for Windows soon, but until then, Nvidia is the only + option. + +List of GPUs by usefulness: + +1. Nvidia 4090 `24 GB` +2. AMD 7900 XTX `24 GB` +3. Nvidia 3090 `24 GB` +4. Nvidia 4080 Super `16 GB` +5. Nvidia 4070 Ti Super `16 GB` +6. AMD RX 7900 XT `20 GB` +7. AMD RX 7900 GRE `16 GB` +8. Nvidia 4060 Ti `16 GB` +9. Nvidia 3060 `12 GB` + +!!! note + + You can also look at any performance tests of hardware for ComfyUI as a reference. + +--- + +Q: Why are there no AMD cards other than *AMD 7900 series* on the list? + +A: *ROCM (Radeon Open Compute) `officially` supports only* [these cards](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/reference/system-requirements.html#supported-gpus). + +--- + +Q: How much RAM is needed in the system? + +A: *For normal operation, 32 GB is sufficient, but if you want to handle +large resolutions with Supir Scaler Workflow, then 64 GB is recommended.* + +--- + +Q: How to use 2 GPUs? 
+ +A: *The simplest way is to run 2 workers, each assigned to its own GPU, so they can process tasks in parallel.* diff --git a/docs/hardware_results.md b/docs/hardware_results.md new file mode 100644 index 0000000..72705db --- /dev/null +++ b/docs/hardware_results.md @@ -0,0 +1,179 @@ +# Hardware Test Results + +## SDXL Lighting + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| 4_steps | 0.9 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| 4_steps | 1.1 | i9_10900-7900XTX | 2024/09/13 11:54:14 | +| 8_steps | 1.4 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| 8_steps | 1.7 | i9_10900-7900XTX | 2024/09/13 11:54:14 | + +## Juggernaut Lite + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 9.2 | i9_10900-7900XTX | 2024/09/13 11:54:14 | +| default | 10.2 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | + +## Juggernaut XL + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 10.9 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| default | 15.1 | i9_10900-7900XTX | 2024/09/13 11:54:14 | +| fast_run | 4.6 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| fast_run | 6.3 | i9_10900-7900XTX | 2024/09/13 11:54:14 | + +## Colorful XL + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| fast_run_30steps | 1.5 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| fast_run_30steps | 2.0 | i9_10900-7900XTX | 2024/09/13 11:54:14 | +| fast_run_60steps | 2.7 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| fast_run_60steps | 3.5 | i9_10900-7900XTX | 2024/09/13 11:54:14 | +| usual_run_30steps | 3.8 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| usual_run_30steps | 5.0 | 
i9_10900-7900XTX | 2024/09/13 11:54:14 | +| usual_run_60steps | 7.2 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| usual_run_60steps | 9.6 | i9_10900-7900XTX | 2024/09/13 11:54:14 | + +## Mobius XL + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| fast_run_30steps | 5.0 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| fast_run_30steps | 7.2 | i9_10900-7900XTX | 2024/09/13 11:54:14 | +| usual_run_30steps | 12.0 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| usual_run_30steps | 17.3 | i9_10900-7900XTX | 2024/09/13 11:54:14 | + +## Aesthetic images + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 10.5 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| default | 15.1 | i9_10900-7900XTX | 2024/09/13 11:54:14 | +| fast_run | 4.4 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| fast_run | 6.2 | i9_10900-7900XTX | 2024/09/13 11:54:14 | + +## Prometheus model + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| 25steps | 5.7 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| 25steps | 8.9 | i9_10900-7900XTX | 2024/09/13 11:54:14 | +| 50steps | 11.2 | R9_7900X-4070TiS | 2024/09/13 13:12:33 | +| 50steps | 17.4 | i9_10900-7900XTX | 2024/09/13 11:54:14 | + +## Flux (Small) + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| 20steps | 23.9 | R9_7900X-4070TiS | 2024/09/13 14:02:12 | +| 20steps | 37.4 | i9_10900-7900XTX | 2024/09/13 14:21:16 | +| 40steps | 46.7 | R9_7900X-4070TiS | 2024/09/13 14:02:12 | +| 40steps | 74.4 | i9_10900-7900XTX | 2024/09/13 14:21:16 | + +## Flux Lighting (Small) + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| 
---------- | :---------------------: | -------------------- | --------- | +| default | 5.5 | R9_7900X-4070TiS | 2024/09/13 14:02:12 | +| default | 7.4 | i9_10900-7900XTX | 2024/09/13 14:21:16 | + +## Stable Cascade + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| one_pass | 5.8 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| one_pass | 7.7 | i9_10900-7900XTX | 2024/09/13 13:41:37 | +| three_pass | 38.4 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| three_pass | 65.4 | i9_10900-7900XTX | 2024/09/13 13:41:37 | +| two_pass | 18.3 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| two_pass | 27.0 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## HunyuanDiT + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| 20steps | 11.0 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| 20steps | 20.9 | i9_10900-7900XTX | 2024/09/13 13:41:37 | +| 40steps | 21.9 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| 40steps | 41.5 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## Vintage Portrait + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 19.6 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| default | 25.5 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## Sketch Portrait + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 3.7 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| default | 4.3 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## ComicU Portrait + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 3.7 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| default 
| 4.4 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## Ghibli Portrait + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 3.2 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| default | 4.1 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## Memoji Portrait + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 3.2 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| default | 3.8 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## Photo from 1 image + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| default | 9.5 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| default | 13.1 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## Remove background (BiRefNet) + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| 1024x1024 | 0.4 | i9_10900-7900XTX | 2024/09/13 13:41:37 | +| 1024x1024 | 0.4 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | + +## Remove background + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| 1024x1024 | 0.2 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| 1024x1024 | 0.3 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## SUPIR Upscaler + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| 1MPx1.5 | 93.3 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| 1MPx1.5 | 124.5 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## Photo Stickers + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | 
-------------------- | --------- | +| default | 28.5 | R9_7900X-4070TiS | 2024/09/13 13:22:53 | +| default | 33.3 | i9_10900-7900XTX | 2024/09/13 13:41:37 | + +## Flux + +| Test Case | Avg Execution Time (s) | Hardware Description | Test Time | +| ---------- | :---------------------: | -------------------- | --------- | +| 20steps | 52.6 | i9_10900-7900XTX | 2024/09/13 14:33:49 | +| 40steps | 103.5 | i9_10900-7900XTX | 2024/09/13 14:33:49 | diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..c089ee1 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,17 @@ +# Visionatrix Documentation + +Welcome to the **Visionatrix** project documentation. Here, you will find all the necessary information to get started and understand how the project works. + +## Table of Contents + +- [Installation](installation.md) +- [Working Modes](working_modes.md) +- [Command Line Options](command_line_options.md) +- [Technical Information](technical_information.md) +- [Gated Models](gated_models.md) +- [ComfyUI to Vix Workflows Migration](comfyui_vix_migration.md) +- [Vix Workflows](vix_workflows.md) +- [FAQ](faq.md) +- [Hardware FAQ](hardware_faq.md) +- [Hardware Results](hardware_results.md) +- [Swagger API](swagger.html) diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 4c1bc61..0000000 --- a/docs/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -Visionatrix documentation -========================= - -Here will leave all docs that is not suitable for Readme file. - -.. 
toctree:: - :maxdepth: 1 - - Flows/index.rst - Installation.rst - CommandLineOptions.rst - WorkingModes.rst - GatedModels.rst - VixWorkflows.rst - ComfyUI2VixMigration.rst - TechnicalInformation.rst - FAQ.rst - HardwareFAQ.rst - - -Different utilities -""""""""""""""""""" - -`Visionatrix OpenAPI Specs `_ diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 0000000..56e06cf --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,83 @@ +--- +title: Manual Installation +--- + +# Manual Installation + +In most cases, we recommend using automatic installation via an +`easy-install` script. + +For those who want to install everything manually, here you will find +step-by-step instructions on what the script does. + +## Virtual Environment creation + +First clone the repository with `git`: + + git clone https://github.com/Visionatrix/Visionatrix.git && cd Visionatrix + +Setup the virtual environment with `python`: + + python -m venv venv + +Activate Virtual Environment(**Linux/macOS**) with `source`: + + source venv/bin/activate + +Activate Virtual Environment(**Windows**) with `powershell`: + + .\venv\Scripts\Activate.ps1 + +## PyTorch installation + +!!! note + + **On macOS currently no action is needed**. + +For AMD graphic cards on **Linux** install **ROCM** version of PyTorch using `pip`: + + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.1 + +------------------------------------------------------------------------ + +For AMD graphics cards on **Windows** install PyTorch with DirectML support using `pip`: + + pip install torch-directml + +> **Python3.10** is the only currently supported version by **torch-directml**. 
+ +------------------------------------------------------------------------ + +For NVIDIA graphics cards on **Linux** install PyTorch with next `pip` command: + + pip install torch torchvision torchaudio + +For NVIDIA graphics cards on **Windows** install PyTorch using `pip` specifying PyTorch and CUDA version: + + pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cu121 + +## Install Visionatrix + +Install Visionatrix from the previously cloned sources using `pip`: + + pip install . + +Run **Visionatrix** initialization command using `python`: + + python -m visionatrix install + +## Run Visionatrix + +Execute from the activated virtual environment **run** command using `python`: + + python -m visionatrix run --ui + +## Manual Update + +1. Pull last changes from repository with `git`: + + git pull + +2. Execute **update** command from **activated** virtual environment with `python`: + + python -m visionatrix update diff --git a/docs/resources/css/dark.css b/docs/resources/css/dark.css deleted file mode 100644 index 8866c07..0000000 --- a/docs/resources/css/dark.css +++ /dev/null @@ -1,1996 +0,0 @@ -@media (prefers-color-scheme: dark) { - html { - background-color: #181a1b !important; - } - - html, body, input, textarea, select, button { - background-color: #181a1b; - } - - html, body, input, textarea, select, button { - border-color: #736b5e; - color: #e8e6e3; - } - - a { - color: #3391ff; - } - - table { - border-color: #545b5e; - } - - ::placeholder { - color: #b2aba1; - } - - input:-webkit-autofill, - textarea:-webkit-autofill, - select:-webkit-autofill { - background-color: #555b00 !important; - color: #e8e6e3 !important; - } - - ::selection { - background-color: #004daa !important; - color: #e8e6e3 !important; - } - - ::-moz-selection { - background-color: #004daa !important; - color: #e8e6e3 !important; - } - - /* Invert Style */ - .jfk-bubble.gtx-bubble, embed[type="application/pdf"] { - filter: 
invert(100%) hue-rotate(180deg) contrast(90%) !important; - } - - /* Override Style */ - .vimvixen-hint { - background-color: #7b5300 !important; - border-color: #d8b013 !important; - color: #f3e8c8 !important; - } - - ::placeholder { - opacity: 0.5 !important; - } - - /* Variables Style */ - :root { - --darkreader-neutral-background: #181a1b; - --darkreader-neutral-text: #e8e6e3; - --darkreader-selection-background: #004daa; - --darkreader-selection-text: #e8e6e3; - } - - /* Modified CSS */ - a:hover, - a:active { - outline-color: initial; - } - - abbr[title] { - border-bottom-color: initial; - } - - ins { - background-image: initial; - background-color: rgb(112, 112, 0); - color: rgb(232, 230, 227); - text-decoration-color: initial; - } - - mark { - background-image: initial; - background-color: rgb(204, 204, 0); - color: rgb(232, 230, 227); - } - - ul, - ol, - dl { - list-style-image: none; - } - - li { - list-style-image: initial; - } - - img { - border-color: initial; - } - - fieldset { - border-color: initial; - } - - legend { - border-color: initial; - } - - .chromeframe { - background-image: initial; - background-color: rgb(53, 57, 59); - color: rgb(232, 230, 227); - } - - .ir { - border-color: initial; - background-color: transparent; - } - - .visuallyhidden { - border-color: initial; - } - - .fa-border { - border-color: rgb(53, 57, 59); - } - - .fa-inverse { - color: rgb(232, 230, 227); - } - - .sr-only { - border-color: initial; - } - - .fa::before, - .wy-menu-vertical li span.toctree-expand::before, - .wy-menu-vertical li.on a span.toctree-expand::before, - .wy-menu-vertical li.current > a span.toctree-expand::before, - .rst-content .admonition-title::before, - .rst-content h1 .headerlink::before, - .rst-content h2 .headerlink::before, - .rst-content h3 .headerlink::before, - .rst-content h4 .headerlink::before, - .rst-content h5 .headerlink::before, - .rst-content h6 .headerlink::before, - .rst-content dl dt .headerlink::before, - .rst-content 
p.caption .headerlink::before, - .rst-content table > caption .headerlink::before, - .rst-content .code-block-caption .headerlink::before, - .rst-content tt.download span:first-child::before, - .rst-content code.download span:first-child::before, - .icon::before, - .wy-dropdown .caret::before, - .wy-inline-validate.wy-inline-validate-success .wy-input-context::before, - .wy-inline-validate.wy-inline-validate-danger .wy-input-context::before, - .wy-inline-validate.wy-inline-validate-warning .wy-input-context::before, - .wy-inline-validate.wy-inline-validate-info .wy-input-context::before { - text-decoration-color: inherit; - } - - a .fa, - a .wy-menu-vertical li span.toctree-expand, - .wy-menu-vertical li a span.toctree-expand, - .wy-menu-vertical li.on a span.toctree-expand, - .wy-menu-vertical li.current > a span.toctree-expand, - a .rst-content .admonition-title, - .rst-content a .admonition-title, - a .rst-content h1 .headerlink, - .rst-content h1 a .headerlink, - a .rst-content h2 .headerlink, - .rst-content h2 a .headerlink, - a .rst-content h3 .headerlink, - .rst-content h3 a .headerlink, - a .rst-content h4 .headerlink, - .rst-content h4 a .headerlink, - a .rst-content h5 .headerlink, - .rst-content h5 a .headerlink, - a .rst-content h6 .headerlink, - .rst-content h6 a .headerlink, - a .rst-content dl dt .headerlink, - .rst-content dl dt a .headerlink, - a .rst-content p.caption .headerlink, - .rst-content p.caption a .headerlink, - a .rst-content table > caption .headerlink, - .rst-content table > caption a .headerlink, - a .rst-content .code-block-caption .headerlink, - .rst-content .code-block-caption a .headerlink, - a .rst-content tt.download span:first-child, - .rst-content tt.download a span:first-child, - a .rst-content code.download span:first-child, - .rst-content code.download a span:first-child, - a .icon { - text-decoration-color: inherit; - } - - .wy-alert, - .rst-content .note, - .rst-content .attention, - .rst-content .caution, - .rst-content 
.danger, - .rst-content .error, - .rst-content .hint, - .rst-content .important, - .rst-content .tip, - .rst-content .warning, - .rst-content .seealso, - .rst-content .admonition-todo, - .rst-content .admonition { - background-image: initial; - background-color: rgb(32, 35, 36); - } - - .wy-alert-title, - .rst-content .admonition-title { - color: rgb(232, 230, 227); - background-image: initial; - background-color: rgb(29, 91, 131); - } - - .wy-alert.wy-alert-danger, - .rst-content .wy-alert-danger.note, - .rst-content .wy-alert-danger.attention, - .rst-content .wy-alert-danger.caution, - .rst-content .danger, - .rst-content .error, - .rst-content .wy-alert-danger.hint, - .rst-content .wy-alert-danger.important, - .rst-content .wy-alert-danger.tip, - .rst-content .wy-alert-danger.warning, - .rst-content .wy-alert-danger.seealso, - .rst-content .wy-alert-danger.admonition-todo, - .rst-content .wy-alert-danger.admonition { - background-image: initial; - background-color: rgb(52, 12, 8); - } - - .wy-alert.wy-alert-danger .wy-alert-title, - .rst-content .wy-alert-danger.note .wy-alert-title, - .rst-content .wy-alert-danger.attention .wy-alert-title, - .rst-content .wy-alert-danger.caution .wy-alert-title, - .rst-content .danger .wy-alert-title, - .rst-content .error .wy-alert-title, - .rst-content .wy-alert-danger.hint .wy-alert-title, - .rst-content .wy-alert-danger.important .wy-alert-title, - .rst-content .wy-alert-danger.tip .wy-alert-title, - .rst-content .wy-alert-danger.warning .wy-alert-title, - .rst-content .wy-alert-danger.seealso .wy-alert-title, - .rst-content .wy-alert-danger.admonition-todo .wy-alert-title, - .rst-content .wy-alert-danger.admonition .wy-alert-title, - .wy-alert.wy-alert-danger .rst-content .admonition-title, - .rst-content .wy-alert.wy-alert-danger .admonition-title, - .rst-content .wy-alert-danger.note .admonition-title, - .rst-content .wy-alert-danger.attention .admonition-title, - .rst-content .wy-alert-danger.caution .admonition-title, 
- .rst-content .danger .admonition-title, - .rst-content .error .admonition-title, - .rst-content .wy-alert-danger.hint .admonition-title, - .rst-content .wy-alert-danger.important .admonition-title, - .rst-content .wy-alert-danger.tip .admonition-title, - .rst-content .wy-alert-danger.warning .admonition-title, - .rst-content .wy-alert-danger.seealso .admonition-title, - .rst-content .wy-alert-danger.admonition-todo .admonition-title, - .rst-content .wy-alert-danger.admonition .admonition-title { - background-image: initial; - background-color: rgb(108, 22, 13); - } - - .wy-alert.wy-alert-warning, - .rst-content .wy-alert-warning.note, - .rst-content .attention, - .rst-content .caution, - .rst-content .wy-alert-warning.danger, - .rst-content .wy-alert-warning.error, - .rst-content .wy-alert-warning.hint, - .rst-content .wy-alert-warning.important, - .rst-content .wy-alert-warning.tip, - .rst-content .warning, - .rst-content .wy-alert-warning.seealso, - .rst-content .admonition-todo, - .rst-content .wy-alert-warning.admonition { - background-image: initial; - background-color: rgb(82, 53, 0); - } - - .wy-alert.wy-alert-warning .wy-alert-title, - .rst-content .wy-alert-warning.note .wy-alert-title, - .rst-content .attention .wy-alert-title, - .rst-content .caution .wy-alert-title, - .rst-content .wy-alert-warning.danger .wy-alert-title, - .rst-content .wy-alert-warning.error .wy-alert-title, - .rst-content .wy-alert-warning.hint .wy-alert-title, - .rst-content .wy-alert-warning.important .wy-alert-title, - .rst-content .wy-alert-warning.tip .wy-alert-title, - .rst-content .warning .wy-alert-title, - .rst-content .wy-alert-warning.seealso .wy-alert-title, - .rst-content .admonition-todo .wy-alert-title, - .rst-content .wy-alert-warning.admonition .wy-alert-title, - .wy-alert.wy-alert-warning .rst-content .admonition-title, - .rst-content .wy-alert.wy-alert-warning .admonition-title, - .rst-content .wy-alert-warning.note .admonition-title, - .rst-content .attention 
.admonition-title, - .rst-content .caution .admonition-title, - .rst-content .wy-alert-warning.danger .admonition-title, - .rst-content .wy-alert-warning.error .admonition-title, - .rst-content .wy-alert-warning.hint .admonition-title, - .rst-content .wy-alert-warning.important .admonition-title, - .rst-content .wy-alert-warning.tip .admonition-title, - .rst-content .warning .admonition-title, - .rst-content .wy-alert-warning.seealso .admonition-title, - .rst-content .admonition-todo .admonition-title, - .rst-content .wy-alert-warning.admonition .admonition-title { - background-image: initial; - background-color: rgb(123, 65, 14); - } - - .wy-alert.wy-alert-info, - .rst-content .note, - .rst-content .wy-alert-info.attention, - .rst-content .wy-alert-info.caution, - .rst-content .wy-alert-info.danger, - .rst-content .wy-alert-info.error, - .rst-content .wy-alert-info.hint, - .rst-content .wy-alert-info.important, - .rst-content .wy-alert-info.tip, - .rst-content .wy-alert-info.warning, - .rst-content .seealso, - .rst-content .wy-alert-info.admonition-todo, - .rst-content .wy-alert-info.admonition { - background-image: initial; - background-color: rgb(32, 35, 36); - } - - .wy-alert.wy-alert-info .wy-alert-title, - .rst-content .note .wy-alert-title, - .rst-content .wy-alert-info.attention .wy-alert-title, - .rst-content .wy-alert-info.caution .wy-alert-title, - .rst-content .wy-alert-info.danger .wy-alert-title, - .rst-content .wy-alert-info.error .wy-alert-title, - .rst-content .wy-alert-info.hint .wy-alert-title, - .rst-content .wy-alert-info.important .wy-alert-title, - .rst-content .wy-alert-info.tip .wy-alert-title, - .rst-content .wy-alert-info.warning .wy-alert-title, - .rst-content .seealso .wy-alert-title, - .rst-content .wy-alert-info.admonition-todo .wy-alert-title, - .rst-content .wy-alert-info.admonition .wy-alert-title, - .wy-alert.wy-alert-info .rst-content .admonition-title, - .rst-content .wy-alert.wy-alert-info .admonition-title, - .rst-content 
.note .admonition-title, - .rst-content .wy-alert-info.attention .admonition-title, - .rst-content .wy-alert-info.caution .admonition-title, - .rst-content .wy-alert-info.danger .admonition-title, - .rst-content .wy-alert-info.error .admonition-title, - .rst-content .wy-alert-info.hint .admonition-title, - .rst-content .wy-alert-info.important .admonition-title, - .rst-content .wy-alert-info.tip .admonition-title, - .rst-content .wy-alert-info.warning .admonition-title, - .rst-content .seealso .admonition-title, - .rst-content .wy-alert-info.admonition-todo .admonition-title, - .rst-content .wy-alert-info.admonition .admonition-title { - background-image: initial; - background-color: rgb(29, 91, 131); - } - - .wy-alert.wy-alert-success, - .rst-content .wy-alert-success.note, - .rst-content .wy-alert-success.attention, - .rst-content .wy-alert-success.caution, - .rst-content .wy-alert-success.danger, - .rst-content .wy-alert-success.error, - .rst-content .hint, - .rst-content .important, - .rst-content .tip, - .rst-content .wy-alert-success.warning, - .rst-content .wy-alert-success.seealso, - .rst-content .wy-alert-success.admonition-todo, - .rst-content .wy-alert-success.admonition { - background-image: initial; - background-color: rgb(9, 66, 58); - } - - .wy-alert.wy-alert-success .wy-alert-title, - .rst-content .wy-alert-success.note .wy-alert-title, - .rst-content .wy-alert-success.attention .wy-alert-title, - .rst-content .wy-alert-success.caution .wy-alert-title, - .rst-content .wy-alert-success.danger .wy-alert-title, - .rst-content .wy-alert-success.error .wy-alert-title, - .rst-content .hint .wy-alert-title, - .rst-content .important .wy-alert-title, - .rst-content .tip .wy-alert-title, - .rst-content .wy-alert-success.warning .wy-alert-title, - .rst-content .wy-alert-success.seealso .wy-alert-title, - .rst-content .wy-alert-success.admonition-todo .wy-alert-title, - .rst-content .wy-alert-success.admonition .wy-alert-title, - .wy-alert.wy-alert-success 
.rst-content .admonition-title, - .rst-content .wy-alert.wy-alert-success .admonition-title, - .rst-content .wy-alert-success.note .admonition-title, - .rst-content .wy-alert-success.attention .admonition-title, - .rst-content .wy-alert-success.caution .admonition-title, - .rst-content .wy-alert-success.danger .admonition-title, - .rst-content .wy-alert-success.error .admonition-title, - .rst-content .hint .admonition-title, - .rst-content .important .admonition-title, - .rst-content .tip .admonition-title, - .rst-content .wy-alert-success.warning .admonition-title, - .rst-content .wy-alert-success.seealso .admonition-title, - .rst-content .wy-alert-success.admonition-todo .admonition-title, - .rst-content .wy-alert-success.admonition .admonition-title { - background-image: initial; - background-color: rgb(21, 150, 125); - } - - .wy-alert.wy-alert-neutral, - .rst-content .wy-alert-neutral.note, - .rst-content .wy-alert-neutral.attention, - .rst-content .wy-alert-neutral.caution, - .rst-content .wy-alert-neutral.danger, - .rst-content .wy-alert-neutral.error, - .rst-content .wy-alert-neutral.hint, - .rst-content .wy-alert-neutral.important, - .rst-content .wy-alert-neutral.tip, - .rst-content .wy-alert-neutral.warning, - .rst-content .wy-alert-neutral.seealso, - .rst-content .wy-alert-neutral.admonition-todo, - .rst-content .wy-alert-neutral.admonition { - background-image: initial; - background-color: rgb(27, 36, 36); - } - - .wy-alert.wy-alert-neutral .wy-alert-title, - .rst-content .wy-alert-neutral.note .wy-alert-title, - .rst-content .wy-alert-neutral.attention .wy-alert-title, - .rst-content .wy-alert-neutral.caution .wy-alert-title, - .rst-content .wy-alert-neutral.danger .wy-alert-title, - .rst-content .wy-alert-neutral.error .wy-alert-title, - .rst-content .wy-alert-neutral.hint .wy-alert-title, - .rst-content .wy-alert-neutral.important .wy-alert-title, - .rst-content .wy-alert-neutral.tip .wy-alert-title, - .rst-content .wy-alert-neutral.warning 
.wy-alert-title, - .rst-content .wy-alert-neutral.seealso .wy-alert-title, - .rst-content .wy-alert-neutral.admonition-todo .wy-alert-title, - .rst-content .wy-alert-neutral.admonition .wy-alert-title, - .wy-alert.wy-alert-neutral .rst-content .admonition-title, - .rst-content .wy-alert.wy-alert-neutral .admonition-title, - .rst-content .wy-alert-neutral.note .admonition-title, - .rst-content .wy-alert-neutral.attention .admonition-title, - .rst-content .wy-alert-neutral.caution .admonition-title, - .rst-content .wy-alert-neutral.danger .admonition-title, - .rst-content .wy-alert-neutral.error .admonition-title, - .rst-content .wy-alert-neutral.hint .admonition-title, - .rst-content .wy-alert-neutral.important .admonition-title, - .rst-content .wy-alert-neutral.tip .admonition-title, - .rst-content .wy-alert-neutral.warning .admonition-title, - .rst-content .wy-alert-neutral.seealso .admonition-title, - .rst-content .wy-alert-neutral.admonition-todo .admonition-title, - .rst-content .wy-alert-neutral.admonition .admonition-title { - color: rgb(192, 186, 178); - background-image: initial; - background-color: rgb(40, 43, 45); - } - - .wy-alert.wy-alert-neutral a, - .rst-content .wy-alert-neutral.note a, - .rst-content .wy-alert-neutral.attention a, - .rst-content .wy-alert-neutral.caution a, - .rst-content .wy-alert-neutral.danger a, - .rst-content .wy-alert-neutral.error a, - .rst-content .wy-alert-neutral.hint a, - .rst-content .wy-alert-neutral.important a, - .rst-content .wy-alert-neutral.tip a, - .rst-content .wy-alert-neutral.warning a, - .rst-content .wy-alert-neutral.seealso a, - .rst-content .wy-alert-neutral.admonition-todo a, - .rst-content .wy-alert-neutral.admonition a { - color: rgb(84, 164, 217); - } - - .wy-tray-container li { - background-image: initial; - background-color: transparent; - color: rgb(232, 230, 227); - box-shadow: rgba(0, 0, 0, 0.1) 0px 5px 5px 0px; - } - - .wy-tray-container li.wy-tray-item-success { - background-image: initial; - 
background-color: rgb(31, 139, 77); - } - - .wy-tray-container li.wy-tray-item-info { - background-image: initial; - background-color: rgb(33, 102, 148); - } - - .wy-tray-container li.wy-tray-item-warning { - background-image: initial; - background-color: rgb(178, 94, 20); - } - - .wy-tray-container li.wy-tray-item-danger { - background-image: initial; - background-color: rgb(162, 33, 20); - } - - .btn { - color: rgb(232, 230, 227); - border-color: rgba(140, 130, 115, 0.1); - background-color: rgb(31, 139, 77); - text-decoration-color: initial; - box-shadow: rgba(24, 26, 27, 0.5) 0px 1px 2px -1px inset, - rgba(0, 0, 0, 0.1) 0px -2px 0px 0px inset; - } - - .btn-hover { - background-image: initial; - background-color: rgb(37, 114, 165); - color: rgb(232, 230, 227); - } - - .btn:hover { - background-image: initial; - background-color: rgb(35, 156, 86); - color: rgb(232, 230, 227); - } - - .btn:focus { - background-image: initial; - background-color: rgb(35, 156, 86); - outline-color: initial; - } - - .btn:active { - box-shadow: rgba(0, 0, 0, 0.05) 0px -1px 0px 0px inset, - rgba(0, 0, 0, 0.1) 0px 2px 0px 0px inset; - } - - .btn:visited { - color: rgb(232, 230, 227); - } - - .btn:disabled { - background-image: none; - box-shadow: none; - } - - .btn-disabled { - background-image: none; - box-shadow: none; - } - - .btn-disabled:hover, - .btn-disabled:focus, - .btn-disabled:active { - background-image: none; - box-shadow: none; - } - - .btn-info { - background-color: rgb(33, 102, 148) !important; - } - - .btn-info:hover { - background-color: rgb(37, 114, 165) !important; - } - - .btn-neutral { - background-color: rgb(27, 36, 36) !important; - color: rgb(192, 186, 178) !important; - } - - .btn-neutral:hover { - color: rgb(192, 186, 178); - background-color: rgb(34, 44, 44) !important; - } - - .btn-neutral:visited { - color: rgb(192, 186, 178) !important; - } - - .btn-success { - background-color: rgb(31, 139, 77) !important; - } - - .btn-success:hover { - background-color: 
rgb(27, 122, 68) !important; - } - - .btn-danger { - background-color: rgb(162, 33, 20) !important; - } - - .btn-danger:hover { - background-color: rgb(149, 30, 18) !important; - } - - .btn-warning { - background-color: rgb(178, 94, 20) !important; - } - - .btn-warning:hover { - background-color: rgb(165, 87, 18) !important; - } - - .btn-invert { - background-color: rgb(26, 28, 29); - } - - .btn-invert:hover { - background-color: rgb(35, 38, 40) !important; - } - - .btn-link { - color: rgb(84, 164, 217); - box-shadow: none; - background-color: transparent !important; - border-color: transparent !important; - } - - .btn-link:hover { - box-shadow: none; - background-color: transparent !important; - color: rgb(79, 162, 216) !important; - } - - .btn-link:active { - box-shadow: none; - background-color: transparent !important; - color: rgb(79, 162, 216) !important; - } - - .btn-link:visited { - color: rgb(164, 103, 188); - } - - .wy-dropdown-menu { - background-image: initial; - background-color: rgb(26, 28, 29); - border-color: rgb(60, 65, 67); - box-shadow: rgba(0, 0, 0, 0.1) 0px 2px 2px 0px; - } - - .wy-dropdown-menu > dd > a { - color: rgb(192, 186, 178); - } - - .wy-dropdown-menu > dd > a:hover { - background-image: initial; - background-color: rgb(33, 102, 148); - color: rgb(232, 230, 227); - } - - .wy-dropdown-menu > dd.divider { - border-top-color: rgb(60, 65, 67); - } - - .wy-dropdown-menu > dd.call-to-action { - background-image: initial; - background-color: rgb(40, 43, 45); - } - - .wy-dropdown-menu > dd.call-to-action:hover { - background-image: initial; - background-color: rgb(40, 43, 45); - } - - .wy-dropdown-menu > dd.call-to-action .btn { - color: rgb(232, 230, 227); - } - - .wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu { - background-image: initial; - background-color: rgb(26, 28, 29); - } - - .wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover { - background-image: initial; - background-color: rgb(33, 102, 148); - color: rgb(232, 230, 227); - 
} - - .wy-dropdown-arrow::before { - border-bottom-color: rgb(51, 55, 57); - border-left-color: transparent; - border-right-color: transparent; - } - - fieldset { - border-color: initial; - } - - legend { - border-color: initial; - } - - label { - color: rgb(200, 195, 188); - } - - .wy-control-group.wy-control-group-required > label::after { - color: rgb(233, 88, 73); - } - - .wy-form-message-inline { - color: rgb(168, 160, 149); - } - - .wy-form-message { - color: rgb(168, 160, 149); - } - - input[type="text"], input[type="password"], input[type="email"], input[type="url"], input[type="date"], input[type="month"], input[type="time"], input[type="datetime"], input[type="datetime-local"], input[type="week"], input[type="number"], input[type="search"], input[type="tel"], input[type="color"] { - border-color: rgb(62, 68, 70); - box-shadow: rgb(43, 47, 49) 0px 1px 3px inset; - } - - input[type="text"]:focus, input[type="password"]:focus, input[type="email"]:focus, input[type="url"]:focus, input[type="date"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="week"]:focus, input[type="number"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="color"]:focus { - outline-color: initial; - border-color: rgb(123, 114, 101); - } - - input.no-focus:focus { - border-color: rgb(62, 68, 70) !important; - } - - input[type="file"]:focus, input[type="radio"]:focus, input[type="checkbox"]:focus { - outline-color: rgb(13, 113, 167); - } - - input[type="text"][disabled], input[type="password"][disabled], input[type="email"][disabled], input[type="url"][disabled], input[type="date"][disabled], input[type="month"][disabled], input[type="time"][disabled], input[type="datetime"][disabled], input[type="datetime-local"][disabled], input[type="week"][disabled], input[type="number"][disabled], input[type="search"][disabled], input[type="tel"][disabled], input[type="color"][disabled] { - 
background-color: rgb(27, 29, 30); - } - - input:focus:invalid, - textarea:focus:invalid, - select:focus:invalid { - color: rgb(233, 88, 73); - border-color: rgb(149, 31, 18); - } - - input:focus:invalid:focus, - textarea:focus:invalid:focus, - select:focus:invalid:focus { - border-color: rgb(149, 31, 18); - } - - input[type="file"]:focus:invalid:focus, input[type="radio"]:focus:invalid:focus, input[type="checkbox"]:focus:invalid:focus { - outline-color: rgb(149, 31, 18); - } - - select, - textarea { - border-color: rgb(62, 68, 70); - box-shadow: rgb(43, 47, 49) 0px 1px 3px inset; - } - - select { - border-color: rgb(62, 68, 70); - background-color: rgb(24, 26, 27); - } - - select:focus, - textarea:focus { - outline-color: initial; - } - - select[disabled], - textarea[disabled], - input[readonly], - select[readonly], - textarea[readonly] { - background-color: rgb(27, 29, 30); - } - - .wy-checkbox, - .wy-radio { - color: rgb(192, 186, 178); - } - - .wy-input-prefix .wy-input-context, - .wy-input-suffix .wy-input-context { - background-color: rgb(27, 36, 36); - border-color: rgb(62, 68, 70); - color: rgb(168, 160, 149); - } - - .wy-input-suffix .wy-input-context { - border-left-color: initial; - } - - .wy-input-prefix .wy-input-context { - border-right-color: initial; - } - - .wy-switch::before { - background-image: initial; - background-color: rgb(53, 57, 59); - } - - .wy-switch::after { - background-image: initial; - background-color: rgb(82, 88, 92); - } - - .wy-switch span { - color: rgb(200, 195, 188); - } - - .wy-switch.active::before { - background-image: initial; - background-color: rgb(24, 106, 58); - } - - .wy-switch.active::after { - background-image: initial; - background-color: rgb(31, 139, 77); - } - - .wy-control-group.wy-control-group-error .wy-form-message, - .wy-control-group.wy-control-group-error > label { - color: rgb(233, 88, 73); - } - - .wy-control-group.wy-control-group-error input[type="text"], .wy-control-group.wy-control-group-error 
input[type="password"], .wy-control-group.wy-control-group-error input[type="email"], .wy-control-group.wy-control-group-error input[type="url"], .wy-control-group.wy-control-group-error input[type="date"], .wy-control-group.wy-control-group-error input[type="month"], .wy-control-group.wy-control-group-error input[type="time"], .wy-control-group.wy-control-group-error input[type="datetime"], .wy-control-group.wy-control-group-error input[type="datetime-local"], .wy-control-group.wy-control-group-error input[type="week"], .wy-control-group.wy-control-group-error input[type="number"], .wy-control-group.wy-control-group-error input[type="search"], .wy-control-group.wy-control-group-error input[type="tel"], .wy-control-group.wy-control-group-error input[type="color"] { - border-color: rgb(149, 31, 18); - } - - .wy-control-group.wy-control-group-error textarea { - border-color: rgb(149, 31, 18); - } - - .wy-inline-validate.wy-inline-validate-success .wy-input-context { - color: rgb(92, 218, 145); - } - - .wy-inline-validate.wy-inline-validate-danger .wy-input-context { - color: rgb(233, 88, 73); - } - - .wy-inline-validate.wy-inline-validate-warning .wy-input-context { - color: rgb(232, 138, 54); - } - - .wy-inline-validate.wy-inline-validate-info .wy-input-context { - color: rgb(84, 164, 217); - } - - .wy-table caption, - .rst-content table.docutils caption, - .rst-content table.field-list caption { - color: rgb(232, 230, 227); - } - - .wy-table thead, - .rst-content table.docutils thead, - .rst-content table.field-list thead { - color: rgb(232, 230, 227); - } - - .wy-table thead th, - .rst-content table.docutils thead th, - .rst-content table.field-list thead th { - border-bottom-color: rgb(56, 61, 63); - } - - .wy-table td, - .rst-content table.docutils td, - .rst-content table.field-list td { - background-color: transparent; - } - - .wy-table-secondary { - color: rgb(152, 143, 129); - } - - .wy-table-tertiary { - color: rgb(152, 143, 129); - } - - .wy-table-odd td, 
- .wy-table-striped tr:nth-child(2n-1) td, - .rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td { - background-color: rgb(27, 36, 36); - } - - .wy-table-backed { - background-color: rgb(27, 36, 36); - } - - .wy-table-bordered-all, - .rst-content table.docutils { - border-color: rgb(56, 61, 63); - } - - .wy-table-bordered-all td, - .rst-content table.docutils td { - border-bottom-color: rgb(56, 61, 63); - border-left-color: rgb(56, 61, 63); - } - - .wy-table-bordered { - border-color: rgb(56, 61, 63); - } - - .wy-table-bordered-rows td { - border-bottom-color: rgb(56, 61, 63); - } - - .wy-table-horizontal td, - .wy-table-horizontal th { - border-bottom-color: rgb(56, 61, 63); - } - - a { - color: rgb(84, 164, 217); - text-decoration-color: initial; - } - - a:hover { - color: rgb(68, 156, 214); - } - - a:visited { - color: rgb(164, 103, 188); - } - - body { - color: rgb(192, 186, 178); - background-image: initial; - background-color: rgb(33, 35, 37); - } - - .wy-text-strike { - text-decoration-color: initial; - } - - .wy-text-warning { - color: rgb(232, 138, 54) !important; - } - - a.wy-text-warning:hover { - color: rgb(236, 157, 87) !important; - } - - .wy-text-info { - color: rgb(84, 164, 217) !important; - } - - a.wy-text-info:hover { - color: rgb(79, 162, 216) !important; - } - - .wy-text-success { - color: rgb(92, 218, 145) !important; - } - - a.wy-text-success:hover { - color: rgb(73, 214, 133) !important; - } - - .wy-text-danger { - color: rgb(233, 88, 73) !important; - } - - a.wy-text-danger:hover { - color: rgb(237, 118, 104) !important; - } - - .wy-text-neutral { - color: rgb(192, 186, 178) !important; - } - - a.wy-text-neutral:hover { - color: rgb(176, 169, 159) !important; - } - - hr { - border-right-color: initial; - border-bottom-color: initial; - border-left-color: initial; - border-top-color: rgb(56, 61, 63); - } - - code, - .rst-content tt, - .rst-content code { - background-image: initial; - background-color: rgb(24, 26, 27); - 
border-color: rgb(56, 61, 63); - color: rgb(233, 88, 73); - } - - .wy-plain-list-disc, - .rst-content .section ul, - .rst-content .toctree-wrapper ul, - article ul { - list-style-image: initial; - } - - .wy-plain-list-disc li, - .rst-content .section ul li, - .rst-content .toctree-wrapper ul li, - article ul li { - list-style-image: initial; - } - - .wy-plain-list-disc li li, - .rst-content .section ul li li, - .rst-content .toctree-wrapper ul li li, - article ul li li { - list-style-image: initial; - } - - .wy-plain-list-disc li li li, - .rst-content .section ul li li li, - .rst-content .toctree-wrapper ul li li li, - article ul li li li { - list-style-image: initial; - } - - .wy-plain-list-disc li ol li, - .rst-content .section ul li ol li, - .rst-content .toctree-wrapper ul li ol li, - article ul li ol li { - list-style-image: initial; - } - - .wy-plain-list-decimal, - .rst-content .section ol, - .rst-content ol.arabic, - article ol { - list-style-image: initial; - } - - .wy-plain-list-decimal li, - .rst-content .section ol li, - .rst-content ol.arabic li, - article ol li { - list-style-image: initial; - } - - .wy-plain-list-decimal li ul li, - .rst-content .section ol li ul li, - .rst-content ol.arabic li ul li, - article ol li ul li { - list-style-image: initial; - } - - .wy-breadcrumbs li code, - .wy-breadcrumbs li .rst-content tt, - .rst-content .wy-breadcrumbs li tt { - border-color: initial; - background-image: none; - background-color: initial; - } - - .wy-breadcrumbs li code.literal, - .wy-breadcrumbs li .rst-content tt.literal, - .rst-content .wy-breadcrumbs li tt.literal { - color: rgb(192, 186, 178); - } - - .wy-breadcrumbs-extra { - color: rgb(184, 178, 169); - } - - .wy-menu a:hover { - text-decoration-color: initial; - } - - .wy-menu-horiz li:hover { - background-image: initial; - background-color: rgba(24, 26, 27, 0.1); - } - - .wy-menu-horiz li.divide-left { - border-left-color: rgb(119, 110, 98); - } - - .wy-menu-horiz li.divide-right { - 
border-right-color: rgb(119, 110, 98); - } - - .wy-menu-vertical header, - .wy-menu-vertical p.caption { - color: rgb(99, 161, 201); - } - - .wy-menu-vertical li.divide-top { - border-top-color: rgb(119, 110, 98); - } - - .wy-menu-vertical li.divide-bottom { - border-bottom-color: rgb(119, 110, 98); - } - - .wy-menu-vertical li.current { - background-image: initial; - background-color: rgb(40, 43, 45); - } - - .wy-menu-vertical li.current a { - color: rgb(152, 143, 129); - border-right-color: rgb(63, 69, 71); - } - - .wy-menu-vertical li.current a:hover { - background-image: initial; - background-color: rgb(47, 51, 53); - } - - .wy-menu-vertical li code, - .wy-menu-vertical li .rst-content tt, - .rst-content .wy-menu-vertical li tt { - border-color: initial; - background-image: inherit; - background-color: inherit; - color: inherit; - } - - .wy-menu-vertical li span.toctree-expand { - color: rgb(183, 177, 168); - } - - .wy-menu-vertical li.on a, - .wy-menu-vertical li.current > a { - color: rgb(192, 186, 178); - background-image: initial; - background-color: rgb(26, 28, 29); - border-color: initial; - } - - .wy-menu-vertical li.on a:hover, - .wy-menu-vertical li.current > a:hover { - background-image: initial; - background-color: rgb(26, 28, 29); - } - - .wy-menu-vertical li.on a:hover span.toctree-expand, - .wy-menu-vertical li.current > a:hover span.toctree-expand { - color: rgb(152, 143, 129); - } - - .wy-menu-vertical li.on a span.toctree-expand, - .wy-menu-vertical li.current > a span.toctree-expand { - color: rgb(200, 195, 188); - } - - .wy-menu-vertical li.toctree-l1.current > a { - border-bottom-color: rgb(63, 69, 71); - border-top-color: rgb(63, 69, 71); - } - - .wy-menu-vertical li.toctree-l2 a, - .wy-menu-vertical li.toctree-l3 a, - .wy-menu-vertical li.toctree-l4 a { - color: rgb(192, 186, 178); - } - - .wy-menu-vertical li.toctree-l2.current > a { - background-image: initial; - background-color: rgb(54, 59, 61); - } - - .wy-menu-vertical 
li.toctree-l2.current li.toctree-l3 > a { - background-image: initial; - background-color: rgb(54, 59, 61); - } - - .wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand { - color: rgb(152, 143, 129); - } - - .wy-menu-vertical li.toctree-l2 span.toctree-expand { - color: rgb(174, 167, 156); - } - - .wy-menu-vertical li.toctree-l3.current > a { - background-image: initial; - background-color: rgb(61, 66, 69); - } - - .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a { - background-image: initial; - background-color: rgb(61, 66, 69); - } - - .wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand { - color: rgb(152, 143, 129); - } - - .wy-menu-vertical li.toctree-l3 span.toctree-expand { - color: rgb(166, 158, 146); - } - - .wy-menu-vertical li.toctree-l2.current a, - .wy-menu-vertical li.toctree-l3.current a { - background-color: #363636; - } - - .wy-menu-vertical li ul li a { - color: rgb(208, 204, 198); - } - - .wy-menu-vertical a { - color: rgb(208, 204, 198); - } - - .wy-menu-vertical a:hover { - background-color: rgb(57, 62, 64); - } - - .wy-menu-vertical a:hover span.toctree-expand { - color: rgb(208, 204, 198); - } - - .wy-menu-vertical a:active { - background-color: rgb(33, 102, 148); - color: rgb(232, 230, 227); - } - - .wy-menu-vertical a:active span.toctree-expand { - color: rgb(232, 230, 227); - } - - .wy-side-nav-search { - background-color: rgb(33, 102, 148); - color: rgb(230, 228, 225); - } - - .wy-side-nav-search input[type="text"] { - border-color: rgb(35, 111, 160); - } - - .wy-side-nav-search img { - background-color: rgb(33, 102, 148); - } - - .wy-side-nav-search > a, - .wy-side-nav-search .wy-dropdown > a { - color: rgb(230, 228, 225); - } - - .wy-side-nav-search > a:hover, - .wy-side-nav-search .wy-dropdown > a:hover { - background-image: initial; - background-color: rgba(24, 26, 27, 0.1); - } - - .wy-side-nav-search > a img.logo, - .wy-side-nav-search .wy-dropdown > a img.logo { - background-image: initial; - 
background-color: transparent; - } - - .wy-side-nav-search > div.version { - color: rgba(232, 230, 227, 0.3); - } - - .wy-nav .wy-menu-vertical header { - color: rgb(84, 164, 217); - } - - .wy-nav .wy-menu-vertical a { - color: rgb(184, 178, 169); - } - - .wy-nav .wy-menu-vertical a:hover { - background-color: rgb(33, 102, 148); - color: rgb(232, 230, 227); - } - - .wy-body-for-nav { - background-image: initial; - background-color: rgb(24, 26, 27); - } - - .wy-nav-side { - color: rgb(169, 161, 150); - background-image: initial; - background-color: rgb(38, 41, 43); - } - - .wy-nav-top { - background-image: initial; - background-color: rgb(33, 102, 148); - color: rgb(232, 230, 227); - } - - .wy-nav-top a { - color: rgb(232, 230, 227); - } - - .wy-nav-top img { - background-color: rgb(33, 102, 148); - } - - .wy-nav-content-wrap { - background-image: initial; - background-color: rgb(26, 28, 29); - } - - .wy-body-mask { - background-image: initial; - background-color: rgba(0, 0, 0, 0.2); - } - - footer { - color: rgb(152, 143, 129); - } - - footer span.commit code, - footer span.commit .rst-content tt, - .rst-content footer span.commit tt { - background-image: none; - background-color: initial; - border-color: initial; - color: rgb(152, 143, 129); - } - - #search-results .search li { - border-bottom-color: rgb(56, 61, 63); - } - - #search-results .search li:first-child { - border-top-color: rgb(56, 61, 63); - } - - #search-results .context { - color: rgb(152, 143, 129); - } - - @media screen and (min-width: 1100px) { - .wy-nav-content-wrap { - background-image: initial; - background-color: rgba(0, 0, 0, 0.05); - } - - .wy-nav-content { - background-image: initial; - background-color: rgb(26, 28, 29); - } - } - .rst-versions { - color: rgb(230, 228, 225); - background-image: initial; - background-color: rgb(23, 24, 25); - } - - .rst-versions a { - color: rgb(84, 164, 217); - text-decoration-color: initial; - } - - .rst-versions .rst-current-version { - background-color: 
rgb(29, 31, 32); - color: rgb(92, 218, 145); - } - - .rst-versions .rst-current-version .fa, - .rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand, - .wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand, - .rst-versions .rst-current-version .rst-content .admonition-title, - .rst-content .rst-versions .rst-current-version .admonition-title, - .rst-versions .rst-current-version .rst-content h1 .headerlink, - .rst-content h1 .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content h2 .headerlink, - .rst-content h2 .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content h3 .headerlink, - .rst-content h3 .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content h4 .headerlink, - .rst-content h4 .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content h5 .headerlink, - .rst-content h5 .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content h6 .headerlink, - .rst-content h6 .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content dl dt .headerlink, - .rst-content dl dt .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content p.caption .headerlink, - .rst-content p.caption .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content table > caption .headerlink, - .rst-content table > caption .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content .code-block-caption .headerlink, - .rst-content .code-block-caption .rst-versions .rst-current-version .headerlink, - .rst-versions .rst-current-version .rst-content tt.download span:first-child, - .rst-content tt.download .rst-versions .rst-current-version span:first-child, - .rst-versions .rst-current-version .rst-content 
code.download span:first-child, - .rst-content code.download .rst-versions .rst-current-version span:first-child, - .rst-versions .rst-current-version .icon { - color: rgb(230, 228, 225); - } - - .rst-versions .rst-current-version.rst-out-of-date { - background-color: rgb(162, 33, 20); - color: rgb(232, 230, 227); - } - - .rst-versions .rst-current-version.rst-active-old-version { - background-color: rgb(192, 156, 11); - color: rgb(232, 230, 227); - } - - .rst-versions .rst-other-versions { - color: rgb(152, 143, 129); - } - - .rst-versions .rst-other-versions hr { - border-right-color: initial; - border-bottom-color: initial; - border-left-color: initial; - border-top-color: rgb(119, 111, 98); - } - - .rst-versions .rst-other-versions dd a { - color: rgb(230, 228, 225); - } - - .rst-versions.rst-badge { - border-color: initial; - } - - .rst-content abbr[title] { - text-decoration-color: initial; - } - - .rst-content.style-external-links a.reference.external::after { - color: rgb(184, 178, 169); - } - - .rst-content pre.literal-block, .rst-content div[class^="highlight"] { - border-color: rgb(56, 61, 63); - } - - .rst-content pre.literal-block div[class^="highlight"], .rst-content div[class^="highlight"] div[class^="highlight"] { - border-color: initial; - } - - .rst-content .linenodiv pre { - border-right-color: rgb(54, 59, 61); - } - - .rst-content .admonition table { - border-color: rgba(140, 130, 115, 0.1); - } - - .rst-content .admonition table td, - .rst-content .admonition table th { - background-image: initial !important; - background-color: transparent !important; - border-color: rgba(140, 130, 115, 0.1) !important; - } - - .rst-content .section ol.loweralpha, - .rst-content .section ol.loweralpha li { - list-style-image: initial; - } - - .rst-content .section ol.upperalpha, - .rst-content .section ol.upperalpha li { - list-style-image: initial; - } - - .rst-content .toc-backref { - color: rgb(192, 186, 178); - } - - .rst-content .sidebar { - 
background-image: initial; - background-color: rgb(27, 36, 36); - border-color: rgb(56, 61, 63); - } - - .rst-content .sidebar .sidebar-title { - background-image: initial; - background-color: rgb(40, 43, 45); - } - - .rst-content .highlighted { - background-image: initial; - background-color: rgb(192, 156, 11); - } - - .rst-content table.docutils.citation, - .rst-content table.docutils.footnote { - background-image: none; - background-color: initial; - border-color: initial; - color: rgb(152, 143, 129); - } - - .rst-content table.docutils.citation td, - .rst-content table.docutils.citation tr, - .rst-content table.docutils.footnote td, - .rst-content table.docutils.footnote tr { - border-color: initial; - background-color: transparent !important; - } - - .rst-content table.docutils.citation tt, - .rst-content table.docutils.citation code, - .rst-content table.docutils.footnote tt, - .rst-content table.docutils.footnote code { - color: rgb(178, 172, 162); - } - - .rst-content table.docutils th { - border-color: rgb(56, 61, 63); - } - - .rst-content table.field-list { - border-color: initial; - } - - .rst-content table.field-list td { - border-color: initial; - } - - .rst-content tt, - .rst-content tt, - .rst-content code { - color: rgb(232, 230, 227); - } - - .rst-content tt.literal, - .rst-content tt.literal, - .rst-content code.literal { - color: rgb(233, 88, 73); - } - - .rst-content tt.xref, - a .rst-content tt, - .rst-content tt.xref, - .rst-content code.xref, - a .rst-content tt, - a .rst-content code { - color: rgb(192, 186, 178); - } - - .rst-content a tt, - .rst-content a tt, - .rst-content a code { - color: rgb(84, 164, 217); - } - - .rst-content dl:not(.docutils) dt { - background-image: initial; - background-color: rgb(32, 35, 36); - color: rgb(84, 164, 217); - border-top-color: rgb(28, 89, 128); - } - - .rst-content dl:not(.docutils) dt::before { - color: rgb(109, 178, 223); - } - - .rst-content dl:not(.docutils) dt .headerlink { - color: rgb(192, 186, 
178); - } - - .rst-content dl:not(.docutils) dl dt { - border-top-color: initial; - border-right-color: initial; - border-bottom-color: initial; - border-left-color: rgb(62, 68, 70); - background-image: initial; - background-color: rgb(32, 35, 37); - color: rgb(178, 172, 162); - } - - .rst-content dl:not(.docutils) dl dt .headerlink { - color: rgb(192, 186, 178); - } - - .rst-content dl:not(.docutils) tt.descname, - .rst-content dl:not(.docutils) tt.descclassname, - .rst-content dl:not(.docutils) tt.descname, - .rst-content dl:not(.docutils) code.descname, - .rst-content dl:not(.docutils) tt.descclassname, - .rst-content dl:not(.docutils) code.descclassname { - background-color: transparent; - border-color: initial; - } - - .rst-content dl:not(.docutils) .optional { - color: rgb(232, 230, 227); - } - - .rst-content .viewcode-link, - .rst-content .viewcode-back { - color: rgb(92, 218, 145); - } - - .rst-content tt.download, - .rst-content code.download { - background-image: inherit; - background-color: inherit; - color: inherit; - border-color: inherit; - } - - .rst-content .guilabel { - border-color: rgb(27, 84, 122); - background-image: initial; - background-color: rgb(32, 35, 36); - } - - span[id*="MathJax-Span"] { - color: rgb(192, 186, 178); - } - - .highlight .hll { - background-color: rgb(82, 82, 0); - } - - .highlight { - background-image: initial; - background-color: rgb(61, 82, 0); - } - - .highlight .c { - color: rgb(119, 179, 195); - } - - .highlight .err { - border-color: rgb(179, 0, 0); - } - - .highlight .k { - color: rgb(126, 255, 163); - } - - .highlight .o { - color: rgb(168, 160, 149); - } - - .highlight .ch { - color: rgb(119, 179, 195); - } - - .highlight .cm { - color: rgb(119, 179, 195); - } - - .highlight .cp { - color: rgb(126, 255, 163); - } - - .highlight .cpf { - color: rgb(119, 179, 195); - } - - .highlight .c1 { - color: rgb(119, 179, 195); - } - - .highlight .cs { - color: rgb(119, 179, 195); - background-color: rgb(60, 0, 0); - } - - 
.highlight .gd { - color: rgb(255, 92, 92); - } - - .highlight .gr { - color: rgb(255, 26, 26); - } - - .highlight .gh { - color: rgb(127, 174, 255); - } - - .highlight .gi { - color: rgb(92, 255, 92); - } - - .highlight .go { - color: rgb(200, 195, 188); - } - - .highlight .gp { - color: rgb(246, 147, 68); - } - - .highlight .gu { - color: rgb(255, 114, 255); - } - - .highlight .gt { - color: rgb(71, 160, 255); - } - - .highlight .kc { - color: rgb(126, 255, 163); - } - - .highlight .kd { - color: rgb(126, 255, 163); - } - - .highlight .kn { - color: rgb(126, 255, 163); - } - - .highlight .kp { - color: rgb(126, 255, 163); - } - - .highlight .kr { - color: rgb(126, 255, 163); - } - - .highlight .kt { - color: rgb(255, 137, 103); - } - - .highlight .m { - color: rgb(125, 222, 174); - } - - .highlight .s { - color: rgb(123, 166, 202); - } - - .highlight .na { - color: rgb(123, 166, 202); - } - - .highlight .nb { - color: rgb(126, 255, 163); - } - - .highlight .nc { - color: rgb(81, 194, 242); - } - - .highlight .no { - color: rgb(103, 177, 215); - } - - .highlight .nd { - color: rgb(178, 172, 162); - } - - .highlight .ni { - color: rgb(217, 100, 73); - } - - .highlight .ne { - color: rgb(126, 255, 163); - } - - .highlight .nf { - color: rgb(131, 186, 249); - } - - .highlight .nl { - color: rgb(137, 193, 255); - } - - .highlight .nn { - color: rgb(81, 194, 242); - } - - .highlight .nt { - color: rgb(138, 191, 249); - } - - .highlight .nv { - color: rgb(190, 103, 215); - } - - .highlight .ow { - color: rgb(126, 255, 163); - } - - .highlight .w { - color: rgb(189, 183, 175); - } - - .highlight .mb { - color: rgb(125, 222, 174); - } - - .highlight .mf { - color: rgb(125, 222, 174); - } - - .highlight .mh { - color: rgb(125, 222, 174); - } - - .highlight .mi { - color: rgb(125, 222, 174); - } - - .highlight .mo { - color: rgb(125, 222, 174); - } - - .highlight .sa { - color: rgb(123, 166, 202); - } - - .highlight .sb { - color: rgb(123, 166, 202); - } - - .highlight .sc 
{ - color: rgb(123, 166, 202); - } - - .highlight .dl { - color: rgb(123, 166, 202); - } - - .highlight .sd { - color: rgb(123, 166, 202); - } - - .highlight .s2 { - color: rgb(123, 166, 202); - } - - .highlight .se { - color: rgb(123, 166, 202); - } - - .highlight .sh { - color: rgb(123, 166, 202); - } - - .highlight .si { - color: rgb(117, 168, 209); - } - - .highlight .sx { - color: rgb(246, 147, 68); - } - - .highlight .sr { - color: rgb(133, 182, 224); - } - - .highlight .s1 { - color: rgb(123, 166, 202); - } - - .highlight .ss { - color: rgb(188, 230, 128); - } - - .highlight .bp { - color: rgb(126, 255, 163); - } - - .highlight .fm { - color: rgb(131, 186, 249); - } - - .highlight .vc { - color: rgb(190, 103, 215); - } - - .highlight .vg { - color: rgb(190, 103, 215); - } - - .highlight .vi { - color: rgb(190, 103, 215); - } - - .highlight .vm { - color: rgb(190, 103, 215); - } - - .highlight .il { - color: rgb(125, 222, 174); - } - - .rst-other-versions a { - border-color: initial; - } - - .ethical-sidebar .ethical-image-link, - .ethical-footer .ethical-image-link { - border-color: initial; - } - - .ethical-sidebar, - .ethical-footer { - background-color: rgb(34, 36, 38); - border-color: rgb(62, 68, 70); - color: rgb(226, 223, 219); - } - - .ethical-sidebar ul { - list-style-image: initial; - } - - .ethical-sidebar ul li { - background-color: rgb(5, 77, 121); - color: rgb(232, 230, 227); - } - - .ethical-sidebar a, - .ethical-sidebar a:visited, - .ethical-sidebar a:hover, - .ethical-sidebar a:active, - .ethical-footer a, - .ethical-footer a:visited, - .ethical-footer a:hover, - .ethical-footer a:active { - color: rgb(226, 223, 219); - text-decoration-color: initial !important; - border-bottom-color: initial !important; - } - - .ethical-callout a { - color: rgb(161, 153, 141) !important; - text-decoration-color: initial !important; - } - - .ethical-fixedfooter { - background-color: rgb(34, 36, 38); - border-top-color: rgb(66, 72, 74); - color: rgb(192, 186, 
178); - } - - .ethical-fixedfooter .ethical-text::before { - background-color: rgb(61, 140, 64); - color: rgb(232, 230, 227); - } - - .ethical-fixedfooter .ethical-callout { - color: rgb(168, 160, 149); - } - - .ethical-fixedfooter a, - .ethical-fixedfooter a:hover, - .ethical-fixedfooter a:active, - .ethical-fixedfooter a:visited { - color: rgb(192, 186, 178); - text-decoration-color: initial; - } - - .ethical-rtd .ethical-sidebar { - color: rgb(184, 178, 169); - } - - .ethical-alabaster a.ethical-image-link { - border-color: initial !important; - } - - .ethical-dark-theme .ethical-sidebar { - background-color: rgb(58, 62, 65); - border-color: rgb(75, 81, 84); - color: rgb(193, 188, 180) !important; - } - - .ethical-dark-theme a, - .ethical-dark-theme a:visited { - color: rgb(216, 213, 208) !important; - border-bottom-color: initial !important; - } - - .ethical-dark-theme .ethical-callout a { - color: rgb(184, 178, 169) !important; - } - - .keep-us-sustainable { - border-color: rgb(87, 133, 38); - } - - .keep-us-sustainable a, - .keep-us-sustainable a:hover, - .keep-us-sustainable a:visited { - text-decoration-color: initial; - } - - .wy-body-for-nav .keep-us-sustainable { - color: rgb(184, 178, 169); - } - - .wy-body-for-nav .keep-us-sustainable a { - color: rgb(222, 219, 215); - } - - /* For black-on-white/transparent images at handbook/text-anchors.html */ - #text-anchors img { - filter: invert(1) brightness(0.85) hue-rotate(-60deg); - } -} diff --git a/docs/resources/css/light.css b/docs/resources/css/light.css deleted file mode 100644 index 04edd7b..0000000 --- a/docs/resources/css/light.css +++ /dev/null @@ -1,8 +0,0 @@ -@media (prefers-color-scheme: light) { - - .wy-menu-vertical li.toctree-l2.current a, - .wy-menu-vertical li.toctree-l3.current a { - background-color: #c9c9c9; - } - -} diff --git a/docs/resources/css/styles.css b/docs/resources/css/styles.css deleted file mode 100644 index 62f995e..0000000 --- a/docs/resources/css/styles.css +++ /dev/null 
@@ -1,12 +0,0 @@ -th p { - margin-bottom: 0; -} - -.rst-content tr .line-block { - font-size: 1rem; - margin-bottom: 0; -} - -.wy-nav-content { - max-width: 80% !important; -} diff --git a/docs/resources/js/script.js b/docs/resources/js/script.js deleted file mode 100644 index 5cb6494..0000000 --- a/docs/resources/js/script.js +++ /dev/null @@ -1,58 +0,0 @@ -jQuery(document).ready(function ($) { - setTimeout(function () { - var sectionID = 'base'; - var search = function ($section, $sidebarItem) { - $section.children('.section, .function, .method').each(function () { - if ($(this).hasClass('section')) { - sectionID = $(this).attr('id'); - search($(this), $sidebarItem.parent().find('[href="#'+sectionID+'"]')); - } else { - var $dt = $(this).children('dt'); - var id = $dt.attr('id'); - if (id === undefined) { - return; - } - - var $functionsUL = $sidebarItem.siblings('[data-sectionID='+sectionID+']'); - if (!$functionsUL.length) { - $functionsUL = $('