diff --git a/backend/src/nodes/impl/pytorch/utils.py b/backend/src/nodes/impl/pytorch/utils.py
index 35552b770e..74f13405a6 100644
--- a/backend/src/nodes/impl/pytorch/utils.py
+++ b/backend/src/nodes/impl/pytorch/utils.py
@@ -37,6 +37,7 @@ def to_pytorch_execution_options(options: ExecutionOptions):
onnx_should_tensorrt_cache=options.onnx_should_tensorrt_cache,
onnx_tensorrt_cache_path=options.onnx_tensorrt_cache_path,
onnx_should_tensorrt_fp16=options.onnx_should_tensorrt_fp16,
+ reserved_system_memory=options.reserved_system_memory,
)
diff --git a/backend/src/nodes/utils/exec_options.py b/backend/src/nodes/utils/exec_options.py
index fb25ae2b04..b1139e74d3 100644
--- a/backend/src/nodes/utils/exec_options.py
+++ b/backend/src/nodes/utils/exec_options.py
@@ -16,6 +16,7 @@ def __init__(
onnx_should_tensorrt_cache: bool,
onnx_tensorrt_cache_path: str,
onnx_should_tensorrt_fp16: bool,
+ reserved_system_memory: int,
) -> None:
self.__device = device
self.__fp16 = fp16
@@ -26,6 +27,7 @@ def __init__(
self.__onnx_should_tensorrt_cache = onnx_should_tensorrt_cache
self.__onnx_tensorrt_cache_path = onnx_tensorrt_cache_path
self.__onnx_should_tensorrt_fp16 = onnx_should_tensorrt_fp16
+ self.__reserved_system_memory = reserved_system_memory
if (
not os.path.exists(onnx_tensorrt_cache_path)
@@ -34,7 +36,14 @@ def __init__(
os.makedirs(onnx_tensorrt_cache_path)
logger.debug(
- f"PyTorch execution options: fp16: {fp16}, device: {self.full_device} | NCNN execution options: gpu_index: {ncnn_gpu_index} | ONNX execution options: gpu_index: {onnx_gpu_index}, execution_provider: {onnx_execution_provider}, should_tensorrt_cache: {onnx_should_tensorrt_cache}, tensorrt_cache_path: {onnx_tensorrt_cache_path}, should_tensorrt_fp16: {onnx_should_tensorrt_fp16}"
+ f"PyTorch execution options: fp16: {fp16}, device: {self.full_device} |"
+ f" NCNN execution options: gpu_index: {ncnn_gpu_index} | ONNX execution"
+ f" options: gpu_index: {onnx_gpu_index}, execution_provider:"
+ f" {onnx_execution_provider}, should_tensorrt_cache:"
+ f" {onnx_should_tensorrt_cache}, tensorrt_cache_path:"
+ f" {onnx_tensorrt_cache_path}, should_tensorrt_fp16:"
+ f" {onnx_should_tensorrt_fp16}, reserved_system_memory:"
+ f" {reserved_system_memory}"
)
@property
@@ -75,9 +84,13 @@ def onnx_tensorrt_cache_path(self):
def onnx_should_tensorrt_fp16(self):
return self.__onnx_should_tensorrt_fp16
+ @property
+ def reserved_system_memory(self):
+ return self.__reserved_system_memory
+
__global_exec_options = ExecutionOptions(
- "cpu", False, 0, 0, 0, "CPUExecutionProvider", False, "", False
+ "cpu", False, 0, 0, 0, "CPUExecutionProvider", False, "", False, 1024
)
@@ -102,6 +115,7 @@ class JsonExecutionOptions(TypedDict):
onnxShouldTensorRtCache: bool
onnxTensorRtCachePath: str
onnxShouldTensorRtFp16: bool
+ reservedSystemMemory: int
def parse_execution_options(json: JsonExecutionOptions) -> ExecutionOptions:
@@ -115,4 +129,5 @@ def parse_execution_options(json: JsonExecutionOptions) -> ExecutionOptions:
onnx_should_tensorrt_cache=json["onnxShouldTensorRtCache"],
onnx_tensorrt_cache_path=json["onnxTensorRtCachePath"],
onnx_should_tensorrt_fp16=json["onnxShouldTensorRtFp16"],
+ reserved_system_memory=json["reservedSystemMemory"],
)
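
The new field travels as whole megabytes from the renderer down to ExecutionOptions; conversion to bytes only happens at the point of use. A minimal sketch of that convention (the helper name and sample values are illustrative, not part of the diff):

# Hypothetical helper illustrating the MB-on-the-wire convention:
# ExecutionOptions stores whole megabytes; the byte conversion happens at the call site.
def reserved_bytes(reserved_system_memory_mb: int) -> int:
    return reserved_system_memory_mb * (1024**2)

assert reserved_bytes(1024) == 1 << 30  # the 1024 MB default above equals 1 GiB
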
diff --git a/backend/src/packages/chaiNNer_pytorch/pytorch/processing/upscale_image.py b/backend/src/packages/chaiNNer_pytorch/pytorch/processing/upscale_image.py
index 75b4ff79b3..58fa854212 100644
--- a/backend/src/packages/chaiNNer_pytorch/pytorch/processing/upscale_image.py
+++ b/backend/src/packages/chaiNNer_pytorch/pytorch/processing/upscale_image.py
@@ -4,6 +4,7 @@
import numpy as np
import torch
+import psutil
from sanic.log import logger
from nodes.impl.pytorch.auto_split import pytorch_auto_split
@@ -21,6 +22,9 @@
from nodes.utils.exec_options import ExecutionOptions, get_execution_options
from nodes.utils.utils import get_h_w_c
+from system import is_arm_mac
+
+
from .. import processing_group
@@ -39,13 +43,28 @@ def upscale(
device = torch.device(options.full_device)
def estimate():
+ element_size = 2 if use_fp16 else 4
+ model_bytes = sum(p.numel() * element_size for p in model.parameters())
+
if "cuda" in options.full_device:
mem_info: Tuple[int, int] = torch.cuda.mem_get_info(device) # type: ignore
free, _total = mem_info
- element_size = 2 if use_fp16 else 4
- model_bytes = sum(p.numel() * element_size for p in model.parameters())
budget = int(free * 0.8)
+ return MaxTileSize(
+ estimate_tile_size(
+ budget,
+ model_bytes,
+ img,
+ element_size,
+ )
+ )
+
+ if is_arm_mac:
+ total_memory = psutil.virtual_memory().total
+ reserved_system_memory = options.reserved_system_memory * (1024**2)
+ budget = int(total_memory - reserved_system_memory)
+
return MaxTileSize(
estimate_tile_size(
budget,
@@ -71,8 +90,10 @@ def estimate():
@processing_group.register(
schema_id="chainner:pytorch:upscale_image",
name="Upscale Image",
- description="Upscales an image using a PyTorch Super-Resolution model. \
- Select a manual number of tiles if you are having issues with the automatic mode. ",
+ description=(
+ "Upscales an image using a PyTorch Super-Resolution model. Select a"
+ " manual number of tiles if you are having issues with the automatic mode. "
+ ),
icon="PyTorch",
inputs=[
ImageInput().with_id(1),
@@ -80,10 +101,16 @@ def estimate():
TileSizeDropdown()
.with_id(2)
.with_docs(
- "Tiled upscaling is used to allow large images to be upscaled without hitting memory limits.",
- "This works by splitting the image into tiles (with overlap), upscaling each tile individually, and seamlessly recombining them.",
- "Generally it's recommended to use the largest tile size possible for best performance (with the ideal scenario being no tiling at all), but depending on the model and image size, this may not be possible.",
- "If you are having issues with the automatic mode, you can manually select a tile size. Sometimes, a manually selected tile size may be faster than what the automatic mode picks.",
+ "Tiled upscaling is used to allow large images to be upscaled without"
+ " hitting memory limits.",
+ "This works by splitting the image into tiles (with overlap), upscaling"
+ " each tile individually, and seamlessly recombining them.",
+ "Generally it's recommended to use the largest tile size possible for best"
+ " performance (with the ideal scenario being no tiling at all), but"
+ " depending on the model and image size, this may not be possible.",
+ "If you are having issues with the automatic mode, you can manually select"
+ " a tile size. Sometimes, a manually selected tile size may be faster than"
+ " what the automatic mode picks.",
hint=True,
),
],
@@ -111,7 +138,8 @@ def upscale_image_node(
scale = model.scale
h, w, c = get_h_w_c(img)
logger.debug(
- f"Upscaling a {h}x{w}x{c} image with a {scale}x model (in_nc: {in_nc}, out_nc: {out_nc})"
+ f"Upscaling a {h}x{w}x{c} image with a {scale}x model (in_nc: {in_nc}, out_nc:"
+ f" {out_nc})"
)
return convenient_upscale(
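
The reworked estimate() computes the model footprint once and then picks a per-device budget: 80% of currently free VRAM on CUDA, or total unified memory minus the reserved amount on Apple Silicon. A standalone sketch of that arithmetic, using a toy layer in place of a real Super-Resolution model and an assumed 8192 MB reservation:

import psutil
import torch

# Model footprint: parameter count times element size (2 bytes under fp16, 4 under fp32).
model = torch.nn.Conv2d(3, 64, kernel_size=3)  # toy stand-in for a real model
use_fp16 = True
element_size = 2 if use_fp16 else 4
model_bytes = sum(p.numel() * element_size for p in model.parameters())

# CUDA branch: 80% of currently free VRAM (12 GiB free used as an example value).
free_vram = 12 * 1024**3
cuda_budget = int(free_vram * 0.8)

# Apple Silicon branch: total unified memory minus the user-reserved amount (in MB).
reserved_system_memory_mb = 8192
arm_budget = int(psutil.virtual_memory().total - reserved_system_memory_mb * (1024**2))

print(model_bytes, cuda_budget, arm_budget)
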
diff --git a/src/common/Backend.ts b/src/common/Backend.ts
index 4d58bc1c00..47a587eadb 100644
--- a/src/common/Backend.ts
+++ b/src/common/Backend.ts
@@ -77,6 +77,7 @@ export interface BackendExecutionOptions {
onnxShouldTensorRtCache: boolean;
onnxTensorRtCachePath: string;
onnxShouldTensorRtFp16: boolean;
+ reservedSystemMemory: number;
}
export interface BackendRunRequest {
data: BackendJsonNode[];
diff --git a/src/common/env.ts b/src/common/env.ts
index ace19241f5..03e25afd65 100644
--- a/src/common/env.ts
+++ b/src/common/env.ts
@@ -31,3 +31,5 @@ export const sanitizedEnv = env;
export const getOnnxTensorRtCacheLocation = (userDataPath: string) => {
return path.join(userDataPath, 'onnx-tensorrt-cache');
};
+
+export const totalMemory = os.totalmem();
diff --git a/src/main/cli/run.ts b/src/main/cli/run.ts
index dcc4c710d5..95d93da189 100644
--- a/src/main/cli/run.ts
+++ b/src/main/cli/run.ts
@@ -138,6 +138,7 @@ const getExecutionOptions = (): BackendExecutionOptions => {
onnxShouldTensorRtCache: getSetting('onnx-should-tensorrt-cache', false),
onnxTensorRtCachePath: getOnnxTensorRtCacheLocation(app.getPath('userData')),
onnxShouldTensorRtFp16: getSetting('onnx-should-tensorrt-fp16', false),
+ reservedSystemMemory: getSetting('reserved-system-memory', 0),
};
};
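
The CLI falls back to 0 MB here, while the module-level ExecutionOptions default in exec_options.py is 1024 MB. Fed into the Apple Silicon branch above, a stored value of 0 reserves nothing, so the budget equals all physical memory; a quick illustrative check:

import psutil

total = psutil.virtual_memory().total
budget = int(total - 0 * (1024**2))  # reservedSystemMemory == 0 -> nothing held back
assert budget == total
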
diff --git a/src/renderer/components/SettingsModal.tsx b/src/renderer/components/SettingsModal.tsx
index 08e34d1ea6..456b703253 100644
--- a/src/renderer/components/SettingsModal.tsx
+++ b/src/renderer/components/SettingsModal.tsx
@@ -8,6 +8,7 @@ import {
Input,
InputGroup,
InputLeftElement,
+ InputRightElement,
Modal,
ModalBody,
ModalCloseButton,
@@ -47,7 +48,7 @@ import {
import { BsFillPencilFill, BsPaletteFill } from 'react-icons/bs';
import { FaPython, FaTools } from 'react-icons/fa';
import { useContext } from 'use-context-selector';
-import { getOnnxTensorRtCacheLocation, hasTensorRt, isArmMac } from '../../common/env';
+import { getOnnxTensorRtCacheLocation, hasTensorRt, isArmMac, totalMemory } from '../../common/env';
import { log } from '../../common/log';
import { ipcRenderer } from '../../common/safeIpc';
import { BackendContext } from '../contexts/BackendContext';
@@ -270,7 +271,7 @@ const AppearanceSettings = memo(() => {
});
const EnvironmentSettings = memo(() => {
- const { useStartupTemplate } = useContext(SettingsContext);
+ const { useStartupTemplate, useReservedSystemMemory } = useContext(SettingsContext);
const [startupTemplate, setStartupTemplate] = useStartupTemplate;
@@ -297,11 +298,54 @@ const EnvironmentSettings = memo(() => {
}
}, [startupTemplate, lastDirectory, setStartupTemplate]);
+ const [reservedSystemMemory, setReservedSystemMemory] = useReservedSystemMemory;
+
+ // The maximum amount that can be reserved for the system is 80% of total memory
+ const calculateMaxValue = () => (totalMemory / 1024 ** 2) * 0.8;
+ const minValue = 8192;
+
    return (
        <VStack
            divider={<StackDivider />}
            w="full"
        >
+            {isArmMac ? (
+                <NumberInput
+                    max={calculateMaxValue()}
+                    min={minValue}
+                    value={reservedSystemMemory}
+                    onChange={(number) => {
+                        const value = Number(number);
+
+                        if (!Number.isNaN(value)) {
+                            setReservedSystemMemory(value);
+                        }
+                    }}
+                >
+                    <NumberInputField />
+                    <InputRightElement pointerEvents="none">
+                        MB
+                    </InputRightElement>
+                    <NumberInputStepper>
+                        <NumberIncrementStepper />
+                        <NumberDecrementStepper />
+                    </NumberInputStepper>
+                </NumberInput>
+            ) : (
+                []
+            )}
                        // eslint-disable-next-line @typescript-eslint/no-misused-promises
                        onClick={onButtonClick}
                    />
+                    <InputRightElement>
+                        <IconButton
+                            aria-label="Clear"
+                            icon={<CloseIcon />}
+                            size="xs"
+                            onClick={() => setStartupTemplate('')}
+                        />
+                    </InputRightElement>
-                    <IconButton
-                        aria-label="Clear"
-                        icon={<CloseIcon />}
-                        size="xs"
-                        onClick={() => setStartupTemplate('')}
-                    />
Looking for the CPU and FP16 settings? They moved to the Python tab.
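
The bounds wired into the reserved-memory input come down to simple arithmetic: a hard floor of 8192 MB, a ceiling of 80% of total memory, and (from SettingsContext below) a default of 50%. Worked out for an assumed 16 GiB machine:

total_memory_mb = 16 * 1024            # assumption: 16 GiB of unified memory

min_value = 8192                       # minValue above
max_value = total_memory_mb * 0.8      # calculateMaxValue(): 80% of total memory
default_value = total_memory_mb * 0.5  # calculatedReservedMemory(): 50% of total memory

print(min_value, default_value, max_value)  # 8192 8192.0 13107.2
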
diff --git a/src/renderer/contexts/SettingsContext.tsx b/src/renderer/contexts/SettingsContext.tsx
index d812663cee..c092ef0571 100644
--- a/src/renderer/contexts/SettingsContext.tsx
+++ b/src/renderer/contexts/SettingsContext.tsx
@@ -2,6 +2,7 @@ import { useColorMode } from '@chakra-ui/react';
import React, { memo, useEffect } from 'react';
import { createContext } from 'use-context-selector';
import { SchemaId } from '../../common/common-types';
+import { totalMemory } from '../../common/env';
import { GetSetState, SetState } from '../helpers/types';
import { useLocalStorage } from '../hooks/useLocalStorage';
import { useMemoArray, useMemoObject } from '../hooks/useMemo';
@@ -26,6 +27,7 @@ interface Settings {
 setSnapToGridAmount: SetState<number>
 ];
 useStartupTemplate: GetSetState<string>;
+ useReservedSystemMemory: GetSetState<number>;
 useSelectTheme: GetSetState<string>;
 useAnimateChain: GetSetState<boolean>;
 useExperimentalFeatures: GetSetState<boolean>;
@@ -37,6 +39,8 @@ interface Settings {
 useNodeSelectorCollapsed: GetSetState<boolean>;
}
+const calculatedReservedMemory = () => (totalMemory / 1024 ** 2) * 0.5;
+
// TODO: create context requires default values
export const SettingsContext = createContext<Readonly<Settings>>({} as Settings);
@@ -65,6 +69,9 @@ export const SettingsProvider = memo(({ children }: React.PropsWithChildren<unknown>) => {
useOnnxExecutionProvider,
useOnnxShouldTensorRtCache,
useOnnxShouldTensorRtFp16,
+ useReservedSystemMemory,
} = useContext(SettingsContext);
const [isCpu] = useIsCpu;
@@ -37,6 +38,7 @@ export const useBackendExecutionOptions = (): BackendExecutionOptions => {
}, []);
const [onnxShouldTensorRtFp16] = useOnnxShouldTensorRtFp16;
+ const [reservedSystemMemory] = useReservedSystemMemory;
return {
isCpu,
@@ -48,5 +50,6 @@ export const useBackendExecutionOptions = (): BackendExecutionOptions => {
onnxShouldTensorRtCache,
onnxTensorRtCachePath,
onnxShouldTensorRtFp16,
+ reservedSystemMemory,
};
};