[cm] Custom Neutone exceptions #58
base: main
First changed file (the benchmark and profiling CLI commands):

@@ -7,6 +7,7 @@
 import torch
 from torch.autograd.profiler import record_function
 from neutone_sdk import constants
+from neutone_sdk.exceptions import INFERENCE_MODE_EXCEPTION
 from neutone_sdk.sqw import SampleQueueWrapper
 from neutone_sdk.utils import load_neutone_model, model_to_torchscript
 import numpy as np
@@ -90,44 +91,47 @@ def benchmark_speed_(
     np.set_printoptions(precision=3)
     torch.set_num_threads(num_threads)
     torch.set_num_interop_threads(num_interop_threads)
-    with torch.no_grad():
-        m, _ = load_neutone_model(model_file)
-        log.info(
-            f"Running benchmark for buffer sizes {buffer_size} and sample rates {sample_rate}. Outliers will be removed from the calculation of mean and std and displayed separately if existing."
-        )
-        for sr, bs in itertools.product(sample_rate, buffer_size):
-            m.set_daw_sample_rate_and_buffer_size(sr, bs)
-            for _ in range(n_iters):  # Warmup
-                m.forward(torch.rand((daw_n_ch, bs)))
-            m.reset()
-
-            # Pregenerate random buffers to more accurately benchmark the model itself
-            def get_random_buffer_generator():
-                buffers = torch.rand(100, daw_n_ch, bs)
-                i = 0
-
-                def return_next_random_buffer():
-                    nonlocal i
-                    i = (i + 1) % 100
-                    return buffers[i]
-
-                return return_next_random_buffer
-
-            rbg = get_random_buffer_generator()
-
-            durations = np.array(
-                timeit.repeat(lambda: m.forward(rbg()), repeat=repeat, number=n_iters)
-            )
-            m.reset()
-            mean, std = np.mean(durations), np.std(durations)
-            outlier_mask = np.abs(durations - mean) > 2 * std
-            outliers = durations[outlier_mask]
-            # Remove outliers from general benchmark
-            durations = durations[~outlier_mask]
-            mean, std = np.mean(durations), np.std(durations)
-            log.info(
-                f"Sample rate: {sr: 6} | Buffer size: {bs: 6} | duration: {mean: 6.3f}±{std:.3f} | 1/RTF: {bs/(mean/n_iters*sr): 6.3f} | Outliers: {outliers[:3]}"
-            )
+    try:
+        with torch.inference_mode():
+            m, _ = load_neutone_model(model_file)
+            log.info(
+                f"Running benchmark for buffer sizes {buffer_size} and sample rates {sample_rate}. Outliers will be removed from the calculation of mean and std and displayed separately if existing."
+            )
+            for sr, bs in itertools.product(sample_rate, buffer_size):
+                m.set_daw_sample_rate_and_buffer_size(sr, bs)
+                for _ in range(n_iters):  # Warmup
+                    m.forward(torch.rand((daw_n_ch, bs)))
+                m.reset()
+
+                # Pregenerate random buffers to more accurately benchmark the model itself
+                def get_random_buffer_generator():
+                    buffers = torch.rand(100, daw_n_ch, bs)
+                    i = 0
+
+                    def return_next_random_buffer():
+                        nonlocal i
+                        i = (i + 1) % 100
+                        return buffers[i]
+
+                    return return_next_random_buffer
+
+                rbg = get_random_buffer_generator()
+
+                durations = np.array(
+                    timeit.repeat(lambda: m.forward(rbg()), repeat=repeat, number=n_iters)
+                )
+                m.reset()
+                mean, std = np.mean(durations), np.std(durations)
+                outlier_mask = np.abs(durations - mean) > 2 * std
+                outliers = durations[outlier_mask]
+                # Remove outliers from general benchmark
+                durations = durations[~outlier_mask]
+                mean, std = np.mean(durations), np.std(durations)
+                log.info(
+                    f"Sample rate: {sr: 6} | Buffer size: {bs: 6} | duration: {mean: 6.3f}±{std:.3f} | 1/RTF: {bs/(mean/n_iters*sr): 6.3f} | Outliers: {outliers[:3]}"
+                )
+    except RuntimeError as e:
+        INFERENCE_MODE_EXCEPTION.raise_if_triggered(e)
 
 
 @cli.command()
@@ -163,7 +167,7 @@ def benchmark_latency_(
     log.info(f"Native buffer sizes: {nbs[:10]}, Native sample rates: {nsr[:10]}")
     if len(nbs) > 10 or len(nsr) > 10:
         log.info(f"Showing only the first 10 values in case there are more.")
-    with torch.no_grad():
+    with torch.inference_mode():
         delays = []
         for sr, bs in itertools.product(sample_rate, buffer_size):
             m.set_daw_sample_rate_and_buffer_size(sr, bs)
@@ -212,34 +216,36 @@ def profile_sqw(
     sqw.prepare_for_inference()
     if convert_to_torchscript:
         log.info("Converting to TorchScript")
-        with torch.no_grad():
+        with torch.inference_mode():
             sqw = model_to_torchscript(sqw, freeze=False, optimize=False)
 
-    with torch.inference_mode():
-        with torch.profiler.profile(
-            activities=[torch.profiler.ProfilerActivity.CPU],
-            with_stack=True,
-            profile_memory=True,
-            record_shapes=False,
-        ) as prof:
-            with record_function("forward"):
-                for audio_buff, param_buff in tqdm(zip(audio_buffers, param_buffers)):
-                    out_buff = sqw.forward(audio_buff, param_buff)
-
-    log.info("Displaying Total CPU Time")
-    log.info(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
-    # log.info(prof.key_averages(group_by_stack_n=5).table(sort_by="cpu_time_total", row_limit=10))
-    log.info("Displaying CPU Memory Usage")
-    log.info(
-        prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10)
-    )
-    log.info("Displaying Grouped CPU Memory Usage")
-    log.info(
-        prof.key_averages(group_by_stack_n=5).table(
-            sort_by="self_cpu_memory_usage", row_limit=5
-        )
-    )
+    try:
+        with torch.inference_mode():
+            with torch.profiler.profile(
+                activities=[torch.profiler.ProfilerActivity.CPU],
+                with_stack=True,
+                profile_memory=True,
+                record_shapes=False,
+            ) as prof:
+                with record_function("forward"):
+                    for audio_buff, param_buff in tqdm(zip(audio_buffers, param_buffers)):
+                        out_buff = sqw.forward(audio_buff, param_buff)
+
+        log.info("Displaying Total CPU Time")
+        log.info(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
+        # log.info(prof.key_averages(group_by_stack_n=5).table(sort_by="cpu_time_total", row_limit=10))

[Review comment, on the commented-out line above] Can you remove this?

+        log.info("Displaying CPU Memory Usage")
+        log.info(
+            prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10)
+        )
+        log.info("Displaying Grouped CPU Memory Usage")
+        log.info(
+            prof.key_averages(group_by_stack_n=5).table(
+                sort_by="self_cpu_memory_usage", row_limit=5
+            )
+        )
+    except RuntimeError as e:
+        INFERENCE_MODE_EXCEPTION.raise_if_triggered(e)
 
 
 @cli.command()
 @click.option("--model_file", help="Path to model file")
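Taken together, the pattern these hunks introduce is: run everything under torch.inference_mode(), catch RuntimeError, and let the new helper decide whether to re-raise it as a friendlier error. A minimal, self-contained sketch of that flow (the model path and buffer shape below are placeholders, not values from the PR):

    import torch
    from neutone_sdk.exceptions import INFERENCE_MODE_EXCEPTION
    from neutone_sdk.utils import load_neutone_model

    try:
        with torch.inference_mode():
            m, _ = load_neutone_model("model.nm")  # placeholder path
            m.forward(torch.rand((2, 2048)))  # placeholder (n_ch, buffer_size)
    except RuntimeError as e:
        # Re-raised as the NeutoneException (chained via `raise ... from`) if the
        # RuntimeError message contains the trigger string; otherwise the original
        # RuntimeError is re-raised unchanged.
        INFERENCE_MODE_EXCEPTION.raise_if_triggered(e)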
Second changed file (new): neutone_sdk/exceptions.py

@@ -0,0 +1,51 @@
+import logging
+import os
+
+logging.basicConfig()
+log = logging.getLogger(__name__)
+log.setLevel(level=os.environ.get("LOGLEVEL", "INFO"))
+
+
+class NeutoneException(Exception):
+    """
+    Custom exception class for Neutone. This is used to wrap other exceptions with more
+    information and tips when other, more cryptic exceptions are raised.
+    """
+    def __init__(self, message: str, trigger_type: type[Exception], trigger_str: str):
+        """
+        Args:
+            message: The message to display when this exception is raised.
+            trigger_type: The type of exception that triggers this exception.
+            trigger_str: Text that must be in the message of the trigger exception.
+        """
+        super().__init__(message)
+        self.trigger_type = trigger_type
+        self.trigger_str = trigger_str
+
+    def raise_if_triggered(self, orig_exception: Exception) -> None:
+        """
+        Raises this exception from the original exception (still includes the stack
+        trace and information of the original exception) if it is of the trigger type
+        and contains the trigger string in its message. Otherwise, raises the original
+        exception.
+        """
+        if (isinstance(orig_exception, self.trigger_type)
+                and self.trigger_str in str(orig_exception)):
+            raise self from orig_exception
+        else:
+            raise orig_exception
+
+
+# TODO(cm): constant for now, but if we need more of these we could use a factory method
+INFERENCE_MODE_EXCEPTION = NeutoneException(
+    message="""
+    Your model does not support inference mode. Ensure you are not calling forward on
+    your model before wrapping it or saving it using `save_neutone_model()`. Also, try
+    to make sure that you are not creating new tensors in the forward call of your
+    model, instead pre-allocate them in the constructor. If these suggestions fail, try
+    creating and saving your model entirely inside of a `with torch.inference_mode():`
+    block.
+    """,
+    trigger_type=RuntimeError,
+    trigger_str="Inference tensors cannot be saved for backward.",
+)

[Review comment, on INFERENCE_MODE_EXCEPTION] Could this be a list of possible exceptions that you iterate through in the try/catch block?

[Review comment, on trigger_str] I am a bit worried about:

[Reply] Agreed, I used only a snippet of the text, but it's probably better to change it to use almost the entire text. I think it would also make sense to change this to a list of strings that can each trigger the exception, so that if the text is slightly different in older PyTorch versions we can simply add those messages to the list.
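For reference, the refactor sketched in the two comments above (several trigger strings per exception, plus a list of exceptions that one shared handler iterates through) could look roughly like this. It is a sketch only, not part of this PR; names such as NEUTONE_EXCEPTIONS and is_triggered_by are hypothetical:

    class NeutoneException(Exception):
        def __init__(self, message: str, trigger_type: type[Exception],
                     trigger_strs: list[str]):
            super().__init__(message)
            self.trigger_type = trigger_type
            # Any one of these substrings (e.g. variants of the same message
            # across PyTorch versions) marks the original exception as a trigger.
            self.trigger_strs = trigger_strs

        def is_triggered_by(self, orig_exception: Exception) -> bool:
            return isinstance(orig_exception, self.trigger_type) and any(
                s in str(orig_exception) for s in self.trigger_strs
            )

    # Hypothetical module-level registry iterated by a single shared handler.
    NEUTONE_EXCEPTIONS: list[NeutoneException] = []  # e.g. [INFERENCE_MODE_EXCEPTION]

    def raise_neutone_exception_if_triggered(orig_exception: Exception) -> None:
        for exc in NEUTONE_EXCEPTIONS:
            if exc.is_triggered_by(orig_exception):
                raise exc from orig_exception
        raise orig_exception

Call sites would then need only the one handler in their except blocks, regardless of how many NeutoneExceptions are registered.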
Third changed file (save_neutone_model):

@@ -112,7 +112,7 @@ def save_neutone_model(
     sqw = SampleQueueWrapper(model)
 
-    with tr.no_grad():
+    with tr.inference_mode():
         log.info("Converting model to torchscript...")
         script = model_to_torchscript(sqw, freeze=freeze, optimize=optimize)

[Review comment, on the tr.inference_mode() line] Do we need this?

@@ -131,8 +131,8 @@ def save_neutone_model(
     with open(root_dir / "metadata.json", "w") as f:
         json.dump(metadata, f, indent=4)
 
-    log.info("Running model on audio samples...")
     if audio_sample_pairs is None:
+        log.info("Running model on default audio samples...")
        input_samples = get_default_audio_samples()
        audio_sample_pairs = []
        for input_sample in input_samples:

[Review comment] Similarly for this one, do we actually need it?
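Regarding the two questions above: one practical difference from no_grad is that tensors created under inference_mode are permanently marked as inference tensors, and using them in autograd later produces exactly the RuntimeError this PR matches on. A minimal illustration of that failure mode (not code from the PR, and the exact message text may vary between PyTorch versions):

    import torch

    with torch.inference_mode():
        x = torch.rand(3)  # x is now an inference tensor

    w = torch.rand(3, requires_grad=True)
    try:
        y = (x * w).sum()  # mul must save x for backward, which is forbidden
        y.backward()
    except RuntimeError as e:
        print(e)  # "Inference tensors cannot be saved for backward. ..."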