From 4e9eddba0a1d85a8e19e6a0360139ef7cd2d8f58 Mon Sep 17 00:00:00 2001 From: Thomas Polasek Date: Fri, 11 Oct 2024 13:04:09 -0700 Subject: [PATCH] Convert directory fbcode/deeplearning to use the Ruff Formatter Summary: X-link: https://github.com/flashlight/wav2letter/pull/1024 X-link: https://github.com/flashlight/text/pull/88 X-link: https://github.com/flashlight/flashlight/pull/1176 X-link: https://github.com/pytorch/FBGEMM/pull/3242 Converts the directory specified to use the Ruff formatter in pyfmt ruff_dog If this diff causes merge conflicts when rebasing, please run `hg status -n -0 --change . -I '**/*.{py,pyi}' | xargs -0 arc pyfmt` on your diff, and amend any changes before rebasing onto latest. That should help reduce or eliminate any merge conflicts. allow-large-files bypass-github-export-checks Differential Revision: D63766623 --- benchmarks/models.py | 1 - benchmarks/profiler.py | 1 - crypten/communicator/communicator.py | 1 - .../communicator/in_process_communicator.py | 1 - crypten/cryptensor.py | 11 ++------ crypten/gradients.py | 11 -------- crypten/mpc/primitives/arithmetic.py | 18 ++++++------- crypten/mpc/primitives/binary.py | 12 ++++----- crypten/mpc/primitives/converters.py | 1 - crypten/mpc/provider/ttp_provider.py | 1 - crypten/nn/module.py | 12 ++------- crypten/nn/onnx_converter.py | 25 +++++++++---------- crypten/nn/tensorboard.py | 1 - examples/bandits/launcher.py | 1 - examples/bandits/plain_contextual_bandits.py | 1 - .../bandits/private_contextual_bandits.py | 1 - examples/meters.py | 1 - examples/mpc_autograd_cnn/mpc_autograd_cnn.py | 1 - examples/mpc_cifar/mpc_cifar.py | 1 - examples/mpc_imagenet/mpc_imagenet.py | 1 - examples/multiprocess_launcher.py | 1 - examples/util.py | 1 - pycon-workshop-2020/multiprocess_launcher.py | 1 - test/test_arithmetic.py | 1 - test/test_autograd.py | 8 ------ test/test_binary.py | 1 - test/test_cuda.py | 1 - test/test_gradients.py | 2 -- test/test_mpc.py | 2 -- test/test_nn.py | 12 --------- test/test_tensorboard.py | 1 - 31 files changed, 31 insertions(+), 103 deletions(-) diff --git a/benchmarks/models.py b/benchmarks/models.py index 4cacc8d5..4db863ea 100644 --- a/benchmarks/models.py +++ b/benchmarks/models.py @@ -8,7 +8,6 @@ Contains models used for benchmarking """ - from dataclasses import dataclass from typing import Any diff --git a/benchmarks/profiler.py b/benchmarks/profiler.py index f3a82001..ba509719 100644 --- a/benchmarks/profiler.py +++ b/benchmarks/profiler.py @@ -10,7 +10,6 @@ Run via Jupyter """ - from benchmark import ModelBenchmarks diff --git a/crypten/communicator/communicator.py b/crypten/communicator/communicator.py index 199c9c4d..5ede390a 100644 --- a/crypten/communicator/communicator.py +++ b/crypten/communicator/communicator.py @@ -162,7 +162,6 @@ def _logging(func): @wraps(func) def logging_wrapper(self, *args, **kwargs): - # TODO: Replace this # - hacks the inputs into some of the functions for world_size 1: world_size = self.get_world_size() diff --git a/crypten/communicator/in_process_communicator.py b/crypten/communicator/in_process_communicator.py index 6ab6ef65..ca4bb3cd 100644 --- a/crypten/communicator/in_process_communicator.py +++ b/crypten/communicator/in_process_communicator.py @@ -17,7 +17,6 @@ class InProcessCommunicator(Communicator): - BYTES_PER_ELEMENT = 8 tls = threading.local() mailbox = None diff --git a/crypten/cryptensor.py b/crypten/cryptensor.py index e9aef2f5..4f71d7d6 100644 --- a/crypten/cryptensor.py +++ b/crypten/cryptensor.py @@ -78,16 +78,12 @@ def 
register_cryptensor_cls(cls): if name in CrypTensor.__CRYPTENSOR_TYPES__: raise ValueError( "Cannot register duplicate CrypTensor type: \ - tensor type {} already exists.".format( - name - ) + tensor type {} already exists.".format(name) ) if not issubclass(cls, CrypTensor): raise ValueError( "Registered tensor ({}: {}) must extend \ - CrypTensor".format( - name, cls.__name__ - ) + CrypTensor".format(name, cls.__name__) ) CrypTensor.__CRYPTENSOR_TYPES__[name] = cls return cls @@ -222,7 +218,6 @@ def backward(self, grad_input=None, top_node=True): """ if self.requires_grad: with CrypTensor.no_grad(): # disable autograd for backward pass - # in initial backward call, identify all required nodes: if top_node: self._identify_required_grads() @@ -302,7 +297,6 @@ def __torch_function__(self, func, types, args=(), kwargs=None): ) def _get_forward_function_no_ctx(self, grad_fn): - # determine if self is a dummy object (the case for staticmethods): is_dummy = getattr(self, "__IS_DUMMY__", False) @@ -320,7 +314,6 @@ def autograd_forward_no_ctx(*args, **kwargs): return autograd_forward_no_ctx def _get_autograd_forward_function(self, name, grad_fn, in_place): - # determine if self is a dummy object (the case for staticmethods): is_dummy = getattr(self, "__IS_DUMMY__", False) diff --git a/crypten/gradients.py b/crypten/gradients.py index 9fd1a020..58b19daa 100644 --- a/crypten/gradients.py +++ b/crypten/gradients.py @@ -408,7 +408,6 @@ def backward(ctx, grad_output): class AutogradSqueeze(AutogradFunction): @staticmethod def forward(ctx, *args, **kwargs): - # preprocess inputs: assert len(args) >= 1 if len(args) == 1: @@ -497,7 +496,6 @@ def backward(ctx, grad_output): class AutogradDropout(AutogradFunction): @staticmethod def forward(ctx, input, p=0.5, training=True, inplace=False): - if training and inplace: logging.warning( "CrypTen dropout does not support inplace computation during training." @@ -534,7 +532,6 @@ def backward(ctx, grad_output): class AutogradFeatureDropout(AutogradFunction): @staticmethod def forward(ctx, input, p=0.5, training=True, inplace=False): - if training and inplace: logging.warning( "CrypTen _feature_dropout does not support inplace computation during training." 
@@ -1133,7 +1130,6 @@ def backward(ctx, grad_output): class AutogradSum(AutogradFunction): @staticmethod def forward(ctx, *args, **kwargs): - # preprocess inputs: assert len(args) >= 1 if len(args) == 1: @@ -1192,7 +1188,6 @@ def backward(ctx, grad_output): class AutogradMean(AutogradFunction): @staticmethod def forward(ctx, *args, **kwargs): - # preprocess inputs: assert len(args) >= 1 if len(args) == 1: @@ -1230,7 +1225,6 @@ def backward(ctx, grad_output): class AutogradVariance(AutogradFunction): @staticmethod def forward(ctx, self, *args, **kwargs): - # preprocess inputs: if len(args) == 0: dim = None @@ -1287,7 +1281,6 @@ def backward(ctx, grad_output): class AutogradMin(AutogradFunction): @staticmethod def forward(ctx, *args, **kwargs): - # preprocess inputs: assert len(args) >= 1 if len(args) == 1: @@ -1335,7 +1328,6 @@ def backward(ctx, grad_output): class AutogradMax(AutogradFunction): @staticmethod def forward(ctx, *args, **kwargs): - # preprocess inputs: assert len(args) >= 1 if len(args) == 1: @@ -1453,7 +1445,6 @@ def backward(ctx, grad_output): class AutogradAvgPool2D(AutogradFunction): @staticmethod def forward(ctx, input, kernel_size, stride=None, padding=0, ceil_mode=False): - # preprocess inputs: if stride is None: stride = kernel_size @@ -1528,7 +1519,6 @@ def forward( ceil_mode=False, return_indices=False, ): - # preprocess inputs: if stride is None: stride = kernel_size @@ -1887,7 +1877,6 @@ def backward(ctx, grad_output): grad_output = grad_output.mul(weight) grad_input = grad_output.mul(inv_var) if training: - # compute gradient term that is due to the mean: num_element = reduce( lambda x, y: x * y, [grad_output.size(d) for d in stats_dimensions] diff --git a/crypten/mpc/primitives/arithmetic.py b/crypten/mpc/primitives/arithmetic.py index 89686481..f3f4dabb 100644 --- a/crypten/mpc/primitives/arithmetic.py +++ b/crypten/mpc/primitives/arithmetic.py @@ -223,9 +223,9 @@ def __setitem__(self, index, value): """Set tensor values by index""" if isinstance(value, (int, float)) or is_tensor(value): value = ArithmeticSharedTensor(value) - assert isinstance( - value, ArithmeticSharedTensor - ), "Unsupported input type %s for __setitem__" % type(value) + assert isinstance(value, ArithmeticSharedTensor), ( + "Unsupported input type %s for __setitem__" % type(value) + ) self.share.__setitem__(index, value.share) def pad(self, pad, mode="constant", value=0): @@ -268,9 +268,9 @@ def stack(tensors, *args, **kwargs): for i, tensor in enumerate(tensors): if is_tensor(tensor): tensors[i] = ArithmeticSharedTensor(tensor) - assert isinstance( - tensors[i], ArithmeticSharedTensor - ), "Can't stack %s with ArithmeticSharedTensor" % type(tensor) + assert isinstance(tensors[i], ArithmeticSharedTensor), ( + "Can't stack %s with ArithmeticSharedTensor" % type(tensor) + ) result = tensors[0].shallow_copy() result.share = torch_stack( @@ -630,9 +630,9 @@ def scatter_(self, dim, index, src): """ if is_tensor(src): src = ArithmeticSharedTensor(src) - assert isinstance( - src, ArithmeticSharedTensor - ), "Unrecognized scatter src type: %s" % type(src) + assert isinstance(src, ArithmeticSharedTensor), ( + "Unrecognized scatter src type: %s" % type(src) + ) self.share.scatter_(dim, index, src.share) return self diff --git a/crypten/mpc/primitives/binary.py b/crypten/mpc/primitives/binary.py index d8ace3c9..72379147 100644 --- a/crypten/mpc/primitives/binary.py +++ b/crypten/mpc/primitives/binary.py @@ -318,9 +318,9 @@ def __setitem__(self, index, value): """Set tensor values by index""" if 
is_tensor(value) or isinstance(value, list): value = BinarySharedTensor(value) - assert isinstance( - value, BinarySharedTensor - ), "Unsupported input type %s for __setitem__" % type(value) + assert isinstance(value, BinarySharedTensor), ( + "Unsupported input type %s for __setitem__" % type(value) + ) self.share.__setitem__(index, value.share) @staticmethod @@ -436,9 +436,9 @@ def scatter_(self, dim, index, src): """ if is_tensor(src): src = BinarySharedTensor(src) - assert isinstance( - src, BinarySharedTensor - ), "Unrecognized scatter src type: %s" % type(src) + assert isinstance(src, BinarySharedTensor), ( + "Unrecognized scatter src type: %s" % type(src) + ) self.share.scatter_(dim, index, src.share) return self diff --git a/crypten/mpc/primitives/converters.py b/crypten/mpc/primitives/converters.py index b46d1628..f94b9fc1 100644 --- a/crypten/mpc/primitives/converters.py +++ b/crypten/mpc/primitives/converters.py @@ -16,7 +16,6 @@ def _A2B(arithmetic_tensor): - # first try memory-inefficient implementation that takes O(log P) rounds: try: binary_tensor = BinarySharedTensor.stack( diff --git a/crypten/mpc/provider/ttp_provider.py b/crypten/mpc/provider/ttp_provider.py index 5a28d5da..9ff5bf31 100644 --- a/crypten/mpc/provider/ttp_provider.py +++ b/crypten/mpc/provider/ttp_provider.py @@ -309,7 +309,6 @@ def _get_binary_PRSS(self, size, bitlength=None, remove_rank=None): return result def additive(self, size0, size1, op, *args, **kwargs): - # Add all shares of `a` and `b` to get plaintext `a` and `b` a = self._get_additive_PRSS(size0) b = self._get_additive_PRSS(size1) diff --git a/crypten/nn/module.py b/crypten/nn/module.py index f2bbeff8..21d0fd5e 100644 --- a/crypten/nn/module.py +++ b/crypten/nn/module.py @@ -470,7 +470,6 @@ def _apply(self, fn): def encrypt(self, mode=True, src=0): """Encrypts the model.""" if mode != self.encrypted: - # encrypt / decrypt parameters: self.encrypted = mode for name, param in self.named_parameters(recurse=False): @@ -705,7 +704,6 @@ def _clear_unused_values(): _mark_as_computed(input_name) node_to_compute = _find_computable_node() while node_to_compute is not None: - # compute output of module: input = [values[name] for name in self._graph[node_to_compute]] if len(input) == 1: @@ -726,8 +724,8 @@ def _clear_unused_values(): assert isinstance( output, tuple ), f"expected outputs {output_names} of {module} to be tuple, not {type(output)}" - assert len(output_names) == len( - output + assert ( + len(output_names) == len(output) ), f"expected {len(output_names)} outputs from {module}, received {len(output)}" for node, value in zip(output_names, output): values[node] = value @@ -1381,7 +1379,6 @@ class Expand(Module): """ def forward(self, x): - # unpack inputs: input, shape = tuple(x) if torch.is_tensor(shape): @@ -1966,7 +1963,6 @@ def __init__(self, stride, padding, dilation, groups=1): self.groups = groups def forward(self, x): - # unpack inputs: if len(x) == 2: x, weight = x @@ -2010,7 +2006,6 @@ def forward(self, x): @staticmethod def from_onnx(attributes=None): - # check attribute inputs: if attributes is None: attributes = {} @@ -2124,7 +2119,6 @@ def __init__( groups=1, bias=True, ): - # check inputs: super().__init__() assert isinstance(stride, int), "stride must be an integer" @@ -2558,7 +2552,6 @@ def forward(self, x): @staticmethod def from_onnx(pool_type, attributes=None): - # check attributes: if attributes is None: attributes = {} @@ -2887,7 +2880,6 @@ def forward(self, input): @staticmethod def from_onnx(parameters=None, 
attributes=None): - # preprocess all attributes: if parameters is None: parameters = {} diff --git a/crypten/nn/onnx_converter.py b/crypten/nn/onnx_converter.py index cdc11191..9a95a10f 100644 --- a/crypten/nn/onnx_converter.py +++ b/crypten/nn/onnx_converter.py @@ -178,7 +178,6 @@ def _to_crypten(onnx_model): # loop over all nodes: for node in onnx_model.graph.node: - # get attributes and node type: attributes = {attr.name: _get_attribute_value(attr) for attr in node.attribute} crypten_class = _get_operator_class(node.op_type, attributes) @@ -271,21 +270,21 @@ def _update_onnx_symbolic_registry(): for version_key, version_val in sym_registry._registry.items(): for function_key in version_val.keys(): if function_key == "softmax": - sym_registry._registry[version_key][ - function_key - ] = _onnx_crypten_softmax + sym_registry._registry[version_key][function_key] = ( + _onnx_crypten_softmax + ) if function_key == "log_softmax": - sym_registry._registry[version_key][ - function_key - ] = _onnx_crypten_logsoftmax + sym_registry._registry[version_key][function_key] = ( + _onnx_crypten_logsoftmax + ) if function_key == "dropout": - sym_registry._registry[version_key][ - function_key - ] = _onnx_crypten_dropout + sym_registry._registry[version_key][function_key] = ( + _onnx_crypten_dropout + ) if function_key == "feature_dropout": - sym_registry._registry[version_key][ - function_key - ] = _onnx_crypten_feature_dropout + sym_registry._registry[version_key][function_key] = ( + _onnx_crypten_feature_dropout + ) else: # Update ONNX symbolic registry using torch.onnx.register_custom_op_symbolic torch.onnx.register_custom_op_symbolic( diff --git a/crypten/nn/tensorboard.py b/crypten/nn/tensorboard.py index 4ad4aa19..150e820b 100644 --- a/crypten/nn/tensorboard.py +++ b/crypten/nn/tensorboard.py @@ -50,7 +50,6 @@ def graph(model): # loop all graph connections: for output_name, input_names in model._graph.items(): - # get parameters and type of module: module = modules[output_name] op = str(type(module)) diff --git a/examples/bandits/launcher.py b/examples/bandits/launcher.py index 0fcd392d..93b36628 100644 --- a/examples/bandits/launcher.py +++ b/examples/bandits/launcher.py @@ -380,7 +380,6 @@ def build_learner(args, bandits, download_mnist): logging.info("Loading clusters from file...") clusters = torch.load(clusters_file) else: - # load data and allocate clusters: context, _ = load_data( split=args.mnist_split, pca=pca, download_mnist_func=download_mnist diff --git a/examples/bandits/plain_contextual_bandits.py b/examples/bandits/plain_contextual_bandits.py index 9f40343b..aaaaf436 100644 --- a/examples/bandits/plain_contextual_bandits.py +++ b/examples/bandits/plain_contextual_bandits.py @@ -50,7 +50,6 @@ def online_learner( # initialization of model parameters: if idx == 0: - # initialize accumulators for linear least squares: A_inv = torch.stack( [ diff --git a/examples/bandits/private_contextual_bandits.py b/examples/bandits/private_contextual_bandits.py index 24bf97a8..f62e34f7 100644 --- a/examples/bandits/private_contextual_bandits.py +++ b/examples/bandits/private_contextual_bandits.py @@ -62,7 +62,6 @@ def online_learner( # initialization of model parameters: if idx == 0: - # initialize accumulators for linear least squares: A_inv = [torch.eye(num_features).unsqueeze(0) for _ in range(num_arms)] A_inv = crypten.cat([crypten.cryptensor(A) for A in A_inv]) diff --git a/examples/meters.py b/examples/meters.py index f1bd91cc..03ee6773 100644 --- a/examples/meters.py +++ b/examples/meters.py @@ 
-38,7 +38,6 @@ def reset(self): self.values = [] def add(self, output, ground_truth): - # compute predicted classes (ordered): _, prediction = output.topk(self.maxk, 1, True, True) prediction = prediction.t() diff --git a/examples/mpc_autograd_cnn/mpc_autograd_cnn.py b/examples/mpc_autograd_cnn/mpc_autograd_cnn.py index 75b7a85a..84e445dc 100644 --- a/examples/mpc_autograd_cnn/mpc_autograd_cnn.py +++ b/examples/mpc_autograd_cnn/mpc_autograd_cnn.py @@ -94,7 +94,6 @@ def train_encrypted( print(f"Epoch {epoch} in progress:") for j in range(0, num_samples, batch_size): - # define the start and end of the training mini-batch start, end = j, min(j + batch_size, num_samples) diff --git a/examples/mpc_cifar/mpc_cifar.py b/examples/mpc_cifar/mpc_cifar.py index e3f67b7a..e0f07386 100644 --- a/examples/mpc_cifar/mpc_cifar.py +++ b/examples/mpc_cifar/mpc_cifar.py @@ -153,7 +153,6 @@ def train(train_loader, model, criterion, optimizer, epoch, print_freq=10): end = time.time() for i, (input, target) in enumerate(train_loader): - # compute output output = model(input) loss = criterion(output, target) diff --git a/examples/mpc_imagenet/mpc_imagenet.py b/examples/mpc_imagenet/mpc_imagenet.py index f54674b6..2c5839d9 100644 --- a/examples/mpc_imagenet/mpc_imagenet.py +++ b/examples/mpc_imagenet/mpc_imagenet.py @@ -79,7 +79,6 @@ def run_experiment( # loop over dataset: meter = AccuracyMeter() for idx, sample in enumerate(dataset): - # preprocess sample: image, target = sample image = transform(image) diff --git a/examples/multiprocess_launcher.py b/examples/multiprocess_launcher.py index 259da7a5..9b1a151e 100644 --- a/examples/multiprocess_launcher.py +++ b/examples/multiprocess_launcher.py @@ -14,7 +14,6 @@ class MultiProcessLauncher: - # run_process_fn will be run in subprocesses. def __init__(self, world_size, run_process_fn, fn_args=None): env = os.environ.copy() diff --git a/examples/util.py b/examples/util.py index d45cfdf5..2824e9ee 100644 --- a/examples/util.py +++ b/examples/util.py @@ -69,7 +69,6 @@ def kmeans(data, K, max_iter=100): clusters = data[perm[:K], :] assignments = None for iter in range(max_iter): - # compute assignments, and stop if converged: prev_assignments = assignments assignments = kmeans_inference(data, clusters) diff --git a/pycon-workshop-2020/multiprocess_launcher.py b/pycon-workshop-2020/multiprocess_launcher.py index affc4e1c..2ddaf1cc 100644 --- a/pycon-workshop-2020/multiprocess_launcher.py +++ b/pycon-workshop-2020/multiprocess_launcher.py @@ -19,7 +19,6 @@ class MultiProcessLauncher: - # run_process_fn will be run in subprocesses. 
def __init__(self, world_size, run_process_fn, fn_args=None): env = os.environ.copy() diff --git a/test/test_arithmetic.py b/test/test_arithmetic.py index e506a15a..eada416f 100644 --- a/test/test_arithmetic.py +++ b/test/test_arithmetic.py @@ -92,7 +92,6 @@ def test_encrypt_decrypt(self) -> None: (5, 3, 32, 32), ] for size in sizes: - # encryption and decryption without source: reference = get_random_test_tensor(size=size, is_float=True) encrypted_tensor = ArithmeticSharedTensor(reference) diff --git a/test/test_autograd.py b/test/test_autograd.py index f4462b78..790a64e2 100644 --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -62,7 +62,6 @@ def test_non_differentiable_marking(self): # repeat test multiple times: for _ in range(10): - # mark non-differentiable inputs as such: differentiable = [random.random() > 0.5 for _ in range(len(inputs))] for idx, diff in enumerate(differentiable): @@ -190,7 +189,6 @@ def test_detach(self): """Tests that detach() works as expected.""" for func_name in ["detach", "detach_"]: - # get test case: input_size = (12, 5) input1 = get_random_test_tensor(size=input_size, is_float=True) @@ -215,7 +213,6 @@ def test_forward_tracking(self): """Tests that requires_grad influences tracking of forward computations.""" for requires_grad in [True, False]: - # get test case: input = get_random_test_tensor(size=(12, 5), is_float=True) input = crypten.cryptensor(input, requires_grad=requires_grad) @@ -362,7 +359,6 @@ def test_case9(input, encr_input): if callable(value) and key.startswith("test_case") ] for idx, test_case in enumerate(test_cases): - # get input tensors: input = get_random_test_tensor(size=(12, 5), is_float=True) input.requires_grad = True @@ -387,7 +383,6 @@ def test_case9(input, encr_input): # test cases in which tensor gets combined with itself: for func_name in ["sub", "add", "mul"]: - # get input tensors: input = get_random_test_tensor(size=(12, 5), is_float=True) input.requires_grad = True @@ -415,7 +410,6 @@ def test_autograd(self) -> None: # PyTorch test case: for test in tests: - # get test case: number_of_inputs, ops = test inputs = [ @@ -433,7 +427,6 @@ def test_autograd(self) -> None: # perform forward pass, logging all intermediate outputs: outputs, encr_outputs = [inputs], [encr_inputs] for op in ops: - # get inputs for current operation: input, output = outputs[-1], [] encr_input, encr_output = encr_outputs[-1], [] @@ -475,7 +468,6 @@ def test_autograd_repetition(self) -> None: # re-use the same input multiple times: for _ in range(7): - # perform forward pass: output = input.exp().sum() encr_output = encr_input.exp().sum() diff --git a/test/test_binary.py b/test/test_binary.py index e10ca5a9..83654406 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -71,7 +71,6 @@ def test_encrypt_decrypt(self) -> None: (5, 3, 32, 32), ] for size in sizes: - # encryption and decryption without source: reference = get_random_test_tensor(size=size, is_float=False) encrypted_tensor = BinarySharedTensor(reference) diff --git a/test/test_cuda.py b/test/test_cuda.py index 4e0bbd28..5ce67d4a 100644 --- a/test/test_cuda.py +++ b/test/test_cuda.py @@ -237,7 +237,6 @@ def _patched_conv2d(self, image_size, in_channels): padding, stride, ) in itertools.product(*[v for _, v in kwargs.items()]): - # sample input: input_size = (batches, in_channels, *image_size) input = get_random_test_tensor(size=input_size, is_float=False) diff --git a/test/test_gradients.py b/test/test_gradients.py index 28eae3b0..ce43d81b 100644 --- a/test/test_gradients.py +++ 
b/test/test_gradients.py @@ -259,7 +259,6 @@ def _reductions_helper(self, input_reductions, method=None): # Check dim 0 if tensor is 0-dimensional dims = 1 if tensor.dim() == 0 else tensor.dim() for dim in range(dims): - # check when keepdim is not provided as a kwarg if method is None: self._check_forward_backward(reduction, tensor, dim=dim) @@ -826,7 +825,6 @@ def test_batchnorm(self) -> None: torch.autograd.set_detect_anomaly(True) for size in sizes: for is_training in (False, True): - # sample input data, weight, and bias: tensor = get_random_test_tensor(size=size, is_float=True) encrypted_input = crypten.cryptensor(tensor) diff --git a/test/test_mpc.py b/test/test_mpc.py index 8514aa58..20a8a9cd 100644 --- a/test/test_mpc.py +++ b/test/test_mpc.py @@ -114,7 +114,6 @@ def _generate_tensor(ptype): # test both types: for ptype in [Ptype.arithmetic, Ptype.binary]: - # generate shares, sync them between parties, and create tensor: shares, reference = _generate_tensor(ptype) share = comm.get().scatter(shares, 0) @@ -1434,7 +1433,6 @@ def test_broadcast_matmul(self): batch_dims = [(), (1,), (5,), (1, 1), (1, 5), (5, 5)] for tensor_type in [lambda x: x, MPCTensor]: - for size in matmul_sizes: for batch1, batch2 in itertools.combinations(batch_dims, 2): size1 = (*batch1, *size) diff --git a/test/test_nn.py b/test/test_nn.py index 9ec747a4..ef80b20f 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -370,7 +370,6 @@ def test_non_pytorch_modules(self): # loop over all modules: for module_name in module_args.keys(): - # create encrypted CrypTen module: encr_module = getattr(crypten.nn, module_name)(*module_args[module_name]) encr_module.encrypt() @@ -405,7 +404,6 @@ def test_non_pytorch_modules(self): # some modules take additional inputs: if module_name in additional_inputs: - # base inputs: if inputs is None: inputs, encr_inputs = [], [] @@ -571,7 +569,6 @@ def test_pytorch_modules(self): # check value of parameters: for key in ["weight", "bias"]: if hasattr(module, key): # if PyTorch model has key - # find that key in the crypten.nn.Graph: if from_pytorch: for encr_node in encr_module.modules(): @@ -639,7 +636,6 @@ def test_conv(self): # try different dimensionalities: for dim in range(1, 3): for compute_gradients in [True, False]: - # fixed attributes of convolution: stride = tuple([1] * dim) padding = tuple([0] * dim) @@ -753,7 +749,6 @@ def test_parameter_module(self): torch.nn.parameter.Parameter, crypten.cryptensor, ]: - # check creation of Parameter: num_rows, num_cols = 5, 4 size = (num_rows, num_cols) @@ -838,7 +833,6 @@ def test_sequential(self): # try networks of different depth: for num_layers in range(1, 6): for compute_gradients in [True, False]: - # construct sequential container: input_size = (3, 10) output_size = (input_size[0], input_size[1] - num_layers) @@ -900,7 +894,6 @@ def test_graph(self): """ for compute_gradients in [True, False]: for num_inputs in [1, 2]: - # define test case: input_size = (3, 10) input = get_random_test_tensor(size=input_size, is_float=True) @@ -1139,7 +1132,6 @@ def test_training(self): # perform training iterations: for _ in range(10): for compute_gradients in [True, False]: - # get training sample: input = get_random_test_tensor( size=(batch_size, num_inputs), is_float=True @@ -1275,7 +1267,6 @@ def test_batchnorm_module(self) -> None: ) for batchnorm_fn, size in batchnorm_fn_and_size: for is_training in (True, False): - # create random input tensor: tensor = get_random_test_tensor(size=size, is_float=True) tensor.requires_grad = True @@ 
-1382,7 +1373,6 @@ def test_unencrypted_modules(self) -> None: # function running the actual test: def _run_test(_sample, _target): - # forward pass fails when feeding encrypted input into unencrypted model: linear.zero_grad() if not linear.encrypted and not torch.is_tensor(_sample): @@ -1467,7 +1457,6 @@ def _check_state_dict(model, state_dict): } for module_name, args in module_args.items(): for encrypt in [False, True]: - # create module and get state dict: module = getattr(crypten.nn, module_name)(*args) if encrypt: @@ -1498,7 +1487,6 @@ def _check_state_dict(model, state_dict): # tests for model that is sequence of modules: for num_layers in range(1, 6): for encrypt in [False, True]: - # some variables that we need: input_size = (3, 10) output_size = (input_size[0], input_size[1] - num_layers) diff --git a/test/test_tensorboard.py b/test/test_tensorboard.py index 47cd3b51..90d4539e 100644 --- a/test/test_tensorboard.py +++ b/test/test_tensorboard.py @@ -25,7 +25,6 @@ def setUp(self) -> None: crypten.init() def test_tensorboard(self) -> None: - # create small crypten model: model = crypten.nn.Graph("input", "output") model.add_module("intermediate1", crypten.nn.ReLU(), ["input"])
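
For reviewers scanning the hunks above: every change in this diff is a mechanical Ruff rewrite with no runtime effect, and nearly all of them are one of three patterns: (1) deleting the blank line that was tolerated directly after a def/class line or docstring, (2) keeping an assert's call expression on one line and moving its long message into parentheses, and (3) keeping a subscripted assignment target on one line and parenthesizing its right-hand side, collapsing calls such as .format(...) that then fit. Below is a minimal, self-contained Python sketch of the three patterns; the names used (the ArithmeticSharedTensor stub, registry, _onnx_crypten_softmax) are illustrative stand-ins for the patched CrypTen code, not part of the patch itself.

# Pattern 1: a blank line after a signature is removed. This accounts for
# most of the single-line "-" deletions in gradients.py, module.py, and tests.
def forward_before(ctx, *args, **kwargs):

    # preprocess inputs:
    ...

def forward_after(ctx, *args, **kwargs):
    # preprocess inputs:
    ...

# Pattern 2: the isinstance() call stays on one line and the assert message
# moves into parentheses (see the arithmetic.py and binary.py hunks).
class ArithmeticSharedTensor:  # stub standing in for the real CrypTen class
    pass

value = ArithmeticSharedTensor()

# before:
assert isinstance(
    value, ArithmeticSharedTensor
), "Unsupported input type %s for __setitem__" % type(value)

# after:
assert isinstance(value, ArithmeticSharedTensor), (
    "Unsupported input type %s for __setitem__" % type(value)
)

# Pattern 3: the subscripted target stays on one line and the right-hand
# side is parenthesized (see the onnx_converter.py hunk).
_onnx_crypten_softmax = object()  # stub for the real symbolic function
registry = {}

# before:
registry[
    "softmax"
] = _onnx_crypten_softmax

# after:
registry["softmax"] = (
    _onnx_crypten_softmax
)

Because a formatter pass is idempotent, re-running it on a conflicting rebase (the `hg status ... | xargs -0 arc pyfmt` command given in the summary) converges to the same output as this diff, which is why the summary recommends it for resolving merge conflicts.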