Remove possible manual seeds from test files. (#2436)
Summary:
Removed manual seeds from test files where applicable, as part of refactoring #2267.

Pull Request resolved: #2436

Reviewed By: carolineechen

Differential Revision: D36896854

Pulled By: skim0514

fbshipit-source-id: 7b4dd8a8dbfbef271f5cc56564dc83a760407e6c
Sean Kim authored and facebook-github-bot committed Jun 3, 2022
1 parent b68864c commit f0bc00c
Showing 17 changed files with 0 additions and 57 deletions.
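For context, a minimal sketch of why these per-file seeds become redundant, assuming the shared test base class seeds the RNG centrally. The actual mechanism is part of refactoring #2267 and is not shown on this page; SeededTestCase and the seed value below are illustrative assumptions, not torchaudio's real TorchaudioTestCase.

import unittest

import torch


class SeededTestCase(unittest.TestCase):
    # Hypothetical stand-in for a shared base class that seeds once for
    # every test method, so individual test files no longer need to.
    def setUp(self):
        super().setUp()
        torch.random.manual_seed(2434)  # assumed central seed value


class ExampleTest(SeededTestCase):
    # No torch.random.manual_seed(...) call in this class anymore;
    # determinism comes from the base class setUp above.
    def test_rand_is_reproducible(self):
        first = torch.rand(4)
        torch.random.manual_seed(2434)
        second = torch.rand(4)
        self.assertTrue(torch.equal(first, second))

With a pattern like this, deleting the seeds from each file's setUpClass or test body does not change test determinism.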
@@ -55,7 +55,6 @@ class TestLibriSpeechRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)

@parameterized.expand(
[
@@ -49,7 +49,6 @@ class TestMuSTCRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)

@parameterized.expand(
[
@@ -53,7 +53,6 @@ class TestTEDLIUM3RNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)

@parameterized.expand(
[
@@ -9,7 +9,6 @@ class TestCropAudioLabel(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)

@parameterized.expand(
[
5 changes: 0 additions & 5 deletions test/torchaudio_unittest/functional/autograd_impl.py
@@ -232,28 +232,23 @@ def test_bandreject_biquad(self, central_freq, Q):
self.assert_grad(F.bandreject_biquad, (x, sr, central_freq, Q))

def test_deemph_biquad(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=1)
self.assert_grad(F.deemph_biquad, (x, 44100))

def test_flanger(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
self.assert_grad(F.flanger, (x, 44100))

def test_gain(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
self.assert_grad(F.gain, (x, 1.1))

def test_overdrive(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=8000, duration=0.01, n_channels=1)
self.assert_grad(F.gain, (x,))

@parameterized.expand([(True,), (False,)])
def test_phaser(self, sinusoidal):
torch.random.manual_seed(2434)
sr = 8000
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
self.assert_grad(F.phaser, (x, sr, sinusoidal))
20 changes: 0 additions & 20 deletions test/torchaudio_unittest/functional/batch_consistency_test.py
@@ -52,7 +52,6 @@ def test_griffinlim(self):
momentum = 0.99
n_iter = 32
length = 1000
torch.random.manual_seed(0)
batch = torch.rand(self.batch_size, 1, 201, 6)
kwargs = {
"window": window,
@@ -80,7 +79,6 @@ def test_griffinlim(self):
def test_detect_pitch_frequency(self, sample_rate, n_channels):
# Use different frequencies to ensure each item in the batch returns a
# different answer.
torch.manual_seed(0)
frequencies = torch.randint(100, 1000, [self.batch_size])
waveforms = torch.stack(
[
@@ -103,7 +101,6 @@ def test_detect_pitch_frequency(self, sample_rate, n_channels):
]
)
def test_amplitude_to_DB(self, top_db):
torch.manual_seed(0)
spec = torch.rand(self.batch_size, 2, 100, 100) * 200

amplitude_mult = 20.0
@@ -137,7 +134,6 @@ def test_amplitude_to_DB_itemwise_clamps(self):
top_db = 20.0

# Make a batch of noise
torch.manual_seed(0)
spec = torch.rand([2, 2, 100, 100]) * 200
# Make one item blow out the other
spec[0] += 50
@@ -158,7 +154,6 @@ def test_amplitude_to_DB_not_channelwise_clamps(self):
db_mult = math.log10(max(amin, ref))
top_db = 40.0

torch.manual_seed(0)
spec = torch.rand([1, 2, 100, 100]) * 200
# Make one channel blow out the other
spec[:, 0] += 50
@@ -173,7 +168,6 @@ def test_amplitude_to_DB_not_channelwise_clamps(self):
assert (difference >= 1e-5).any()

def test_contrast(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
kwargs = {
"enhancement_amount": 80.0,
@@ -182,7 +176,6 @@ def test_contrast(self):
self.assert_batch_consistency(func, inputs=(waveforms,))

def test_dcshift(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
kwargs = {
"shift": 0.5,
@@ -192,7 +185,6 @@ def test_dcshift(self):
self.assert_batch_consistency(func, inputs=(waveforms,))

def test_overdrive(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
kwargs = {
"gain": 45,
@@ -215,7 +207,6 @@ def test_phaser(self):
self.assert_batch_consistency(func, inputs=(batch,))

def test_flanger(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
sample_rate = 44100
kwargs = {
@@ -234,7 +225,6 @@ def test_flanger(self):
name_func=_name_from_args,
)
def test_sliding_window_cmn(self, center, norm_vars):
torch.manual_seed(0)
spectrogram = torch.rand(self.batch_size, 2, 1024, 1024) * 200
kwargs = {
"center": center,
@@ -281,15 +271,13 @@ def test_compute_kaldi_pitch(self):

def test_lfilter(self):
signal_length = 2048
torch.manual_seed(2434)
x = torch.randn(self.batch_size, signal_length)
a = torch.rand(self.batch_size, 3)
b = torch.rand(self.batch_size, 3)
self.assert_batch_consistency(F.lfilter, inputs=(x, a, b))

def test_filtfilt(self):
signal_length = 2048
torch.manual_seed(2434)
x = torch.randn(self.batch_size, signal_length)
a = torch.rand(self.batch_size, 3)
b = torch.rand(self.batch_size, 3)
@@ -319,7 +307,6 @@ def test_psd_with_mask(self):
self.assert_batch_consistency(F.psd, (specgram, mask))

def test_mvdr_weights_souden(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 10
@@ -332,7 +319,6 @@ def test_mvdr_weights_souden_with_tensor(self):
self.assert_batch_consistency(func, (psd_noise, psd_speech))

def test_mvdr_weights_souden_with_tensor(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 10
@@ -343,7 +329,6 @@ def test_mvdr_weights_rtf(self):
self.assert_batch_consistency(F.mvdr_weights_souden, (psd_noise, psd_speech, reference_channel))

def test_mvdr_weights_rtf(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 129
@@ -356,7 +341,6 @@ def test_mvdr_weights_rtf_with_tensor(self):
self.assert_batch_consistency(func, (rtf, psd_noise))

def test_mvdr_weights_rtf_with_tensor(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 129
@@ -367,7 +351,6 @@ def test_rtf_evd(self):
self.assert_batch_consistency(F.mvdr_weights_rtf, (rtf, psd_noise, reference_channel))

def test_rtf_evd(self):
torch.random.manual_seed(2434)
batch_size = 2
channel = 4
n_fft_bin = 5
@@ -382,7 +365,6 @@ def test_rtf_power(self, n_iter):
]
)
def test_rtf_power(self, n_iter):
torch.random.manual_seed(2434)
channel = 4
batch_size = 2
n_fft_bin = 10
@@ -402,7 +384,6 @@ def test_rtf_power_with_tensor(self, n_iter):
]
)
def test_rtf_power_with_tensor(self, n_iter):
torch.random.manual_seed(2434)
channel = 4
batch_size = 2
n_fft_bin = 10
@@ -417,7 +398,6 @@ def test_apply_beamforming(self):
self.assert_batch_consistency(func, (psd_speech, psd_noise, reference_channel))

def test_apply_beamforming(self):
torch.random.manual_seed(2434)
sr = 8000
n_fft = 400
batch_size, num_channels = 2, 3
1 change: 0 additions & 1 deletion test/torchaudio_unittest/functional/functional_cpu_test.py
@@ -35,7 +35,6 @@ def _smoke_test(self, format, compression, check_num_frames):
The purpose of this test suite is to verify that apply_codec functionalities do not exhibit
abnormal behaviors.
"""
torch.random.manual_seed(42)
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
@@ -131,7 +131,6 @@ def test_phase_vocoder(self, rate):
hop_length = 256
num_freq = 1025
num_frames = 400
torch.random.manual_seed(42)

# Due to cummulative sum, numerical error in using torch.float32 will
# result in bottom right values of the stretched sectrogram to not
@@ -269,7 +269,6 @@ def test_lfilter(self):
self._assert_consistency(F.lfilter, (waveform, a_coeffs, b_coeffs, True, True))

def test_filtfilt(self):
torch.manual_seed(296)
waveform = common_utils.get_whitenoise(sample_rate=8000)
b_coeffs = torch.rand(4, device=waveform.device, dtype=waveform.dtype)
a_coeffs = torch.rand(4, device=waveform.device, dtype=waveform.dtype)
@@ -531,7 +530,6 @@ def func(tensor):
self._assert_consistency(func, (waveform,))

def test_flanger(self):
torch.random.manual_seed(40)
waveform = torch.rand(2, 100) - 0.5

def func(tensor):
@@ -26,7 +26,6 @@ def _gen_inputs(self, input_dim, batch_size, num_frames):

def setUp(self):
super().setUp()
torch.random.manual_seed(31)

def test_torchscript_consistency_forward(self):
r"""Verify that scripting Conformer does not change the behavior of method `forward`."""
2 changes: 0 additions & 2 deletions test/torchaudio_unittest/models/ctc_decoder_test.py
@@ -9,7 +9,6 @@
TorchaudioTestCase,
)


NUM_TOKENS = 8


@@ -38,7 +37,6 @@ def _get_decoder(self, tokens=None, use_lm=True, use_lexicon=True, **kwargs):
def _get_emissions(self):
B, T, N = 4, 15, NUM_TOKENS

torch.manual_seed(0)
emissions = torch.rand(B, T, N)

return emissions
4 changes: 0 additions & 4 deletions test/torchaudio_unittest/models/rnnt/rnnt_test_impl.py
@@ -54,7 +54,6 @@ def _get_transcriber_input(self):
input_dim = input_config["input_dim"]
right_context_length = input_config["right_context_length"]

torch.random.manual_seed(31)
input = torch.rand(batch_size, max_input_length + right_context_length, input_dim).to(
device=self.device, dtype=self.dtype
)
@@ -68,7 +67,6 @@ def _get_transcriber_streaming_input(self):
input_dim = input_config["input_dim"]
right_context_length = input_config["right_context_length"]

torch.random.manual_seed(31)
input = torch.rand(batch_size, segment_length + right_context_length, input_dim).to(
device=self.device, dtype=self.dtype
)
@@ -83,7 +81,6 @@ def _get_predictor_input(self):
num_symbols = input_config["num_symbols"]
max_target_length = input_config["max_target_length"]

torch.random.manual_seed(31)
input = torch.randint(0, num_symbols, (batch_size, max_target_length)).to(device=self.device, dtype=torch.int32)
lengths = torch.randint(1, max_target_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)
return input, lengths
@@ -95,7 +92,6 @@ def _get_joiner_input(self):
max_target_length = input_config["max_target_length"]
input_dim = input_config["encoding_dim"]

torch.random.manual_seed(31)
utterance_encodings = torch.rand(batch_size, joiner_max_input_length, input_dim).to(
device=self.device, dtype=self.dtype
)
@@ -46,8 +46,6 @@ def _get_model(self):
def test_torchscript_consistency_forward(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `forward`."""

torch.random.manual_seed(31)

input_config = self._get_input_config()
batch_size = input_config["batch_size"]
max_input_length = input_config["max_input_length"]
@@ -74,8 +72,6 @@ def test_torchscript_consistency_infer(self):
def test_torchscript_consistency_infer(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `infer`."""

torch.random.manual_seed(31)

input_config = self._get_input_config()
segment_length = input_config["segment_length"]
right_context_length = input_config["right_context_length"]
@@ -134,7 +134,6 @@ def test_import_wave2vec2_pretraining_model(self, config, _):
"""Wav2vec2 pretraining models from fairseq can be imported and yields the same results"""
batch_size, num_frames = 3, 1024

torch.manual_seed(0)
original = self._get_model(config).eval()
imported = import_fairseq_model(original).eval()

@@ -149,7 +148,6 @@ def test_import_hubert_pretraining_model(self, config, factory_func):
"""HuBERT pretraining models from fairseq can be imported and yields the same results"""
batch_size, num_frames = 3, 1024

torch.manual_seed(0)
original = self._get_model(config).eval()
imported = import_fairseq_model(original).eval()

@@ -241,7 +239,6 @@ def test_recreate_finetuning_model(self, config, factory_func):
reloaded.eval()

# Without mask
torch.manual_seed(0)
x = torch.randn(batch_size, num_frames)
ref, _ = imported(x)
hyp, _ = reloaded(x)
@@ -89,7 +89,6 @@ def _get_model(self, config):
raise ValueError(f'Unexpected arch: {config["architectures"]}')

def _test_import_pretrain(self, original, imported, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref = original.feature_extractor(x).transpose(1, 2)
@@ -173,7 +172,6 @@ def test_import_finetune(self, config, _):
self._test_import_finetune(original, imported, config)

def _test_recreate(self, imported, reloaded, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref, _ = imported.feature_extractor(x, None)