From 62d92c8af90f8e42cb589771abaf1f7ec5052012 Mon Sep 17 00:00:00 2001
From: Tanay Soni
Date: Mon, 16 Dec 2019 16:55:56 +0100
Subject: [PATCH] Disable multiprocessing in tests to reduce memory footprint

---
 test/test_lm_finetuning.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/test_lm_finetuning.py b/test/test_lm_finetuning.py
index 6052c7cd6..b0d3684c9 100644
--- a/test/test_lm_finetuning.py
+++ b/test/test_lm_finetuning.py
@@ -36,7 +36,7 @@ def test_lm_finetuning(caplog):
         max_seq_len=12,
         next_sent_pred=True
     )
-    data_silo = DataSilo(processor=processor, batch_size=batch_size)
+    data_silo = DataSilo(processor=processor, batch_size=batch_size, max_processes=1)
 
     language_model = LanguageModel.load(lang_model)
     lm_prediction_head = BertLMHead.load(lang_model)
@@ -113,7 +113,7 @@ def test_lm_finetuning_no_next_sentence(caplog):
         max_seq_len=12,
         next_sent_pred=False
     )
-    data_silo = DataSilo(processor=processor, batch_size=batch_size)
+    data_silo = DataSilo(processor=processor, batch_size=batch_size, max_processes=1)
 
     language_model = LanguageModel.load(lang_model)
     lm_prediction_head = BertLMHead.load(lang_model)
@@ -190,7 +190,7 @@ def test_lm_finetuning_custom_vocab(caplog):
         max_seq_len=12,
         next_sent_pred=True
     )
-    data_silo = DataSilo(processor=processor, batch_size=batch_size)
+    data_silo = DataSilo(processor=processor, batch_size=batch_size, max_processes=1)
 
     language_model = LanguageModel.load(lang_model, n_added_tokens=len(tokenizer.added_tokens_decoder))
     lm_prediction_head = BertLMHead.load(lang_model, n_added_tokens=len(tokenizer.added_tokens_decoder))