From fc90d5a28a04ce1614350541714222c457645c5b Mon Sep 17 00:00:00 2001 From: romulosouza Date: Thu, 15 Oct 2020 03:56:44 -0300 Subject: [PATCH 1/9] Change nlu model and extractors to use pathlib --- rasa/nlu/extractors/crf_entity_extractor.py | 18 +++++++++--------- rasa/nlu/extractors/entity_synonyms.py | 12 ++++++------ rasa/nlu/extractors/mitie_entity_extractor.py | 12 ++++++------ rasa/nlu/extractors/regex_entity_extractor.py | 9 +++++---- rasa/nlu/model.py | 16 ++++++++-------- rasa/nlu/utils/__init__.py | 7 +++---- rasa/shared/utils/io.py | 9 ++------- 7 files changed, 39 insertions(+), 44 deletions(-) diff --git a/rasa/nlu/extractors/crf_entity_extractor.py b/rasa/nlu/extractors/crf_entity_extractor.py index 9bff8d3df508..ee2571c86fdd 100644 --- a/rasa/nlu/extractors/crf_entity_extractor.py +++ b/rasa/nlu/extractors/crf_entity_extractor.py @@ -1,8 +1,7 @@ import logging -import os import typing - import numpy as np +from pathlib import Path from typing import Any, Dict, List, Optional, Text, Tuple, Type, Callable import rasa.nlu.utils.bilou_utils as bilou_utils @@ -306,24 +305,25 @@ def load( file_names = meta.get("files") entity_taggers = {} + model_dir = Path(model_dir) if not file_names: logger.debug( f"Failed to load model for 'CRFEntityExtractor'. " f"Maybe you did not provide enough training data and no model was " - f"trained or the path '{os.path.abspath(model_dir)}' doesn't exist?" + f"trained or the path '{model_dir.absolute()}' doesn't exist?" ) return cls(component_config=meta) for name, file_name in file_names.items(): - model_file = os.path.join(model_dir, file_name) - if os.path.exists(model_file): - entity_taggers[name] = joblib.load(model_file) + model_file = model_dir / file_name + if model_file.exists(): + entity_taggers[name] = joblib.load(str(model_file)) else: logger.debug( f"Failed to load model for tag '{name}' for 'CRFEntityExtractor'. " f"Maybe you did not provide enough training data and no model was " - f"trained or the path '{os.path.abspath(model_file)}' doesn't " + f"trained or the path '{model_file.absolute()}' doesn't " f"exist?" 
) @@ -341,8 +341,8 @@ def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]] if self.entity_taggers: for name, entity_tagger in self.entity_taggers.items(): file_name = f"{file_name}.{name}.pkl" - model_file_name = os.path.join(model_dir, file_name) - joblib.dump(entity_tagger, model_file_name) + model_file_name = Path(model_dir) / file_name + joblib.dump(entity_tagger, str(model_file_name)) file_names[name] = file_name return {"files": file_names} diff --git a/rasa/nlu/extractors/entity_synonyms.py b/rasa/nlu/extractors/entity_synonyms.py index 922da2aef9ad..8df371421575 100644 --- a/rasa/nlu/extractors/entity_synonyms.py +++ b/rasa/nlu/extractors/entity_synonyms.py @@ -1,4 +1,4 @@ -import os +from pathlib import Path from typing import Any, Dict, List, Optional, Text, Type from rasa.nlu.components import Component @@ -51,10 +51,10 @@ def process(self, message: Message, **kwargs: Any) -> None: message.set(ENTITIES, updated_entities, add_to_output=True) def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: - + model_dir = Path(model_dir) if self.synonyms: file_name = file_name + ".json" - entity_synonyms_file = os.path.join(model_dir, file_name) + entity_synonyms_file = model_dir / file_name write_json_to_file( entity_synonyms_file, self.synonyms, separators=(",", ": ") ) @@ -77,13 +77,13 @@ def load( synonyms = None return cls(meta, synonyms) - entity_synonyms_file = os.path.join(model_dir, file_name) - if os.path.isfile(entity_synonyms_file): + entity_synonyms_file = Path(model_dir) / file_name + if entity_synonyms_file.is_file(): synonyms = rasa.shared.utils.io.read_json_file(entity_synonyms_file) else: synonyms = None rasa.shared.utils.io.raise_warning( - f"Failed to load synonyms file from '{entity_synonyms_file}'.", + f"Failed to load synonyms file from '{str(entity_synonyms_file)}'.", docs=DOCS_URL_TRAINING_DATA + "#synonyms", ) return cls(meta, synonyms) diff --git a/rasa/nlu/extractors/mitie_entity_extractor.py b/rasa/nlu/extractors/mitie_entity_extractor.py index 8f68a821bf67..e6adbd45273d 100644 --- a/rasa/nlu/extractors/mitie_entity_extractor.py +++ b/rasa/nlu/extractors/mitie_entity_extractor.py @@ -1,6 +1,6 @@ import logging -import os import typing +from pathlib import Path from typing import Any, Dict, List, Optional, Text, Type from rasa.nlu.constants import TOKENS_NAMES @@ -158,9 +158,9 @@ def load( if not file_name: return cls(meta) - classifier_file = os.path.join(model_dir, file_name) - if os.path.exists(classifier_file): - extractor = mitie.named_entity_extractor(classifier_file) + classifier_file = Path(model_dir) / file_name + if classifier_file.exists(): + extractor = mitie.named_entity_extractor(str(classifier_file)) return cls(meta, extractor) else: return cls(meta) @@ -169,8 +169,8 @@ def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]] if self.ner: file_name = file_name + ".dat" - entity_extractor_file = os.path.join(model_dir, file_name) - self.ner.save_to_disk(entity_extractor_file, pure_model=True) + entity_extractor_file = Path(model_dir) / file_name + self.ner.save_to_disk(str(entity_extractor_file), pure_model=True) return {"file": file_name} else: return {"file": None} diff --git a/rasa/nlu/extractors/regex_entity_extractor.py b/rasa/nlu/extractors/regex_entity_extractor.py index 0bc3d57fe13b..fab196f88298 100644 --- a/rasa/nlu/extractors/regex_entity_extractor.py +++ b/rasa/nlu/extractors/regex_entity_extractor.py @@ -1,6 +1,6 @@ import logging -import os import re +from 
pathlib import Path from typing import Any, Dict, List, Optional, Text import rasa.shared.utils.io @@ -116,9 +116,9 @@ def load( ) -> "RegexEntityExtractor": file_name = meta.get("file") - regex_file = os.path.join(model_dir, file_name) + regex_file = Path(model_dir) / file_name - if os.path.exists(regex_file): + if regex_file.exists(): patterns = rasa.shared.utils.io.read_json_file(regex_file) return RegexEntityExtractor(meta, patterns=patterns) @@ -127,8 +127,9 @@ def load( def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: """Persist this model into the passed directory. Return the metadata necessary to load the model again.""" + file_name = f"{file_name}.json" - regex_file = os.path.join(model_dir, file_name) + regex_file = Path(model_dir) / file_name rasa.shared.utils.io.dump_obj_as_json_to_file(regex_file, self.patterns) return {"file": file_name} diff --git a/rasa/nlu/model.py b/rasa/nlu/model.py index 2314f12b1186..fd4b81f6bdef 100644 --- a/rasa/nlu/model.py +++ b/rasa/nlu/model.py @@ -1,7 +1,7 @@ import copy import datetime import logging -import os +from pathlib import Path from typing import Any, Dict, List, Optional, Text import rasa.nlu @@ -74,12 +74,12 @@ def load(model_dir: Text): Returns: Metadata: A metadata object describing the model """ + metadata_file = Path(model_dir) / "metadata.json" try: - metadata_file = os.path.join(model_dir, "metadata.json") data = rasa.shared.utils.io.read_json_file(metadata_file) return Metadata(data, model_dir) except Exception as e: - abspath = os.path.abspath(os.path.join(model_dir, "metadata.json")) + abspath = metadata_file.absolute() raise InvalidModelError( f"Failed to load model metadata from '{abspath}'. {e}" ) @@ -124,7 +124,7 @@ def persist(self, model_dir: Text): } ) - filename = os.path.join(model_dir, "metadata.json") + filename = Path(model_dir) / "metadata.json" write_json_to_file(filename, metadata, indent=4) @@ -233,9 +233,9 @@ def persist( else: model_name = NLU_MODEL_NAME_PREFIX + timestamp - path = os.path.abspath(path) - dir_name = os.path.join(path, model_name) - + path = Path(path).absolute() + dir_name = path / model_name + dir_name = str(dir_name) rasa.shared.utils.io.create_directory(dir_name) if self.training_data and persist_nlu_training_data: @@ -256,7 +256,7 @@ def persist( if persistor is not None: persistor.persist(dir_name, model_name) logger.info( - "Successfully saved model into '{}'".format(os.path.abspath(dir_name)) + "Successfully saved model into '{}'".format(Path(dir_name).absolute()) ) return dir_name diff --git a/rasa/nlu/utils/__init__.py b/rasa/nlu/utils/__init__.py index 4d90d08d9fff..f8cd8cf9b110 100644 --- a/rasa/nlu/utils/__init__.py +++ b/rasa/nlu/utils/__init__.py @@ -1,6 +1,6 @@ import os import re -from typing import Any, Optional, Text +from typing import Any, Optional, Text, Union from pathlib import Path import rasa.shared.utils.io @@ -18,13 +18,12 @@ def module_path_from_object(o: Any) -> Text: return o.__class__.__module__ + "." 
+ o.__class__.__name__ -def write_json_to_file(filename: Text, obj: Any, **kwargs: Any) -> None: +def write_json_to_file(filename: Union[Path, Text], obj: Any, **kwargs: Any) -> None: """Write an object as a json string to a file.""" - write_to_file(filename, rasa.shared.utils.io.json_to_string(obj, **kwargs)) -def write_to_file(filename: Text, text: Any) -> None: +def write_to_file(filename: Union[Path, Text], text: Any) -> None: """Write a text to a file.""" rasa.shared.utils.io.write_text_file(str(text), filename) diff --git a/rasa/shared/utils/io.py b/rasa/shared/utils/io.py index 728f67e9fa97..bf25fddfab12 100644 --- a/rasa/shared/utils/io.py +++ b/rasa/shared/utils/io.py @@ -395,17 +395,12 @@ def dump_obj_as_yaml_to_string( return buffer.getvalue() -def create_directory(directory_path: Text) -> None: +def create_directory(directory_path: Union[Path, Text]) -> None: """Creates a directory and its super paths. Succeeds even if the path already exists.""" - try: - os.makedirs(directory_path) - except OSError as e: - # be happy if someone already created the path - if e.errno != errno.EEXIST: - raise + Path(directory_path).mkdir(parents=True, exist_ok=True) def raise_deprecation_warning( From df212e1c225285d1e86eeb91bd02ac6f148db441 Mon Sep 17 00:00:00 2001 From: romulosouza Date: Wed, 21 Oct 2020 21:07:31 -0300 Subject: [PATCH 2/9] Change nlu tokenizers, utils and some files to use pathlib --- rasa/nlu/config.py | 4 ++-- rasa/nlu/convert.py | 4 ++-- rasa/nlu/extractors/crf_entity_extractor.py | 4 ++-- rasa/nlu/model.py | 11 ++++------- rasa/nlu/persistor.py | 15 +++++++-------- rasa/nlu/tokenizers/jieba_tokenizer.py | 11 +++++------ rasa/nlu/utils/__init__.py | 18 +++++++++++------- rasa/nlu/utils/mitie_utils.py | 6 +++--- 8 files changed, 36 insertions(+), 37 deletions(-) diff --git a/rasa/nlu/config.py b/rasa/nlu/config.py index 540f7d06dadd..44e9bdc61376 100644 --- a/rasa/nlu/config.py +++ b/rasa/nlu/config.py @@ -1,7 +1,7 @@ import copy import logging -import os import ruamel.yaml as yaml +from pathlib import Path from typing import Any, Dict, List, Optional, Text, Union from rasa.shared.exceptions import RasaException @@ -28,7 +28,7 @@ def load( return _load_from_dict(config, **kwargs) file_config = {} - if config is None and os.path.isfile(DEFAULT_CONFIG_PATH): + if config is None and Path(DEFAULT_CONFIG_PATH).is_file(): config = DEFAULT_CONFIG_PATH if config is not None: diff --git a/rasa/nlu/convert.py b/rasa/nlu/convert.py index 4cc0c576ae1c..0dd2f8df116a 100644 --- a/rasa/nlu/convert.py +++ b/rasa/nlu/convert.py @@ -1,6 +1,6 @@ import argparse -import os from typing import Text +from pathlib import Path from rasa.shared.utils.cli import print_error import rasa.shared.nlu.training_data.loading @@ -10,7 +10,7 @@ def convert_training_data( data_file: Text, out_file: Text, output_format: Text, language: Text ): - if not os.path.exists(data_file): + if not Path(data_file).exists(): print_error( "Data file '{}' does not exist. Provide a valid NLU data file using " "the '--data' argument.".format(data_file) diff --git a/rasa/nlu/extractors/crf_entity_extractor.py b/rasa/nlu/extractors/crf_entity_extractor.py index ee2571c86fdd..5248f49b54d5 100644 --- a/rasa/nlu/extractors/crf_entity_extractor.py +++ b/rasa/nlu/extractors/crf_entity_extractor.py @@ -311,7 +311,7 @@ def load( logger.debug( f"Failed to load model for 'CRFEntityExtractor'. " f"Maybe you did not provide enough training data and no model was " - f"trained or the path '{model_dir.absolute()}' doesn't exist?" 
+ f"trained or the path '{model_dir.resolve()}' doesn't exist?" ) return cls(component_config=meta) @@ -323,7 +323,7 @@ def load( logger.debug( f"Failed to load model for tag '{name}' for 'CRFEntityExtractor'. " f"Maybe you did not provide enough training data and no model was " - f"trained or the path '{model_file.absolute()}' doesn't " + f"trained or the path '{model_file.resolve()}' doesn't " f"exist?" ) diff --git a/rasa/nlu/model.py b/rasa/nlu/model.py index 64fc49f5667b..acaf4cb005cc 100644 --- a/rasa/nlu/model.py +++ b/rasa/nlu/model.py @@ -77,7 +77,7 @@ def load(model_dir: Text): data = rasa.shared.utils.io.read_json_file(metadata_file) return Metadata(data, model_dir) except Exception as e: - abspath = metadata_file.absolute() + abspath = metadata_file.resolve() raise InvalidModelError( f"Failed to load model metadata from '{abspath}'. {e}" ) @@ -231,9 +231,8 @@ def persist( else: model_name = NLU_MODEL_NAME_PREFIX + timestamp - path = Path(path).absolute() - dir_name = path / model_name - dir_name = str(dir_name) + dir_path = Path(path).resolve() / model_name + dir_name = str(dir_path) rasa.shared.utils.io.create_directory(dir_name) if self.training_data and persist_nlu_training_data: @@ -253,9 +252,7 @@ def persist( if persistor is not None: persistor.persist(dir_name, model_name) - logger.info( - "Successfully saved model into '{}'".format(Path(dir_name).absolute()) - ) + logger.info("Successfully saved model into '{}'".format(dir_path)) return dir_name diff --git a/rasa/nlu/persistor.py b/rasa/nlu/persistor.py index 53185959e320..ab119f3388d4 100644 --- a/rasa/nlu/persistor.py +++ b/rasa/nlu/persistor.py @@ -2,6 +2,7 @@ import os import shutil import tarfile +from pathlib import Path from typing import List, Optional, Text, Tuple import rasa.shared.utils.common @@ -49,7 +50,8 @@ class Persistor: def persist(self, model_directory: Text, model_name: Text) -> None: """Uploads a model persisted in the `target_dir` to cloud storage.""" - if not os.path.isdir(model_directory): + model_directory = Path(model_directory) + if not model_directory.is_dir(): raise ValueError(f"Target directory '{model_directory}' not found.") file_key, tar_path = self._compress(model_directory, model_name) @@ -65,7 +67,7 @@ def retrieve(self, model_name: Text, target_path: Text) -> None: tar_name = self._tar_name(model_name) self._retrieve_tar(tar_name) - self._decompress(os.path.basename(tar_name), target_path) + self._decompress(Path(tar_name).name, target_path) def list_models(self) -> List[Text]: """Lists all the trained models.""" @@ -89,12 +91,9 @@ def _compress(self, model_directory: Text, model_name: Text) -> Tuple[Text, Text dirpath = tempfile.mkdtemp() base_name = self._tar_name(model_name, include_extension=False) tar_name = shutil.make_archive( - os.path.join(dirpath, base_name), - "gztar", - root_dir=model_directory, - base_dir=".", + Path(dirpath) / base_name, "gztar", root_dir=model_directory, base_dir=".", ) - file_key = os.path.basename(tar_name) + file_key = Path(tar_name).name return file_key, tar_name @staticmethod @@ -177,7 +176,7 @@ def _persist_tar(self, file_key: Text, tar_path: Text) -> None: def _retrieve_tar(self, model_path: Text) -> None: """Downloads a model that has previously been persisted to s3.""" - tar_name = os.path.basename(model_path) + tar_name = Path(model_path).name with open(tar_name, "wb") as f: self.bucket.download_fileobj(model_path, f) diff --git a/rasa/nlu/tokenizers/jieba_tokenizer.py b/rasa/nlu/tokenizers/jieba_tokenizer.py index 4fb9e2481c1a..10a4a073814f 
100644 --- a/rasa/nlu/tokenizers/jieba_tokenizer.py +++ b/rasa/nlu/tokenizers/jieba_tokenizer.py @@ -1,13 +1,14 @@ import glob import logging -import os import shutil import typing +from pathlib import Path from typing import Any, Dict, List, Optional, Text from rasa.nlu.components import Component from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer from rasa.shared.nlu.training_data.message import Message +from rasa.shared.utils.io import create_directory logger = logging.getLogger(__name__) @@ -85,7 +86,7 @@ def load( # get real path of dictionary path, if any if relative_dictionary_path is not None: - dictionary_path = os.path.join(model_dir, relative_dictionary_path) + dictionary_path = Path(model_dir) / relative_dictionary_path meta["dictionary_path"] = dictionary_path @@ -93,9 +94,7 @@ def load( @staticmethod def copy_files_dir_to_dir(input_dir: Text, output_dir: Text) -> None: - # make sure target path exists - if not os.path.exists(output_dir): - os.makedirs(output_dir) + create_directory(output_dir) target_file_list = glob.glob(f"{input_dir}/*") for target_file in target_file_list: @@ -106,7 +105,7 @@ def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]] # copy custom dictionaries to model dir, if any if self.dictionary_path is not None: - target_dictionary_path = os.path.join(model_dir, file_name) + target_dictionary_path = Path(model_dir) / file_name self.copy_files_dir_to_dir(self.dictionary_path, target_dictionary_path) return {"dictionary_path": file_name} diff --git a/rasa/nlu/utils/__init__.py b/rasa/nlu/utils/__init__.py index f8cd8cf9b110..e4c615ba7bab 100644 --- a/rasa/nlu/utils/__init__.py +++ b/rasa/nlu/utils/__init__.py @@ -1,7 +1,6 @@ -import os import re from typing import Any, Optional, Text, Union -from pathlib import Path +from pathlib import Path, PurePath import rasa.shared.utils.io @@ -9,7 +8,7 @@ def relative_normpath(f: Optional[Text], path: Text) -> Optional[Path]: """Return the path of file relative to `path`.""" if f is not None: - return Path(os.path.relpath(f, path)) + return PurePath(f).relative_to(path) return None @@ -35,11 +34,16 @@ def is_model_dir(model_dir: Text) -> bool: specifically checks if the directory has no subdirectories and if all files have an appropriate ending.""" allowed_extensions = {".json", ".pkl", ".dat"} - dir_tree = list(os.walk(model_dir)) - if len(dir_tree) != 1: + + dir_tree = Path(model_dir) + if not dir_tree.is_dir(): + return False + + iter_dir = [d for d in dir_tree.iterdir()] + if [d for d in iter_dir if d.is_dir()]: # look for subdirectories return False - model_dir, child_dirs, files = dir_tree[0] - file_extenstions = [os.path.splitext(f)[1] for f in files] + + file_extenstions = [PurePath(f).suffix for f in iter_dir] only_valid_files = all([ext in allowed_extensions for ext in file_extenstions]) return only_valid_files diff --git a/rasa/nlu/utils/mitie_utils.py b/rasa/nlu/utils/mitie_utils.py index 91d37cc392d7..894ca07bfa13 100644 --- a/rasa/nlu/utils/mitie_utils.py +++ b/rasa/nlu/utils/mitie_utils.py @@ -1,5 +1,5 @@ -import os import typing +from pathlib import Path from typing import Any, Dict, List, Optional, Text from rasa.nlu.components import Component @@ -15,7 +15,7 @@ class MitieNLP(Component): defaults = { # name of the language model to load - this contains # the MITIE feature extractor - "model": os.path.join("data", "total_word_feature_extractor.dat") + "model": str(Path("data") / "total_word_feature_extractor.dat") } def __init__( @@ -61,7 +61,7 @@ def cache_key( 
mitie_file = component_meta.get("model", None)

         if mitie_file is not None:
-            return cls.name + "-" + str(os.path.abspath(mitie_file))
+            return cls.name + "-" + str(Path(mitie_file).resolve())
         else:
             return None

From a0ed4d4d72b3dffd927dfdf9f76f3c5d2a2f09ac Mon Sep 17 00:00:00 2001
From: Sara Silva
Date: Thu, 22 Oct 2020 18:09:11 -0300
Subject: [PATCH 3/9] Change nlu classifiers to use pathlib

---
 rasa/nlu/classifiers/diet_classifier.py           |  6 +++---
 rasa/nlu/classifiers/keyword_intent_classifier.py | 10 +++++-----
 rasa/nlu/classifiers/mitie_intent_classifier.py   |  8 ++++----
 rasa/nlu/classifiers/sklearn_intent_classifier.py | 14 ++++++--------
 4 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/rasa/nlu/classifiers/diet_classifier.py b/rasa/nlu/classifiers/diet_classifier.py
index 69add2e27183..286a14374cd7 100644
--- a/rasa/nlu/classifiers/diet_classifier.py
+++ b/rasa/nlu/classifiers/diet_classifier.py
@@ -4,7 +4,6 @@
 from pathlib import Path

 import numpy as np
-import os
 import scipy.sparse
 import tensorflow as tf
 import tensorflow_addons as tfa
@@ -954,7 +953,7 @@ def load(
             logger.debug(
                 f"Failed to load model for '{cls.__name__}'. "
                 f"Maybe you did not provide enough training data and no model was "
-                f"trained or the path '{os.path.abspath(model_dir)}' doesn't exist?"
+                f"trained or the path '{Path(model_dir).resolve()}' doesn't exist?"
             )
             return cls(component_config=meta)

@@ -1031,7 +1030,8 @@ def _load_model(
         model_dir: Text,
     ) -> "RasaModel":
         file_name = meta.get("file")
-        tf_model_file = os.path.join(model_dir, file_name + ".tf_model")
+        tf_model_file = file_name + ".tf_model"
+        tf_model_file = Path(model_dir) / tf_model_file

         label_key = LABEL_KEY if meta[INTENT_CLASSIFICATION] else None
         label_sub_key = LABEL_SUB_KEY if meta[INTENT_CLASSIFICATION] else None
diff --git a/rasa/nlu/classifiers/keyword_intent_classifier.py b/rasa/nlu/classifiers/keyword_intent_classifier.py
index 61320a9fcb75..e0cf7c98e57c 100644
--- a/rasa/nlu/classifiers/keyword_intent_classifier.py
+++ b/rasa/nlu/classifiers/keyword_intent_classifier.py
@@ -1,4 +1,4 @@
-import os
+from pathlib import Path
 import logging
 import re
 from typing import Any, Dict, Optional, Text
@@ -128,7 +128,7 @@ def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]:
         """
         file_name = file_name + ".json"
-        keyword_file = os.path.join(model_dir, file_name)
+        keyword_file = Path(model_dir) / file_name
         utils.write_json_to_file(keyword_file, self.intent_keyword_map)

         return {"file": file_name}
@@ -145,8 +145,8 @@ def load(
         if model_dir and meta.get("file"):
             file_name = meta.get("file")
-            keyword_file = os.path.join(model_dir, file_name)
-            if os.path.exists(keyword_file):
+            keyword_file = Path(model_dir) / file_name
+            if Path(keyword_file).exists():
                 intent_keyword_map = rasa.shared.utils.io.read_json_file(keyword_file)
             else:
                 rasa.shared.utils.io.raise_warning(
@@ -158,5 +158,5 @@ def load(
         else:
             raise Exception(
                 f"Failed to load keyword intent classifier model. "
-                f"Path {os.path.abspath(meta.get('file'))} doesn't exist."
+                f"Path {Path(meta.get('file')).resolve()} doesn't exist."
) diff --git a/rasa/nlu/classifiers/mitie_intent_classifier.py b/rasa/nlu/classifiers/mitie_intent_classifier.py index 124c16963a2f..47870658dc1e 100644 --- a/rasa/nlu/classifiers/mitie_intent_classifier.py +++ b/rasa/nlu/classifiers/mitie_intent_classifier.py @@ -1,4 +1,4 @@ -import os +from pathlib import Path import typing from typing import Any, Dict, List, Optional, Text, Type @@ -103,8 +103,8 @@ def load( if not file_name: return cls(meta) - classifier_file = os.path.join(model_dir, file_name) - if os.path.exists(classifier_file): + classifier_file = Path(model_dir) / file_name + if Path(classifier_file).exists(): classifier = mitie.text_categorizer(classifier_file) return cls(meta, classifier) else: @@ -114,7 +114,7 @@ def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]: if self.clf: file_name = file_name + ".dat" - classifier_file = os.path.join(model_dir, file_name) + classifier_file = Path(model_dir) / file_name self.clf.save_to_disk(classifier_file, pure_model=True) return {"file": file_name} else: diff --git a/rasa/nlu/classifiers/sklearn_intent_classifier.py b/rasa/nlu/classifiers/sklearn_intent_classifier.py index b31e289cadee..e9b15fa5f02a 100644 --- a/rasa/nlu/classifiers/sklearn_intent_classifier.py +++ b/rasa/nlu/classifiers/sklearn_intent_classifier.py @@ -1,5 +1,5 @@ import logging -import os +from pathlib import Path import typing import warnings from typing import Any, Dict, List, Optional, Text, Tuple, Type @@ -232,11 +232,9 @@ def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]] classifier_file_name = file_name + "_classifier.pkl" encoder_file_name = file_name + "_encoder.pkl" if self.clf and self.le: + io_utils.json_pickle(Path(model_dir) / encoder_file_name, self.le.classes_) io_utils.json_pickle( - os.path.join(model_dir, encoder_file_name), self.le.classes_ - ) - io_utils.json_pickle( - os.path.join(model_dir, classifier_file_name), self.clf.best_estimator_ + Path(model_dir) / classifier_file_name, self.clf.best_estimator_ ) return {"classifier": classifier_file_name, "encoder": encoder_file_name} @@ -251,10 +249,10 @@ def load( ) -> "SklearnIntentClassifier": from sklearn.preprocessing import LabelEncoder - classifier_file = os.path.join(model_dir, meta.get("classifier")) - encoder_file = os.path.join(model_dir, meta.get("encoder")) + classifier_file = Path(model_dir) / meta.get("classifier") + encoder_file = Path(model_dir) / meta.get("encoder") - if os.path.exists(classifier_file): + if Path(classifier_file).exists(): classifier = io_utils.json_unpickle(classifier_file) classes = io_utils.json_unpickle(encoder_file) encoder = LabelEncoder() From c7f3516d5c624128dbc33da9d2f083f9c35e0c49 Mon Sep 17 00:00:00 2001 From: Sara Silva Date: Thu, 22 Oct 2020 20:12:57 -0300 Subject: [PATCH 4/9] Change nlu featurizers to use pathlib --- changelog/3153.improvement.md | 1 + .../sparse_featurizer/count_vectors_featurizer.py | 8 ++++---- .../nlu/featurizers/sparse_featurizer/regex_featurizer.py | 8 ++++---- 3 files changed, 9 insertions(+), 8 deletions(-) create mode 100644 changelog/3153.improvement.md diff --git a/changelog/3153.improvement.md b/changelog/3153.improvement.md new file mode 100644 index 000000000000..31e8c3a3bb47 --- /dev/null +++ b/changelog/3153.improvement.md @@ -0,0 +1 @@ +Used pathlib instead of os.path to improve readability. 
diff --git a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py index 29978e3018a9..06e5805992df 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py @@ -1,5 +1,5 @@ import logging -import os +from pathlib import Path import re import scipy.sparse from typing import Any, Dict, List, Optional, Text, Type, Tuple @@ -595,7 +595,7 @@ def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]] attribute_vocabularies = self._collect_vectorizer_vocabularies() if self._is_any_model_trained(attribute_vocabularies): # Definitely need to persist some vocabularies - featurizer_file = os.path.join(model_dir, file_name) + featurizer_file = Path(model_dir) / file_name if self.use_shared_vocab: # Only persist vocabulary from one attribute. Can be loaded and @@ -675,9 +675,9 @@ def load( ) -> "CountVectorsFeaturizer": file_name = meta.get("file") - featurizer_file = os.path.join(model_dir, file_name) + featurizer_file = Path(model_dir) / file_name - if not os.path.exists(featurizer_file): + if not Path(featurizer_file).exists(): return cls(meta) vocabulary = io_utils.json_unpickle(featurizer_file) diff --git a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py index 32114d05b39e..5578309c202f 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py @@ -1,5 +1,5 @@ import logging -import os +from pathlib import Path import re from typing import Any, Dict, List, Optional, Text, Type, Tuple @@ -161,9 +161,9 @@ def load( ) -> "RegexFeaturizer": file_name = meta.get("file") - regex_file = os.path.join(model_dir, file_name) + regex_file = Path(model_dir) / file_name - if os.path.exists(regex_file): + if Path(regex_file).exists(): known_patterns = rasa.shared.utils.io.read_json_file(regex_file) return RegexFeaturizer(meta, known_patterns=known_patterns) else: @@ -173,7 +173,7 @@ def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]] """Persist this model into the passed directory. 
Return the metadata necessary to load the model again.""" file_name = file_name + ".pkl" - regex_file = os.path.join(model_dir, file_name) + regex_file = Path(model_dir) / file_name utils.write_json_to_file(regex_file, self.known_patterns, indent=4) return {"file": file_name} From 5370197ca9accdff282651afdcc0a8b20a78b930 Mon Sep 17 00:00:00 2001 From: Sara Silva Date: Sun, 8 Nov 2020 16:22:24 -0300 Subject: [PATCH 5/9] Make changes suggested by the reviewer --- rasa/nlu/classifiers/diet_classifier.py | 2 +- .../nlu/classifiers/keyword_intent_classifier.py | 2 +- rasa/nlu/classifiers/mitie_intent_classifier.py | 4 ++-- .../nlu/classifiers/sklearn_intent_classifier.py | 13 ++++++++----- .../count_vectors_featurizer.py | 2 +- rasa/nlu/model.py | 16 ++++++++-------- rasa/nlu/utils/__init__.py | 5 ++--- 7 files changed, 23 insertions(+), 21 deletions(-) diff --git a/rasa/nlu/classifiers/diet_classifier.py b/rasa/nlu/classifiers/diet_classifier.py index 721b4fdfc25a..67e95a9e35af 100644 --- a/rasa/nlu/classifiers/diet_classifier.py +++ b/rasa/nlu/classifiers/diet_classifier.py @@ -1039,7 +1039,7 @@ def _load_model( model_dir: Text, ) -> "RasaModel": file_name = meta.get("file") - tf_model_file = file_name + ".tf_model" + tf_model_file = f"{file_name}.tf_model" tf_model_file = Path(model_dir) / tf_model_file label_key = LABEL_KEY if meta[INTENT_CLASSIFICATION] else None diff --git a/rasa/nlu/classifiers/keyword_intent_classifier.py b/rasa/nlu/classifiers/keyword_intent_classifier.py index e0cf7c98e57c..22773321af5e 100644 --- a/rasa/nlu/classifiers/keyword_intent_classifier.py +++ b/rasa/nlu/classifiers/keyword_intent_classifier.py @@ -146,7 +146,7 @@ def load( if model_dir and meta.get("file"): file_name = meta.get("file") keyword_file = Path(model_dir) / file_name - if Path(keyword_file).exists(): + if keyword_file.exists(): intent_keyword_map = rasa.shared.utils.io.read_json_file(keyword_file) else: rasa.shared.utils.io.raise_warning( diff --git a/rasa/nlu/classifiers/mitie_intent_classifier.py b/rasa/nlu/classifiers/mitie_intent_classifier.py index 47870658dc1e..fa13b3c7eb0d 100644 --- a/rasa/nlu/classifiers/mitie_intent_classifier.py +++ b/rasa/nlu/classifiers/mitie_intent_classifier.py @@ -104,7 +104,7 @@ def load( if not file_name: return cls(meta) classifier_file = Path(model_dir) / file_name - if Path(classifier_file).exists(): + if classifier_file.exists(): classifier = mitie.text_categorizer(classifier_file) return cls(meta, classifier) else: @@ -113,7 +113,7 @@ def load( def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]: if self.clf: - file_name = file_name + ".dat" + file_name = f"{file_name}.dat" classifier_file = Path(model_dir) / file_name self.clf.save_to_disk(classifier_file, pure_model=True) return {"file": file_name} diff --git a/rasa/nlu/classifiers/sklearn_intent_classifier.py b/rasa/nlu/classifiers/sklearn_intent_classifier.py index e9b15fa5f02a..cfe89fa888f3 100644 --- a/rasa/nlu/classifiers/sklearn_intent_classifier.py +++ b/rasa/nlu/classifiers/sklearn_intent_classifier.py @@ -231,10 +231,11 @@ def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]] classifier_file_name = file_name + "_classifier.pkl" encoder_file_name = file_name + "_encoder.pkl" + model_dir = Path(model_dir) if self.clf and self.le: - io_utils.json_pickle(Path(model_dir) / encoder_file_name, self.le.classes_) + io_utils.json_pickle(model_dir / encoder_file_name, self.le.classes_) io_utils.json_pickle( - Path(model_dir) / classifier_file_name, 
self.clf.best_estimator_ + model_dir / classifier_file_name, self.clf.best_estimator_ ) return {"classifier": classifier_file_name, "encoder": encoder_file_name} @@ -249,10 +250,12 @@ def load( ) -> "SklearnIntentClassifier": from sklearn.preprocessing import LabelEncoder - classifier_file = Path(model_dir) / meta.get("classifier") - encoder_file = Path(model_dir) / meta.get("encoder") + model_dir = Path(model_dir) - if Path(classifier_file).exists(): + classifier_file = model_dir / meta.get("classifier") + encoder_file = model_dir / meta.get("encoder") + + if classifier_file.exists(): classifier = io_utils.json_unpickle(classifier_file) classes = io_utils.json_unpickle(encoder_file) encoder = LabelEncoder() diff --git a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py index 06e5805992df..9ebfc0b9ee38 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py @@ -677,7 +677,7 @@ def load( file_name = meta.get("file") featurizer_file = Path(model_dir) / file_name - if not Path(featurizer_file).exists(): + if not featurizer_file.exists(): return cls(meta) vocabulary = io_utils.json_unpickle(featurizer_file) diff --git a/rasa/nlu/model.py b/rasa/nlu/model.py index 502754f675ba..5dbcb6048736 100644 --- a/rasa/nlu/model.py +++ b/rasa/nlu/model.py @@ -232,15 +232,15 @@ def persist( model_name = NLU_MODEL_NAME_PREFIX + timestamp dir_path = Path(path).resolve() / model_name - dir_name = str(dir_path) - rasa.shared.utils.io.create_directory(dir_name) + dir_path_string = str(dir_path) + rasa.shared.utils.io.create_directory(dir_path_string) if self.training_data and persist_nlu_training_data: - metadata.update(self.training_data.persist(dir_name)) + metadata.update(self.training_data.persist(dir_path_string)) for i, component in enumerate(self.pipeline): file_name = self._file_name(i, component.name) - update = component.persist(file_name, dir_name) + update = component.persist(file_name, dir_path_string) component_meta = component.component_config if update: component_meta.update(update) @@ -248,12 +248,12 @@ def persist( metadata["pipeline"].append(component_meta) - Metadata(metadata, dir_name).persist(dir_name) + Metadata(metadata, dir_path_string).persist(dir_path_string) if persistor is not None: - persistor.persist(dir_name, model_name) - logger.info("Successfully saved model into '{}'".format(dir_path)) - return dir_name + persistor.persist(dir_path_string, model_name) + logger.info(f"Successfully saved model into {dir_path_string}") + return dir_path_string class Interpreter: diff --git a/rasa/nlu/utils/__init__.py b/rasa/nlu/utils/__init__.py index 48036ec3f10b..b0c15a70dd14 100644 --- a/rasa/nlu/utils/__init__.py +++ b/rasa/nlu/utils/__init__.py @@ -40,11 +40,10 @@ def is_model_dir(model_dir: Text) -> bool: if not dir_tree.is_dir(): return False - iter_dir = [d for d in dir_tree.iterdir()] - if [d for d in iter_dir if d.is_dir()]: # look for subdirectories + if any([d.is_dir() for d in dir_tree.iterdir()]): # look for subdirectories return False - file_extenstions = [PurePath(f).suffix for f in iter_dir] + file_extenstions = [PurePath(f).suffix for f in list(dir_tree.iterdir())] only_valid_files = all([ext in allowed_extensions for ext in file_extenstions]) return only_valid_files From 8c7bebb8655b280874ebc1da8baea48cb5750afb Mon Sep 17 00:00:00 2001 From: romulosouza Date: Tue, 10 Nov 2020 11:32:34 -0300 
Subject: [PATCH 6/9] Remove unnecessary casting --- rasa/nlu/utils/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rasa/nlu/utils/__init__.py b/rasa/nlu/utils/__init__.py index b0c15a70dd14..942cf2b0accc 100644 --- a/rasa/nlu/utils/__init__.py +++ b/rasa/nlu/utils/__init__.py @@ -43,8 +43,8 @@ def is_model_dir(model_dir: Text) -> bool: if any([d.is_dir() for d in dir_tree.iterdir()]): # look for subdirectories return False - file_extenstions = [PurePath(f).suffix for f in list(dir_tree.iterdir())] - only_valid_files = all([ext in allowed_extensions for ext in file_extenstions]) + file_extensions = [PurePath(f).suffix for f in dir_tree.iterdir()] + only_valid_files = all([ext in allowed_extensions for ext in file_extensions]) return only_valid_files From 2b1d8c1fee1447b6ba7ef56d746226f41310a094 Mon Sep 17 00:00:00 2001 From: Sara Silva Date: Wed, 11 Nov 2020 11:18:15 -0300 Subject: [PATCH 7/9] Make changes suggested by the reviewer --- changelog/3153.improvement.md | 2 +- rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog/3153.improvement.md b/changelog/3153.improvement.md index 31e8c3a3bb47..0ff41cec1922 100644 --- a/changelog/3153.improvement.md +++ b/changelog/3153.improvement.md @@ -1 +1 @@ -Used pathlib instead of os.path to improve readability. +Reduce usage of `os.path` in favor of `pathlib` throughout the code base. diff --git a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py index 5578309c202f..54c341365426 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py @@ -163,7 +163,7 @@ def load( file_name = meta.get("file") regex_file = Path(model_dir) / file_name - if Path(regex_file).exists(): + if regex_file.exists(): known_patterns = rasa.shared.utils.io.read_json_file(regex_file) return RegexFeaturizer(meta, known_patterns=known_patterns) else: From b425e6c47f5fe69931b9289698a9e61c3430cab2 Mon Sep 17 00:00:00 2001 From: romulosouza Date: Sun, 6 Dec 2020 16:56:17 -0300 Subject: [PATCH 8/9] Fix type of param --- rasa/nlu/classifiers/mitie_intent_classifier.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rasa/nlu/classifiers/mitie_intent_classifier.py b/rasa/nlu/classifiers/mitie_intent_classifier.py index fa13b3c7eb0d..59e8eebceed2 100644 --- a/rasa/nlu/classifiers/mitie_intent_classifier.py +++ b/rasa/nlu/classifiers/mitie_intent_classifier.py @@ -105,7 +105,7 @@ def load( return cls(meta) classifier_file = Path(model_dir) / file_name if classifier_file.exists(): - classifier = mitie.text_categorizer(classifier_file) + classifier = mitie.text_categorizer(str(classifier_file)) return cls(meta, classifier) else: return cls(meta) @@ -115,7 +115,7 @@ def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]: if self.clf: file_name = f"{file_name}.dat" classifier_file = Path(model_dir) / file_name - self.clf.save_to_disk(classifier_file, pure_model=True) + self.clf.save_to_disk(str(classifier_file), pure_model=True) return {"file": file_name} else: return {"file": None} From fdac786272d8a1f32827e291126d6be50c4b4158 Mon Sep 17 00:00:00 2001 From: romulosouza Date: Sun, 6 Dec 2020 23:27:32 -0300 Subject: [PATCH 9/9] Fix lint problems --- rasa/nlu/classifiers/mitie_intent_classifier.py | 3 +++ rasa/nlu/convert.py | 1 + rasa/nlu/extractors/entity_synonyms.py | 4 ++++ 
rasa/nlu/extractors/regex_entity_extractor.py | 3 ++- rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py | 4 +++- rasa/nlu/persistor.py | 2 -- rasa/nlu/tokenizers/jieba_tokenizer.py | 2 +- rasa/nlu/utils/__init__.py | 1 - rasa/shared/utils/io.py | 4 ++-- 9 files changed, 16 insertions(+), 8 deletions(-) diff --git a/rasa/nlu/classifiers/mitie_intent_classifier.py b/rasa/nlu/classifiers/mitie_intent_classifier.py index 59e8eebceed2..03b218e59296 100644 --- a/rasa/nlu/classifiers/mitie_intent_classifier.py +++ b/rasa/nlu/classifiers/mitie_intent_classifier.py @@ -111,7 +111,10 @@ def load( return cls(meta) def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]: + """Persist this model into the passed directory. + Return the metadata necessary to load the model again. + """ if self.clf: file_name = f"{file_name}.dat" classifier_file = Path(model_dir) / file_name diff --git a/rasa/nlu/convert.py b/rasa/nlu/convert.py index 0dd2f8df116a..9faf9791feea 100644 --- a/rasa/nlu/convert.py +++ b/rasa/nlu/convert.py @@ -10,6 +10,7 @@ def convert_training_data( data_file: Text, out_file: Text, output_format: Text, language: Text ): + """Convert the training data to the specified language and format.""" if not Path(data_file).exists(): print_error( "Data file '{}' does not exist. Provide a valid NLU data file using " diff --git a/rasa/nlu/extractors/entity_synonyms.py b/rasa/nlu/extractors/entity_synonyms.py index 8df371421575..8886a86a7ff3 100644 --- a/rasa/nlu/extractors/entity_synonyms.py +++ b/rasa/nlu/extractors/entity_synonyms.py @@ -51,6 +51,10 @@ def process(self, message: Message, **kwargs: Any) -> None: message.set(ENTITIES, updated_entities, add_to_output=True) def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: + """Persist this model into the passed directory. + + Return the metadata necessary to load the model again. + """ model_dir = Path(model_dir) if self.synonyms: file_name = file_name + ".json" diff --git a/rasa/nlu/extractors/regex_entity_extractor.py b/rasa/nlu/extractors/regex_entity_extractor.py index fab196f88298..aac0c34f05b6 100644 --- a/rasa/nlu/extractors/regex_entity_extractor.py +++ b/rasa/nlu/extractors/regex_entity_extractor.py @@ -126,8 +126,9 @@ def load( def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: """Persist this model into the passed directory. - Return the metadata necessary to load the model again.""" + Return the metadata necessary to load the model again. + """ file_name = f"{file_name}.json" regex_file = Path(model_dir) / file_name rasa.shared.utils.io.dump_obj_as_json_to_file(regex_file, self.patterns) diff --git a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py index 54c341365426..664351d9b1bc 100644 --- a/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py +++ b/rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py @@ -171,7 +171,9 @@ def load( def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: """Persist this model into the passed directory. - Return the metadata necessary to load the model again.""" + + Return the metadata necessary to load the model again. 
+ """ file_name = file_name + ".pkl" regex_file = Path(model_dir) / file_name utils.write_json_to_file(regex_file, self.known_patterns, indent=4) diff --git a/rasa/nlu/persistor.py b/rasa/nlu/persistor.py index ab119f3388d4..b89b21ae382e 100644 --- a/rasa/nlu/persistor.py +++ b/rasa/nlu/persistor.py @@ -49,7 +49,6 @@ class Persistor: def persist(self, model_directory: Text, model_name: Text) -> None: """Uploads a model persisted in the `target_dir` to cloud storage.""" - model_directory = Path(model_directory) if not model_directory.is_dir(): raise ValueError(f"Target directory '{model_directory}' not found.") @@ -71,7 +70,6 @@ def retrieve(self, model_name: Text, target_path: Text) -> None: def list_models(self) -> List[Text]: """Lists all the trained models.""" - raise NotImplementedError def _retrieve_tar(self, filename: Text) -> Text: diff --git a/rasa/nlu/tokenizers/jieba_tokenizer.py b/rasa/nlu/tokenizers/jieba_tokenizer.py index 10a4a073814f..c412b51f5043 100644 --- a/rasa/nlu/tokenizers/jieba_tokenizer.py +++ b/rasa/nlu/tokenizers/jieba_tokenizer.py @@ -94,6 +94,7 @@ def load( @staticmethod def copy_files_dir_to_dir(input_dir: Text, output_dir: Text) -> None: + """Copy files from one dir to another.""" create_directory(output_dir) target_file_list = glob.glob(f"{input_dir}/*") @@ -102,7 +103,6 @@ def copy_files_dir_to_dir(input_dir: Text, output_dir: Text) -> None: def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]: """Persist this model into the passed directory.""" - # copy custom dictionaries to model dir, if any if self.dictionary_path is not None: target_dictionary_path = Path(model_dir) / file_name diff --git a/rasa/nlu/utils/__init__.py b/rasa/nlu/utils/__init__.py index 9b86f18e9e82..427a37ab65f5 100644 --- a/rasa/nlu/utils/__init__.py +++ b/rasa/nlu/utils/__init__.py @@ -24,7 +24,6 @@ def write_json_to_file(filename: Union[Path, Text], obj: Any, **kwargs: Any) -> def write_to_file(filename: Union[Path, Text], text: Any) -> None: """Write a text to a file.""" - rasa.shared.utils.io.write_text_file(str(text), filename) diff --git a/rasa/shared/utils/io.py b/rasa/shared/utils/io.py index 2513a8626071..5397ec3a0ec2 100644 --- a/rasa/shared/utils/io.py +++ b/rasa/shared/utils/io.py @@ -476,8 +476,8 @@ def dump_obj_as_yaml_to_string( def create_directory(directory_path: Union[Path, Text]) -> None: """Creates a directory and its super paths. - Succeeds even if the path already exists.""" - + Succeeds even if the path already exists. + """ Path(directory_path).mkdir(parents=True, exist_ok=True)