diff --git a/rasa/nlu/classifiers/llm_flow_classifier.py b/rasa/nlu/classifiers/llm_flow_classifier.py
index e8bd9a04449a..4b715009f1d9 100644
--- a/rasa/nlu/classifiers/llm_flow_classifier.py
+++ b/rasa/nlu/classifiers/llm_flow_classifier.py
@@ -154,9 +154,12 @@ def process_single(
 
     @classmethod
     def parse_action_list(
-        cls, actions: str, tracker: DialogueStateTracker, flows: FlowsList
+        cls, actions: Optional[str], tracker: DialogueStateTracker, flows: FlowsList
     ) -> Tuple[str, List[Tuple[str, str]]]:
         """Parse the actions returned by the llm into intent and entities."""
+        if not actions:
+            # The LLM call failed upstream; surface a dedicated error intent.
+            return "openai_error", []
         start_flow_actions = []
         slot_sets = []
         cancel_flow = False
diff --git a/rasa/utils/llm.py b/rasa/utils/llm.py
index f9dd42a81c1e..9e1dc1ea1391 100644
--- a/rasa/utils/llm.py
+++ b/rasa/utils/llm.py
@@ -1,12 +1,13 @@
 from typing import Optional
 import openai
 import logging
-
+import openai.error
+import structlog
 from rasa.shared.core.events import BotUttered, UserUttered
 from rasa.shared.core.trackers import DialogueStateTracker
 
 
-logger = logging.getLogger(__name__)
+structlogger = structlog.get_logger()
 
 USER = "USER"
 
@@ -35,12 +36,16 @@ def generate_text_openai_chat(
         The generated text.
     """
-    # TODO: exception handling
-    chat_completion = openai.ChatCompletion.create(  # type: ignore[no-untyped-call]
-        model=model,
-        messages=[{"role": "user", "content": prompt}],
-        temperature=temperature,
-    )
-    return chat_completion.choices[0].message.content
+    try:
+        chat_completion = openai.ChatCompletion.create(  # type: ignore[no-untyped-call]
+            model=model,
+            messages=[{"role": "user", "content": prompt}],
+            temperature=temperature,
+        )
+        return chat_completion.choices[0].message.content
+    except openai.error.OpenAIError:
+        structlogger.exception("openai.generate.error", model=model, prompt=prompt)
+        # Degrade gracefully: callers treat None as a failed LLM call.
+        return None
 
 
 def tracker_as_readable_transcript(
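
A minimal sketch (not part of the diff) of how the two changes compose: generate_text_openai_chat now returns None instead of raising when the OpenAI call fails, and parse_action_list maps that None to a dedicated "openai_error" intent. The call site below is an assumption, since the diff does not show how process_single wires these together, and the argument order of generate_text_openai_chat is guessed from the parameters visible in the hunk.

    # Hypothetical call site; prompt, tracker, and flows come from the
    # classifier's process_single context, which this diff does not show.
    actions = generate_text_openai_chat(prompt, model="gpt-3.5-turbo")

    # actions is None when ChatCompletion.create raised an OpenAIError;
    # parse_action_list short-circuits to the "openai_error" intent.
    intent, entities = LLMFlowClassifier.parse_action_list(actions, tracker, flows)

    if intent == "openai_error":
        # Route to a fallback response instead of crashing the turn.
        ...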