Skip to content

Commit

Permalink
Feat: Add method to add multiple outputs to a prompt in AIConfig (#689)
Browse files Browse the repository at this point in the history
Summary:
This pull request adds the ability to add multiple outputs to a prompt in the
Python SDK.

Changes Made:
Added the add_outputs method to add multiple outputs to a prompt.

Related Issues:
Closes #153

Test Plan:
- [x] Test to check that multiple outputs can be added to prompt
`test_add_outputs_existing_prompt_no_overwrite`.
- [x] Test to check that `add_outputs` supports prompt output overwriting
`test_add_outputs_existing_prompt_with_overwrite`.
- [x] Test to check that an exception is raised if `add_outputs` is invoked
with an empty list or no outputs `test_add_undefined_outputs_to_prompt`.
  • Loading branch information
rossdanlm committed Jan 3, 2024
2 parents ed5fb9d + eafd36b commit e47ce1f
Show file tree
Hide file tree
Showing 2 changed files with 126 additions and 0 deletions.
23 changes: 23 additions & 0 deletions python/src/aiconfig/schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -842,6 +842,29 @@ def add_output(self, prompt_name: str, output: Output, overwrite: bool = False):
else:
prompt.outputs.append(output)

def add_outputs(self, prompt_name: str, outputs: List[Output], overwrite: bool = False):
    """
    Add multiple outputs to the prompt with the given name in the AIConfig.

    Args:
        prompt_name (str): The name of the prompt to add the outputs to.
        outputs (List[Output]): List of outputs to add. Must be non-empty.
        overwrite (bool, optional): Replaces the prompt's existing outputs if
            True. Otherwise appends the outputs to the prompt's output list.
            Defaults to False.

    Raises:
        IndexError: If no prompt named `prompt_name` exists in the config.
        ValueError: If `outputs` is empty.
    """
    prompt = self.get_prompt(prompt_name)
    if not prompt:
        raise IndexError(
            f"Cannot add outputs. Prompt '{prompt_name}' not found in config."
        )
    if not outputs:
        raise ValueError(
            f"Cannot add outputs. No outputs provided for prompt '{prompt_name}'."
        )
    if overwrite:
        # Copy the list so that later caller-side mutation of `outputs`
        # cannot silently alter this prompt's stored outputs.
        prompt.outputs = list(outputs)
    else:
        prompt.outputs.extend(outputs)

def delete_output(self, prompt_name: str):
"""
Deletes the outputs for the prompt with the given prompt_name.
Expand Down
103 changes: 103 additions & 0 deletions python/tests/test_programmatically_create_an_AIConfig.py
Original file line number Diff line number Diff line change
Expand Up @@ -502,6 +502,109 @@ def test_add_output_existing_prompt_no_overwrite(ai_config_runtime: AIConfigRunt
ai_config_runtime.delete_output("GreetingPrompt")
assert ai_config_runtime.get_latest_output("GreetingPrompt") == None

def test_add_outputs_existing_prompt_no_overwrite(ai_config_runtime: AIConfigRuntime):
    """Test adding outputs to an existing prompt without overwriting."""

    def make_result(text):
        # Build an ExecuteResult whose data and raw_response both carry `text`.
        return ExecuteResult(
            output_type="execute_result",
            execution_count=0,
            data=text,
            metadata={"raw_response": {"role": "assistant", "content": text}},
        )

    original_result = make_result("original result")
    prompt = Prompt(
        name="GreetingPrompt",
        input="Hello, how are you?",
        metadata=PromptMetadata(model="fakemodel"),
        outputs=[original_result],
    )
    ai_config_runtime.add_prompt(prompt.name, prompt)

    assert ai_config_runtime.get_latest_output("GreetingPrompt") == original_result

    new_outputs = [make_result("test output 1"), make_result("test output 2")]
    ai_config_runtime.add_outputs("GreetingPrompt", new_outputs)

    # Without overwrite the seeded output stays first and the last appended
    # output becomes the latest one.
    assert ai_config_runtime.get_latest_output("GreetingPrompt") == new_outputs[-1]
    assert prompt.outputs == [original_result] + new_outputs

def test_add_outputs_existing_prompt_with_overwrite(ai_config_runtime: AIConfigRuntime):
    """Test adding outputs to an existing prompt with overwriting."""

    def build_output(content):
        # Helper: ExecuteResult whose data and raw_response both carry `content`.
        return ExecuteResult(
            output_type="execute_result",
            execution_count=0,
            data=content,
            metadata={"raw_response": {"role": "assistant", "content": content}},
        )

    seeded_output = build_output("original result")
    greeting = Prompt(
        name="GreetingPrompt",
        input="Hello, how are you?",
        metadata=PromptMetadata(model="fakemodel"),
        outputs=[seeded_output],
    )
    ai_config_runtime.add_prompt(greeting.name, greeting)

    assert ai_config_runtime.get_latest_output("GreetingPrompt") == seeded_output

    replacements = [build_output("test output 1"), build_output("test output 2")]
    ai_config_runtime.add_outputs("GreetingPrompt", replacements, True)

    # Overwrite discards the seeded output entirely.
    assert ai_config_runtime.get_latest_output("GreetingPrompt") == replacements[-1]
    assert greeting.outputs == replacements

def test_add_undefined_outputs_to_prompt(ai_config_runtime: AIConfigRuntime):
    """Test for adding undefined outputs to an existing prompt with/without overwriting. Should result in an error."""
    greeting = Prompt(
        name="GreetingPrompt",
        input="Hello, how are you?",
        metadata=PromptMetadata(model="fakemodel"),
    )
    ai_config_runtime.add_prompt(greeting.name, greeting)
    assert ai_config_runtime.get_latest_output("GreetingPrompt") == None

    # An empty output list must be rejected regardless of the overwrite flag.
    for overwrite in (False, True):
        with pytest.raises(
            ValueError,
            match=r"Cannot add outputs. No outputs provided for prompt 'GreetingPrompt'.",
        ):
            ai_config_runtime.add_outputs("GreetingPrompt", [], overwrite)

def test_add_output_existing_prompt_overwrite(ai_config_runtime: AIConfigRuntime):
"""Test adding an output to an existing prompt with overwriting."""
Expand Down

0 comments on commit e47ce1f

Please sign in to comment.