Skip to content

Commit

Permalink
fix: feedbacks and test cases
Browse files Browse the repository at this point in the history
  • Loading branch information
noble-varghese committed Dec 1, 2023
1 parent 9755993 commit 4330024
Show file tree
Hide file tree
Showing 4 changed files with 423 additions and 2 deletions.
16 changes: 14 additions & 2 deletions portkey_ai/api_resources/apis/feedback.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import Optional, Dict, Any
from typing import Optional, Dict, Any, List
from portkey_ai.api_resources.apis.api_resource import APIResource
from portkey_ai.api_resources.base_client import APIClient
from portkey_ai.api_resources.streaming import Stream
Expand All @@ -16,7 +16,7 @@ def create(
value: Optional[int] = None,
weight: Optional[float] = None,
metadata: Optional[Dict[str, Any]] = None
) -> None:
) -> GenericResponse:
body = dict(trace_id=trace_id, value=value, weight=weight, metadata=metadata)
return self._post(
PortkeyApiPaths.FEEDBACK_API,
Expand All @@ -27,3 +27,15 @@ def create(
stream=False,
headers={},
)

def bulk_create(self, *, feedbacks: List[Dict[str, Any]]) -> GenericResponse:
    """Submit several feedback entries to the Feedback API in one request.

    Args:
        feedbacks: List of feedback payload dicts (each typically holding
            trace_id / value / weight / metadata keys, as in ``create``).

    Returns:
        GenericResponse: The parsed, non-streaming API response.
    """
    # The list itself is the request body; no wrapping object is needed.
    return self._post(
        PortkeyApiPaths.FEEDBACK_API,
        body=feedbacks,
        params=None,
        cast_to=GenericResponse,
        stream_cls=Stream[GenericResponse],
        stream=False,
        headers={},
    )
1 change: 1 addition & 0 deletions portkey_ai/api_resources/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ def __init__(
self.generations = apis.Generations(self)
self.prompt = apis.Prompt(self)
self.embeddings = apis.Embeddings(self)
self.feedback = apis.Feedback(self)

def copy(
self,
Expand Down
217 changes: 217 additions & 0 deletions tests/test_chat_complete.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,3 +235,220 @@ def test_method_single_provider(self, client: Any, config: Dict) -> None:
)

print(completion.choices)


class TestChatCompletionsStreaming:
    """Integration tests for streaming chat completions (`stream=True`).

    Each test builds a Portkey client with a different auth/config strategy
    (provider header, basic config, retry+cache, loadbalance, fallback) and
    issues a streaming chat completion against the live gateway.
    """

    client = Portkey
    parametrize = pytest.mark.parametrize("client", [client], ids=["strict"])
    # Provider/model matrix loaded once at class-definition time.
    models = read_json_file("./tests/models.json")

    def get_metadata(self):
        """Return request metadata tagged with the calling test's name.

        Uses ``f_back`` to read the name of the test method that invoked
        this helper, so every request is traceable to its test case.
        """
        return {
            "case": "testing",
            "function": inspect.currentframe().f_back.f_code.co_name,
            "random_id": str(uuid4()),
        }

    # --------------------------
    # Test-1: one (client, provider, auth, model) tuple per chat-capable model.
    t1_params = []
    for k, v in models.items():
        for i in v["chat"]:
            t1_params.append((client, k, os.environ.get(v["env_variable"]), i))

    @pytest.mark.parametrize("client, provider, auth, model", t1_params)
    def test_method_single_with_vk_and_provider(
        self, client: Any, provider: str, auth: str, model
    ) -> None:
        """Stream a completion using an explicit provider + bearer token."""
        portkey = client(
            base_url=base_url,
            api_key=api_key,
            provider=f"{provider}",
            Authorization=f"Bearer {auth}",
            trace_id=str(uuid4()),
            metadata=self.get_metadata(),
        )

        portkey.chat.completions.create(
            messages=[{"role": "user", "content": "Say this is a test"}],
            model=model,
            max_tokens=245,
            stream=True,
        )

    # --------------------------
    # Test-2
    t2_params = []
    for i in get_configs(f"{CONFIGS_PATH}/single_with_basic_config"):
        t2_params.append((client, i))

    @pytest.mark.parametrize("client, config", t2_params)
    def test_method_single_with_basic_config(self, client: Any, config: Dict) -> None:
        """Stream a completion using a basic (single-provider) config.

        Steps:
        1. Create a Portkey client with the base URL, API key, a fresh trace
           ID, test metadata, and a config loaded from the
           ``single_with_basic_config`` fixtures directory.
        2. Call ``chat.completions.create`` with ``stream=True``; the call
           raising no exception is the pass criterion.

        Note:
            Ensure the ``single_with_basic_config`` fixture directory exists
            and contains valid configuration data.
        """
        portkey = client(
            base_url=base_url,
            api_key=api_key,
            trace_id=str(uuid4()),
            metadata=self.get_metadata(),
            config=config,
        )

        portkey.chat.completions.create(
            messages=[{"role": "user", "content": "Say this is a test"}],
            model="gpt-3.5-turbo",
            stream=True,
        )

    # --------------------------
    # Test-3
    t3_params = []
    for i in get_configs(f"{CONFIGS_PATH}/single_provider_with_vk_retry_cache"):
        t3_params.append((client, i))

    @pytest.mark.parametrize("client, config", t3_params)
    def test_method_single_provider_with_vk_retry_cache(
        self, client: Any, config: Dict
    ) -> None:
        """Populate the cache with one request, then expect a cache hit.

        1. Make a request to create a new cache entry.
        2. Repeat the identical request (same trace ID and metadata) and
           rely on the cached response.
        """
        random_id = str(uuid4())
        metadata = self.get_metadata()
        portkey = client(
            base_url=base_url,
            api_key=api_key,
            trace_id=random_id,
            virtual_key=virtual_api_key,
            metadata=metadata,
            config=config,
        )

        portkey.chat.completions.create(
            messages=[{"role": "user", "content": "Say this is a test"}],
            model="gpt-3.5-turbo",
            stream=True,
        )
        # Sleeping for the cache to reflect across the workers. The cache has
        # an eventual consistency and not immediate consistency.
        sleep(20)
        portkey_2 = client(
            base_url=base_url,
            api_key=api_key,
            trace_id=random_id,
            virtual_key=virtual_api_key,
            metadata=metadata,
            config=config,
        )

        portkey_2.chat.completions.create(
            messages=[{"role": "user", "content": "Say this is a test"}],
            model="gpt-3.5-turbo",
            stream=True,
        )

    # --------------------------
    # Test-4
    t4_params = []
    for i in get_configs(f"{CONFIGS_PATH}/loadbalance_with_two_apikeys"):
        t4_params.append((client, i))

    @pytest.mark.parametrize("client, config", t4_params)
    def test_method_loadbalance_with_two_apikeys(
        self, client: Any, config: Dict
    ) -> None:
        """Stream a completion through a loadbalance config with two API keys."""
        portkey = client(
            base_url=base_url,
            api_key=api_key,
            trace_id=str(uuid4()),
            metadata=self.get_metadata(),
            config=config,
        )

        completion = portkey.chat.completions.create(
            messages=[{"role": "user", "content": "Say this is a test"}],
            max_tokens=245,
            stream=True,
        )

        print(completion)

    # --------------------------
    # Test-5
    t5_params = []
    for i in get_configs(f"{CONFIGS_PATH}/loadbalance_and_fallback"):
        t5_params.append((client, i))

    @pytest.mark.parametrize("client, config", t5_params)
    def test_method_loadbalance_and_fallback(self, client: Any, config: Dict) -> None:
        """Stream a completion through a combined loadbalance + fallback config."""
        portkey = client(
            base_url=base_url,
            api_key=api_key,
            trace_id=str(uuid4()),
            config=config,
        )

        completion = portkey.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": "Say this is just a loadbalance and fallback test test",
                }
            ],
            stream=True,
        )

        print(completion)

    # --------------------------
    # Test-6
    t6_params = []
    for i in get_configs(f"{CONFIGS_PATH}/single_provider"):
        t6_params.append((client, i))

    @pytest.mark.parametrize("client, config", t6_params)
    def test_method_single_provider(self, client: Any, config: Dict) -> None:
        """Stream a completion through a plain single-provider config."""
        portkey = client(
            base_url=base_url,
            api_key=api_key,
            trace_id=str(uuid4()),
            config=config,
        )

        completion = portkey.chat.completions.create(
            messages=[{"role": "user", "content": "Say this is a test"}],
            model="gpt-3.5-turbo",
            stream=True,
        )

        print(completion)
Loading

0 comments on commit 4330024

Please sign in to comment.