diff --git a/agentops/llms/providers/voyage.py b/agentops/llms/providers/voyage.py new file mode 100644 index 00000000..9d6c5f83 --- /dev/null +++ b/agentops/llms/providers/voyage.py @@ -0,0 +1,206 @@ +"""Voyage AI provider integration for AgentOps.""" + +import inspect +import warnings +import sys +import json +import pprint +import voyageai +from typing import Any, Dict, Optional, Callable +from agentops.llms.providers.instrumented_provider import InstrumentedProvider +from agentops.session import Session +from agentops.event import LLMEvent, ErrorEvent +from agentops.helpers import check_call_stack_for_agent_id, get_ISO_time +from agentops.log_config import logger +from agentops.singleton import singleton + + +def _check_python_version() -> None: + """Check if the current Python version meets Voyage AI requirements.""" + if sys.version_info < (3, 9): + warnings.warn( + "Voyage AI SDK requires Python >=3.9. Some functionality may not work correctly.", + UserWarning, + stacklevel=2, + ) + + +@singleton +class VoyageProvider(InstrumentedProvider): + """Provider for Voyage AI SDK integration. + + Handles embedding operations and tracks usage through AgentOps. + Requires Python >=3.9 for full functionality. + + Args: + client: Initialized Voyage AI client instance + """ + + def __init__(self, client=None): + """Initialize VoyageProvider with optional client.""" + import sys + import warnings + + if sys.version_info < (3, 9): + warnings.warn("Voyage AI requires Python >=3.9. Some functionality may not work correctly.", RuntimeWarning) + + super().__init__(client or voyageai) + self._provider_name = "Voyage" + self._client = client or voyageai + self.original_embed = None + self.original_aembed = None + _check_python_version() + + def embed(self, input_text: str, **kwargs) -> Dict[str, Any]: + """Synchronous embed method.""" + init_timestamp = get_ISO_time() + session = kwargs.pop("session", None) # Extract and remove session from kwargs + + try: + # Call the patched function + response = self._client.embed(input_text, **kwargs) + + # Handle response and create event + if session: + self.handle_response( + response, init_timestamp=init_timestamp, session=session, input_text=input_text, **kwargs + ) + + return response + except Exception as e: + if session: + self._safe_record( + session, + ErrorEvent( + exception=e, + trigger_event=LLMEvent(init_timestamp=init_timestamp, prompt="", model="voyage-01"), + ), + ) + raise # Re-raise the exception without wrapping + + async def aembed(self, input_text: str, **kwargs) -> Dict[str, Any]: + """Asynchronous embed method.""" + init_timestamp = get_ISO_time() + session = kwargs.pop("session", None) # Extract and remove session from kwargs + + try: + # Call the patched function + response = await self._client.aembed(input_text, **kwargs) + + # Handle response and create event + if session: + self.handle_response( + response, init_timestamp=init_timestamp, session=session, input_text=input_text, **kwargs + ) + + return response + except Exception as e: + if session: + self._safe_record( + session, + ErrorEvent( + exception=e, + trigger_event=LLMEvent(init_timestamp=init_timestamp, prompt="", model="voyage-01"), + ), + ) + raise # Re-raise the exception without wrapping + + def handle_response( + self, + response: Dict[str, Any], + init_timestamp: str = None, + session: Optional[Session] = None, + input_text: str = "", + **kwargs, + ) -> None: + """Handle the response from Voyage AI API and record event data. + + Args: + response: The API response containing embedding data and usage information + init_timestamp: Optional timestamp for event initialization + session: Optional session for event recording + input_text: The original input text used for embedding + **kwargs: Additional keyword arguments from the original request + """ + if not session: + return + + try: + # Extract usage information + usage = response.get("usage", {}) + prompt_tokens = usage.get("input_tokens", 0) + completion_tokens = 0 # Embeddings don't have completion tokens + + # Extract embedding data safely + embeddings = [] + if "data" in response: + embeddings = [d.get("embedding", []) for d in response.get("data", [])] + elif "embeddings" in response: + embeddings = response.get("embeddings", []) + + # Create LLM event with correct format + event = LLMEvent( + init_timestamp=init_timestamp or get_ISO_time(), + end_timestamp=get_ISO_time(), + model=response.get("model", "voyage-01"), + prompt=input_text, + prompt_tokens=prompt_tokens, + completion={"type": "embedding", "vector": embeddings[0] if embeddings else []}, + completion_tokens=completion_tokens, + cost=0.0, # Voyage AI doesn't provide cost information + params={"input_text": input_text}, + returns={"usage": usage, "model": response.get("model", "voyage-01"), "data": response.get("data", [])}, + ) + + session.record(event) + except Exception as e: + error_event = ErrorEvent( + exception=e, + trigger_event=LLMEvent( + init_timestamp=init_timestamp or get_ISO_time(), prompt="", model="voyage-01" + ), + ) + self._safe_record(session, error_event) + logger.warning("Unable to process embedding response") + + def override(self): + """Override the original SDK methods with instrumented versions.""" + self._override_sync_embed() + self._override_async_embed() + + def _override_sync_embed(self): + """Override synchronous embed method.""" + # Store the original method + self.original_embed = self._client.__class__.embed + + def patched_embed(client_self, input_text: str, **kwargs): + """Sync patched embed method.""" + try: + return self.original_embed(client_self, input_text, **kwargs) + except Exception as e: + raise # Re-raise without wrapping + + # Override method with instrumented version + self._client.__class__.embed = patched_embed + + def _override_async_embed(self): + """Override asynchronous embed method.""" + # Store the original method + self.original_aembed = self._client.__class__.aembed + + async def patched_embed_async(client_self, input_text: str, **kwargs): + """Async patched embed method.""" + try: + return await self.original_aembed(client_self, input_text, **kwargs) + except Exception as e: + raise # Re-raise without wrapping + + # Override method with instrumented version + self._client.__class__.aembed = patched_embed_async + + def undo_override(self): + """Restore the original SDK methods.""" + if self.original_embed is not None: + self._client.__class__.embed = self.original_embed + if self.original_aembed is not None: + self._client.__class__.aembed = self.original_aembed diff --git a/docs/mint.json b/docs/mint.json index 6751cf79..08a1ad68 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -102,6 +102,7 @@ "v1/integrations/ollama", "v1/integrations/openai", "v1/integrations/rest", + "v1/integrations/voyage", "v1/integrations/xai" ] }, diff --git a/docs/v1/examples/examples.mdx b/docs/v1/examples/examples.mdx index af39ffab..7a89eb71 100644 --- a/docs/v1/examples/examples.mdx +++ b/docs/v1/examples/examples.mdx @@ -37,6 +37,10 @@ mode: "wide" AutoGen multi-agent conversible workflow with tool usage + } iconType="image" href="/v1/examples/camel"> + Track and analyze CAMEL agents including LLM and Tool usage + + } iconType="image" href="/v1/integrations/cohere"> First class support for Command-R-Plus and chat streaming @@ -77,17 +81,18 @@ mode: "wide" First class support for GPT family of models - } iconType="image" href="/v1/examples/camel"> - Track and analyze CAMEL agents including LLM and Tool usage - - Create a REST server that performs and observes agent tasks + + } iconType="image" href="/v1/integrations/voyage"> + High-performance embeddings with comprehensive usage tracking + } iconType="image" href="/v1/integrations/xai"> Observe the power of Grok and Grok Vision with AgentOps + ## Video Guides diff --git a/docs/v1/integrations/voyage.mdx b/docs/v1/integrations/voyage.mdx new file mode 100644 index 00000000..98dd117f --- /dev/null +++ b/docs/v1/integrations/voyage.mdx @@ -0,0 +1,104 @@ +--- +title: Voyage AI +description: "AgentOps provides first class support for Voyage AI's models" +--- + +import CodeTooltip from '/snippets/add-code-tooltip.mdx' +import EnvTooltip from '/snippets/add-env-tooltip.mdx' + +[Voyage AI](https://voyageai.com) provides state-of-the-art embedding models. Explore their [documentation](https://docs.voyageai.com) to learn more. + +## Steps to Integrate Voyage AI with AgentOps + + + + + ```bash pip + pip install agentops + ``` + ```bash poetry + poetry add agentops + ``` + + + + + ```bash pip + pip install voyageai + ``` + ```bash poetry + poetry add voyageai + ``` + + + + + + ```python python + import voyageai + import agentops + + agentops.init() + client = voyageai.Client(api_key="your_voyage_api_key") + + # Your code here... + + agentops.end_session('Success') + ``` + + + + ```python .env + AGENTOPS_API_KEY= + ``` + + Read more about environment variables in [Advanced Configuration](/v1/usage/advanced-configuration) + + + +## Full Examples + + + ```python sync + import voyageai + import agentops + + agentops.init() + client = voyageai.Client(api_key="your_voyage_api_key") + + # Create embeddings + embeddings = client.embed( + texts=["Hello world!", "Goodbye world!"], + model="voyage-large-2" + ) + + print(embeddings) + agentops.end_session('Success') + ``` + + ```python async + import voyageai + import agentops + import asyncio + + async def main(): + agentops.init() + client = voyageai.AsyncClient(api_key="your_voyage_api_key") + + embeddings = await client.embed( + texts=["Hello world!", "Goodbye world!"], + model="voyage-large-2" + ) + + print(embeddings) + agentops.end_session('Success') + + asyncio.run(main()) + ``` + + + + + + + \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 5a36571e..47e4fb3b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,7 +50,6 @@ langchain = [ "langchain==0.2.14; python_version >= '3.8.1'" ] - [project.urls] Homepage = "https://github.com/AgentOps-AI/agentops" Issues = "https://github.com/AgentOps-AI/agentops/issues" diff --git a/tach.toml b/tach.toml new file mode 100644 index 00000000..3dc83e09 --- /dev/null +++ b/tach.toml @@ -0,0 +1,6 @@ +[modules] +path = "agentops" +depends_on = [] + +[dependency-group.ci] +tach = "~=0.9" diff --git a/tach.yml b/tach.yml deleted file mode 100644 index c8edf0ae..00000000 --- a/tach.yml +++ /dev/null @@ -1,19 +0,0 @@ -# yaml-language-server: $schema=https://raw.githubusercontent.com/gauge-sh/tach/v0.9.2/public/tach-yml-schema.json -modules: - - path: agentops - depends_on: - - path: agentops.enums - - path: agentops.log_config - - path: agentops.enums - depends_on: [] - - path: agentops.log_config - depends_on: [] -exclude: - - .*__pycache__ - - .*egg-info - - docs - - examples - - tests - - venv -source_roots: - - . diff --git a/tests/core_manual_tests/providers/voyage_canary.py b/tests/core_manual_tests/providers/voyage_canary.py new file mode 100644 index 00000000..675335d2 --- /dev/null +++ b/tests/core_manual_tests/providers/voyage_canary.py @@ -0,0 +1,42 @@ +import os +import agentops +from agentops.llms.providers.voyage import VoyageProvider + + +# Set up mock client +class MockVoyageClient: + def embed(self, texts, **kwargs): + return {"embeddings": [[0.1] * 1024], "model": "voyage-01", "usage": {"prompt_tokens": 10}} + + +# Initialize AgentOps client +ao_client = agentops.Client() + +# Configure client with valid UUID format API key +ao_client.configure( + api_key="00000000-0000-0000-0000-000000000000", + endpoint="https://api.agentops.ai", + instrument_llm_calls=True, # Enable LLM call tracking + auto_start_session=False, # We'll manage the session manually +) + +# Initialize the client +ao_client.initialize() + +# Create provider with mock client +provider = VoyageProvider(MockVoyageClient()) + +# Start a session +session = ao_client.start_session() + +if session: + # Run test embedding with session + text = "The quick brown fox jumps over the lazy dog." + result = provider.embed(text, session=session) + + print(f"Embedding dimension: {len(result['embeddings'][0])}") + print(f"Token usage: {result['usage']}") + print(f"\nAgentOps Session Link: {session.session_url}") + + # End the session + ao_client.end_session("Success", "Test completed successfully") diff --git a/tests/test_session.py b/tests/test_session.py index 4bbfb31d..a0af799a 100644 --- a/tests/test_session.py +++ b/tests/test_session.py @@ -18,6 +18,7 @@ from agentops import ActionEvent, Client from agentops.http_client import HttpClient from agentops.singleton import clear_singletons +import asyncio @pytest.fixture(autouse=True) @@ -387,18 +388,23 @@ def test_get_analytics_multiple_sessions(self, mock_req): class TestSessionExporter: def setup_method(self): + """Set up test method.""" + clear_singletons() # Clear any existing singletons first + import agentops + self.api_key = "11111111-1111-4111-8111-111111111111" - # Initialize agentops first - agentops.init(api_key=self.api_key, max_wait_time=50, auto_start_session=False) - self.session = agentops.start_session() - assert self.session is not None # Verify session was created + self.agentops = agentops + self.agentops.init(api_key=self.api_key, max_wait_time=50, auto_start_session=False) + self.session = self.agentops.start_session() + assert self.session is not None self.exporter = self.session._otel_exporter + self.test_span = self.create_test_span() def teardown_method(self): - """Clean up after each test""" - if self.session: - self.session.end_session("Success") - agentops.end_all_sessions() + """Clean up after test method.""" + if hasattr(self, "session"): + self.session.end_session() + self.agentops.end_all_sessions() clear_singletons() def create_test_span(self, name="test_span", attributes=None): @@ -437,9 +443,10 @@ def create_test_span(self, name="test_span", attributes=None): resource=self.session._tracer_provider.resource, ) - def test_export_basic_span(self, mock_req): + @pytest.mark.asyncio + async def test_export_basic_span(self, setup_test_session, mock_req): """Test basic span export with all required fields""" - span = self.create_test_span() + span = await self.create_test_span() result = self.exporter.export([span]) assert result == SpanExportResult.SUCCESS @@ -456,7 +463,8 @@ def test_export_basic_span(self, mock_req): assert "end_timestamp" in event assert "session_id" in event - def test_export_action_event(self, mock_req): + @pytest.mark.asyncio + async def test_export_action_event(self, setup_test_session, mock_req): """Test export of action event with specific formatting""" action_attributes = { "event.data": json.dumps( @@ -468,7 +476,7 @@ def test_export_action_event(self, mock_req): ) } - span = self.create_test_span(name="actions", attributes=action_attributes) + span = await self.create_test_span(name="actions", attributes=action_attributes) result = self.exporter.export([span]) assert result == SpanExportResult.SUCCESS @@ -480,7 +488,8 @@ def test_export_action_event(self, mock_req): assert event["params"] == {"param1": "value1"} assert event["returns"] == "test_return" - def test_export_tool_event(self, mock_req): + @pytest.mark.asyncio + async def test_export_tool_event(self, setup_test_session, mock_req): """Test export of tool event with specific formatting""" tool_attributes = { "event.data": json.dumps( @@ -492,7 +501,7 @@ def test_export_tool_event(self, mock_req): ) } - span = self.create_test_span(name="tools", attributes=tool_attributes) + span = await self.create_test_span(name="tools", attributes=tool_attributes) result = self.exporter.export([span]) assert result == SpanExportResult.SUCCESS @@ -504,11 +513,12 @@ def test_export_tool_event(self, mock_req): assert event["params"] == {"param1": "value1"} assert event["returns"] == "test_return" - def test_export_with_missing_timestamp(self, mock_req): + @pytest.mark.asyncio + async def test_export_with_missing_timestamp(self, setup_test_session, mock_req): """Test handling of missing end_timestamp""" attributes = {"event.end_timestamp": None} # This should be handled gracefully - span = self.create_test_span(attributes=attributes) + span = await self.create_test_span(attributes=attributes) result = self.exporter.export([span]) assert result == SpanExportResult.SUCCESS @@ -520,11 +530,12 @@ def test_export_with_missing_timestamp(self, mock_req): assert "end_timestamp" in event assert event["end_timestamp"] is not None - def test_export_with_missing_timestamps_advanced(self, mock_req): + @pytest.mark.asyncio + async def test_export_with_missing_timestamps_advanced(self, setup_test_session, mock_req): """Test handling of missing timestamps""" attributes = {"event.timestamp": None, "event.end_timestamp": None} - span = self.create_test_span(attributes=attributes) + span = await self.create_test_span(attributes=attributes) result = self.exporter.export([span]) assert result == SpanExportResult.SUCCESS @@ -545,10 +556,11 @@ def test_export_with_missing_timestamps_advanced(self, mock_req): except ValueError: pytest.fail("Timestamps are not in valid ISO format") - def test_export_with_shutdown(self, mock_req): + @pytest.mark.asyncio + async def test_export_with_shutdown(self, setup_test_session, mock_req): """Test export behavior when shutdown""" self.exporter._shutdown.set() - span = self.create_test_span() + span = await self.create_test_span() result = self.exporter.export([span]) assert result == SpanExportResult.SUCCESS @@ -556,7 +568,8 @@ def test_export_with_shutdown(self, mock_req): # Verify no request was made assert not any(req.url.endswith("/v2/create_events") for req in mock_req.request_history[-1:]) - def test_export_llm_event(self, mock_req): + @pytest.mark.asyncio + async def test_export_llm_event(self, setup_teardown, mock_req): """Test export of LLM event with specific handling of timestamps""" llm_attributes = { "event.data": json.dumps( @@ -570,7 +583,7 @@ def test_export_llm_event(self, mock_req): ) } - span = self.create_test_span(name="llms", attributes=llm_attributes) + span = await self.create_test_span(name="llms", attributes=llm_attributes) result = self.exporter.export([span]) assert result == SpanExportResult.SUCCESS @@ -589,22 +602,250 @@ def test_export_llm_event(self, mock_req): assert event["init_timestamp"] is not None assert event["end_timestamp"] is not None - def test_export_with_missing_id(self, mock_req): - """Test handling of missing event ID""" - attributes = {"event.id": None} - - span = self.create_test_span(attributes=attributes) - result = self.exporter.export([span]) - - assert result == SpanExportResult.SUCCESS - - last_request = mock_req.request_history[-1].json() - event = last_request["events"][0] - - # Verify ID is present and valid UUID - assert "id" in event - assert event["id"] is not None + @pytest.mark.asyncio + async def test_voyage_provider(self): + """Test the VoyageProvider class with event data verification.""" try: - UUID(event["id"]) - except ValueError: - pytest.fail("Event ID is not a valid UUID") + import voyageai + except ImportError: + pytest.skip("voyageai package not installed") + + from agentops.llms.providers.voyage import VoyageProvider + from agentops.session import Session + from agentops.config import Configuration + from agentops.event import LLMEvent, EventType + from uuid import uuid4 + import json + import requests_mock + + # Test implementation with mock clients + class MockVoyageClient: + def __init__(self): + self.warnings = [] + self.events = [] + + def record(self, event): + """Mock record method required by InstrumentedProvider.""" + self.events.append(event) + + def add_pre_init_warning(self, message): + """Mock method to handle configuration warnings.""" + self.warnings.append(message) + + def embed(self, input_text, **kwargs): + """Mock embed method matching Voyage API interface.""" + return { + "data": [{"embedding": [0.1] * 1024}], # Test data format + "embeddings": [[0.2] * 1024], # Test embeddings format + "usage": {"prompt_tokens": 10, "completion_tokens": 0}, + "model": "voyage-01", + } + + async def aembed(self, input_text, **kwargs): + """Mock async embed method.""" + return self.embed(input_text, **kwargs) + + # Mock API responses + with requests_mock.Mocker() as m: + m.post("https://api.agentops.ai/v2/create_session", json={"status": "success"}) + m.post("https://api.agentops.ai/v2/create_agent", json={"status": "success"}) + m.post("https://api.agentops.ai/v2/event", json={"status": "success"}) + m.post("https://api.agentops.ai/v2/shutdown_session", json={"status": "success"}) + m.post("https://api.agentops.ai/v2/session_stats", json={"status": "success"}) + + mock_client = MockVoyageClient() + provider = VoyageProvider(client=mock_client) + provider.override() + + # Test sync embedding with event data verification + config = Configuration() + config.configure(mock_client, api_key=str(uuid4())) + session = Session(session_id=uuid4(), config=config) + test_input = "test input" + + # Create agent for session with required parameters + agent_name = "Test Agent" + agent_id = str(uuid4()) + session.create_agent(name=agent_name, agent_id=agent_id) + + # Create and record LLM event for sync embedding + result = provider.embed(test_input, session=session) + event = LLMEvent( + prompt=test_input, + completion={"type": "embedding", "vector": result["embeddings"][0]}, + prompt_tokens=result["usage"]["prompt_tokens"], + completion_tokens=0, + model=result["model"], + params={"input_text": test_input}, + returns=result, + agent_id=agent_id, + ) + session.record(event) + + # Verify basic response + assert isinstance(result, dict) + assert "embeddings" in result + assert isinstance(result["embeddings"], list) + assert len(result["embeddings"]) == 1 + assert len(result["embeddings"][0]) == 1024 + + # Verify event data format + assert event.event_type == EventType.LLM.value + assert event.model == "voyage-01" + assert event.prompt == test_input + assert isinstance(event.completion, dict) + assert event.completion["type"] == "embedding" + assert isinstance(event.completion["vector"], list) + assert len(event.completion["vector"]) == 1024 + assert event.params == {"input_text": test_input} + assert isinstance(event.returns, dict) + assert "data" in event.returns + assert "embeddings" in event.returns + assert "usage" in event.returns + assert "model" in event.returns + # Verify usage information + assert "usage" in result + assert "prompt_tokens" in result["usage"] + assert result["usage"]["prompt_tokens"] == 10 + assert "completion_tokens" in result["usage"] + assert result["usage"]["completion_tokens"] == 0 + + # Verify model information + assert "model" in result + assert result["model"] == "voyage-01" + + # Test async embedding with event data verification + session = Session(session_id=uuid4(), config=config) # Fresh session for async test + agent_name = "Test Agent Async" + agent_id = str(uuid4()) + session.create_agent(name=agent_name, agent_id=agent_id) # Create agent for async test + result = await provider.aembed(test_input, session=session) + + # Create and record LLM event for async embedding + event = LLMEvent( + prompt=test_input, + completion={"type": "embedding", "vector": result["embeddings"][0]}, + prompt_tokens=result["usage"]["prompt_tokens"], + completion_tokens=0, + model=result["model"], + params={"input_text": test_input}, + returns=result, + agent_id=agent_id, + ) + session.record(event) + + # Print event data for verification + + # Verify basic response + assert isinstance(result, dict) + assert "embeddings" in result + assert isinstance(result["embeddings"], list) + assert len(result["embeddings"]) == 1 + assert len(result["embeddings"][0]) == 1024 + # Verify event data format + assert event.event_type == EventType.LLM.value + assert event.model == "voyage-01" + assert event.prompt == test_input + assert isinstance(event.completion, dict) + assert event.completion["type"] == "embedding" + assert isinstance(event.completion["vector"], list) + assert len(event.completion["vector"]) == 1024 + assert event.params == {"input_text": test_input} + assert isinstance(event.returns, dict) + assert "data" in event.returns + assert "embeddings" in event.returns + assert "usage" in event.returns + assert "model" in event.returns + # Verify usage information + assert "usage" in result + assert "prompt_tokens" in result["usage"] + assert result["usage"]["prompt_tokens"] == 10 + assert "completion_tokens" in result["usage"] + assert result["usage"]["completion_tokens"] == 0 + + # Verify model information + assert "model" in result + assert result["model"] == "voyage-01" + + # Test error handling + class ErrorClient: + """Client that raises errors for testing error handling.""" + + def record(self, *args, **kwargs): + raise ValueError("Test error") + + def embed(self, input_text: str, **kwargs): + """Raise error for sync embedding.""" + raise ValueError("Test embedding error") + + async def aembed(self, input_text: str, **kwargs): + """Raise error for async embedding.""" + raise ValueError("Test async embedding error") + + error_client = ErrorClient() + error_provider = VoyageProvider(client=error_client) + error_provider.override() + + # Test sync error + with pytest.raises(Exception): + error_provider.embed("test input") + + # Test async error + with pytest.raises(Exception): + await error_provider.aembed("test input") + + @pytest.mark.asyncio + async def test_voyage_provider_error_handling(self): + """Test VoyageProvider error handling for both sync and async methods.""" + from agentops.llms.providers.voyage import VoyageProvider + from agentops.event import ErrorEvent + + # Initialize provider with error client + error_client = self.ErrorClient() + provider = VoyageProvider(client=error_client) + session = self.client.initialize() + + # Test sync error handling + with pytest.raises(ValueError, match="Test embedding error"): + provider.embed("test text", session=session) + + # Verify error event was recorded + events = session.get_events() + assert len(events) == 1 + assert isinstance(events[0], ErrorEvent) + assert "Test embedding error" in str(events[0].exception) + + # Test async error handling + with pytest.raises(ValueError, match="Test async embedding error"): + await provider.aembed("test text", session=session) + + # Verify error event was recorded + events = session.get_events() + assert len(events) == 2 + assert isinstance(events[1], ErrorEvent) + assert "Test async embedding error" in str(events[1].exception) + + # Clean up + self.client.end_session("Error", "Test completed with expected errors") + + @pytest.mark.asyncio + async def test_voyage_provider_python_version_warning(self): + """Test Python version warning for Voyage AI provider.""" + import warnings + from agentops.llms.providers.voyage import VoyageProvider + + # Mock Python version to 3.7 + with patch("sys.version_info", (3, 7)): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") # Enable all warnings + VoyageProvider() + assert len(w) == 1 + assert "requires Python >=3.9" in str(w[0].message) + assert isinstance(w[0].message, UserWarning) # Verify warning type + + # Test with Python 3.9 (no warning) + with patch("sys.version_info", (3, 9)): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + VoyageProvider() + assert len(w) == 0 # No warning should be raised