diff --git a/python/openinference-semantic-conventions/pyproject.toml b/python/openinference-semantic-conventions/pyproject.toml
index 50b5a62ab..33dab9fce 100644
--- a/python/openinference-semantic-conventions/pyproject.toml
+++ b/python/openinference-semantic-conventions/pyproject.toml
@@ -45,6 +45,7 @@ packages = ["src/openinference"]
[tool.pytest.ini_options]
asyncio_mode = "auto"
+asyncio_default_fixture_loop_scope = "function"
testpaths = [
"tests",
]
diff --git a/python/openinference-semantic-conventions/src/openinference/semconv/trace/__init__.py b/python/openinference-semantic-conventions/src/openinference/semconv/trace/__init__.py
index 7ff95b306..1c23ba22c 100644
--- a/python/openinference-semantic-conventions/src/openinference/semconv/trace/__init__.py
+++ b/python/openinference-semantic-conventions/src/openinference/semconv/trace/__init__.py
@@ -164,6 +164,10 @@ class MessageAttributes:
The JSON string representing the arguments passed to the function
during a function call.
"""
+ MESSAGE_TOOL_CALL_ID = "message.tool_call_id"
+ """
+ The id of the tool call.
+ """
class MessageContentAttributes:
@@ -270,6 +274,10 @@ class ToolCallAttributes:
Attributes for a tool call
"""
+ TOOL_CALL_ID = "tool_call.id"
+ """
+ The id of the tool call.
+ """
TOOL_CALL_FUNCTION_NAME = "tool_call.function.name"
"""
The name of function that is being called during a tool call.
diff --git a/python/openinference-semantic-conventions/tests/openinference/semconv/test_attributes.py b/python/openinference-semantic-conventions/tests/openinference/semconv/test_attributes.py
new file mode 100644
index 000000000..638da3c4f
--- /dev/null
+++ b/python/openinference-semantic-conventions/tests/openinference/semconv/test_attributes.py
@@ -0,0 +1,209 @@
+from typing import Any, Dict, Mapping, Type
+
+from openinference.semconv.resource import ResourceAttributes
+from openinference.semconv.trace import (
+ DocumentAttributes,
+ EmbeddingAttributes,
+ ImageAttributes,
+ MessageAttributes,
+ MessageContentAttributes,
+ RerankerAttributes,
+ SpanAttributes,
+ ToolAttributes,
+ ToolCallAttributes,
+)
+
+
+class TestSpanAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(SpanAttributes)
+ assert _nested_dict(attributes) == {
+ "embedding": {
+ "embeddings": "EMBEDDING_EMBEDDINGS",
+ "model_name": "EMBEDDING_MODEL_NAME",
+ },
+ "input": {
+ "mime_type": "INPUT_MIME_TYPE",
+ "value": "INPUT_VALUE",
+ },
+ "llm": {
+ "function_call": "LLM_FUNCTION_CALL",
+ "input_messages": "LLM_INPUT_MESSAGES",
+ "invocation_parameters": "LLM_INVOCATION_PARAMETERS",
+ "model_name": "LLM_MODEL_NAME",
+ "output_messages": "LLM_OUTPUT_MESSAGES",
+ "prompt_template": {
+ "template": "LLM_PROMPT_TEMPLATE",
+ "variables": "LLM_PROMPT_TEMPLATE_VARIABLES",
+ "version": "LLM_PROMPT_TEMPLATE_VERSION",
+ },
+ "prompts": "LLM_PROMPTS",
+ "provider": "LLM_PROVIDER",
+ "system": "LLM_SYSTEM",
+ "token_count": {
+ "completion": "LLM_TOKEN_COUNT_COMPLETION",
+ "prompt": "LLM_TOKEN_COUNT_PROMPT",
+ "total": "LLM_TOKEN_COUNT_TOTAL",
+ },
+ "tools": "LLM_TOOLS",
+ },
+ "metadata": "METADATA",
+ "openinference": {
+ "span": {
+ "kind": "OPENINFERENCE_SPAN_KIND",
+ }
+ },
+ "output": {
+ "mime_type": "OUTPUT_MIME_TYPE",
+ "value": "OUTPUT_VALUE",
+ },
+ "retrieval": {
+ "documents": "RETRIEVAL_DOCUMENTS",
+ },
+ "session": {
+ "id": "SESSION_ID",
+ },
+ "tag": {
+ "tags": "TAG_TAGS",
+ },
+ "tool": {
+ "description": "TOOL_DESCRIPTION",
+ "name": "TOOL_NAME",
+ "parameters": "TOOL_PARAMETERS",
+ },
+ "user": {
+ "id": "USER_ID",
+ },
+ }
+
+
+class TestMessageAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(MessageAttributes)
+ assert _nested_dict(attributes) == {
+ "message": {
+ "content": "MESSAGE_CONTENT",
+ "contents": "MESSAGE_CONTENTS",
+ "function_call_arguments_json": "MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON",
+ "function_call_name": "MESSAGE_FUNCTION_CALL_NAME",
+ "name": "MESSAGE_NAME",
+ "role": "MESSAGE_ROLE",
+ "tool_call_id": "MESSAGE_TOOL_CALL_ID",
+ "tool_calls": "MESSAGE_TOOL_CALLS",
+ }
+ }
+
+
+class TestMessageContentAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(MessageContentAttributes)
+ assert _nested_dict(attributes) == {
+ "message_content": {
+ "image": "MESSAGE_CONTENT_IMAGE",
+ "text": "MESSAGE_CONTENT_TEXT",
+ "type": "MESSAGE_CONTENT_TYPE",
+ }
+ }
+
+
+class TestImageAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(ImageAttributes)
+ assert _nested_dict(attributes) == {
+ "image": {
+ "url": "IMAGE_URL",
+ }
+ }
+
+
+class TestDocumentAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(DocumentAttributes)
+ assert _nested_dict(attributes) == {
+ "document": {
+ "content": "DOCUMENT_CONTENT",
+ "id": "DOCUMENT_ID",
+ "metadata": "DOCUMENT_METADATA",
+ "score": "DOCUMENT_SCORE",
+ }
+ }
+
+
+class TestRerankerAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(RerankerAttributes)
+ assert _nested_dict(attributes) == {
+ "reranker": {
+ "input_documents": "RERANKER_INPUT_DOCUMENTS",
+ "model_name": "RERANKER_MODEL_NAME",
+ "output_documents": "RERANKER_OUTPUT_DOCUMENTS",
+ "query": "RERANKER_QUERY",
+ "top_k": "RERANKER_TOP_K",
+ }
+ }
+
+
+class TestEmbeddingAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(EmbeddingAttributes)
+ assert _nested_dict(attributes) == {
+ "embedding": {
+ "text": "EMBEDDING_TEXT",
+ "vector": "EMBEDDING_VECTOR",
+ }
+ }
+
+
+class TestToolCallAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(ToolCallAttributes)
+ assert _nested_dict(attributes) == {
+ "tool_call": {
+ "function": {
+ "arguments": "TOOL_CALL_FUNCTION_ARGUMENTS_JSON",
+ "name": "TOOL_CALL_FUNCTION_NAME",
+ },
+ "id": "TOOL_CALL_ID",
+ },
+ }
+
+
+class TestToolAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(ToolAttributes)
+ assert _nested_dict(attributes) == {
+ "tool": {
+ "json_schema": "TOOL_JSON_SCHEMA",
+ }
+ }
+
+
+class TestResourceAttributes:
+ def test_nesting(self) -> None:
+ attributes = _flat_dict(ResourceAttributes)
+ assert _nested_dict(attributes) == {
+ "openinference": {
+ "project": {
+ "name": "PROJECT_NAME",
+ }
+ }
+ }
+
+
+def _flat_dict(cls: Type[Any]) -> Dict[str, str]:
+ return {v: k for k, v in cls.__dict__.items() if k.isupper()}
+
+
+def _nested_dict(
+ attributes: Mapping[str, str],
+) -> Dict[str, Any]:
+ nested_attributes: Dict[str, Any] = {}
+ for name, value in attributes.items():
+ trie = nested_attributes
+ keys = name.split(".")
+ for key in keys[:-1]:
+ if key not in trie:
+ trie[key] = {}
+ trie = trie[key]
+ trie[keys[-1]] = value
+ return nested_attributes
diff --git a/python/openinference-semantic-conventions/tests/openinference/semconv/test_enums.py b/python/openinference-semantic-conventions/tests/openinference/semconv/test_enums.py
new file mode 100644
index 000000000..aa1cba5cc
--- /dev/null
+++ b/python/openinference-semantic-conventions/tests/openinference/semconv/test_enums.py
@@ -0,0 +1,54 @@
+from openinference.semconv.trace import (
+ OpenInferenceLLMProviderValues,
+ OpenInferenceLLMSystemValues,
+ OpenInferenceMimeTypeValues,
+ OpenInferenceSpanKindValues,
+)
+
+
+class TestOpenInferenceSpanKindValues:
+ def test_values(self) -> None:
+ assert {e.name: e.value for e in OpenInferenceSpanKindValues} == {
+ "AGENT": "AGENT",
+ "CHAIN": "CHAIN",
+ "EMBEDDING": "EMBEDDING",
+ "EVALUATOR": "EVALUATOR",
+ "GUARDRAIL": "GUARDRAIL",
+ "LLM": "LLM",
+ "RERANKER": "RERANKER",
+ "RETRIEVER": "RETRIEVER",
+ "TOOL": "TOOL",
+ "UNKNOWN": "UNKNOWN",
+ }
+
+
+class TestOpenInferenceMimeTypeValues:
+ def test_values(self) -> None:
+ assert {e.name: e.value for e in OpenInferenceMimeTypeValues} == {
+ "JSON": "application/json",
+ "TEXT": "text/plain",
+ }
+
+
+class TestOpenInferenceLLMSystemValues:
+ def test_values(self) -> None:
+ assert {e.name: e.value for e in OpenInferenceLLMSystemValues} == {
+ "ANTHROPIC": "anthropic",
+ "COHERE": "cohere",
+ "MISTRALAI": "mistralai",
+ "OPENAI": "openai",
+ "VERTEXAI": "vertexai",
+ }
+
+
+class TestOpenInferenceLLMProviderValues:
+ def test_values(self) -> None:
+ assert {e.name: e.value for e in OpenInferenceLLMProviderValues} == {
+ "ANTHROPIC": "anthropic",
+ "AWS": "aws",
+ "AZURE": "azure",
+ "COHERE": "cohere",
+ "GOOGLE": "google",
+ "MISTRALAI": "mistralai",
+ "OPENAI": "openai",
+ }
diff --git a/python/openinference-semantic-conventions/tests/openinference/semconv/test_version.py b/python/openinference-semantic-conventions/tests/openinference/semconv/test_version.py
deleted file mode 100644
index daba2fabb..000000000
--- a/python/openinference-semantic-conventions/tests/openinference/semconv/test_version.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""
-This is a dummy test to ensure that every package has one test.
-"""
-
-from openinference.semconv.version import __version__ as semconv_version
-
-
-def test_version() -> None:
- print(semconv_version)
diff --git a/spec/semantic_conventions.md b/spec/semantic_conventions.md
index 97c0f6600..dd712cc38 100644
--- a/spec/semantic_conventions.md
+++ b/spec/semantic_conventions.md
@@ -7,83 +7,88 @@ operations used by applications. These conventions are used to populate the `att
The following attributes are reserved and MUST be supported by all OpenInference Tracing SDKs:
-| Attribute | Type | Example | Description |
-| -------------------------------------- | --------------------------- | -------------------------------------------------------------------------- | ----------------------------------------------------------------------------- |
-| `document.content` | String | `"This is a sample document content."` | The content of a retrieved document |
-| `document.id` | String/Integer | `"1234"` or `1` | Unique identifier for a document |
-| `document.metadata` | JSON String | `"{'author': 'John Doe', 'date': '2023-09-09'}"` | Metadata associated with a document |
-| `document.score` | Float | `0.98` | Score representing the relevance of a document |
-| `embedding.embeddings` | List of objects† | `[{"embedding.vector": [...], "embedding.text": "hello"}]` | List of embedding objects including text and vector data |
-| `embedding.model_name` | String | `"BERT-base"` | Name of the embedding model used |
-| `embedding.text` | String | `"hello world"` | The text represented in the embedding |
-| `embedding.vector` | List of floats | `[0.123, 0.456, ...]` | The embedding vector consisting of a list of floats |
-| `exception.escaped` | Boolean | `true` | Indicator if the exception has escaped the span's scope |
-| `exception.message` | String | `"Null value encountered"` | Detailed message describing the exception |
-| `exception.stacktrace` | String | `"at app.main(app.java:16)"` | The stack trace of the exception |
-| `exception.type` | String | `"NullPointerException"` | The type of exception that was thrown |
-| `image.url` | String | `"https://sample-link-to-image.jpg"` | The link to the image or its base64 encoding |
-| `input.mime_type` | String | `"text/plain"` or `"application/json"` | MIME type representing the format of `input.value` |
-| `input.value` | String | `"{'query': 'What is the weather today?'}"` | The input value to an operation |
-| `llm.function_call` | JSON String | `"{function_name: 'add', args: [1, 2]}"` | Object recording details of a function call in models or APIs |
-| `llm.input_messages` | List of objects† | `[{"message.role": "user", "message.content": "hello"}]` | List of messages sent to the LLM in a chat API request |
-| `llm.invocation_parameters` | JSON string | `"{model_name: 'gpt-3', temperature: 0.7}"` | Parameters used during the invocation of an LLM or API |
-| `llm.provider` | String | `openai`, `azure` | The hosting provider of the llm, e.x. `azure` |
-| `llm.system` | String | `anthropic`, `openai` | The AI product as identified by the client or server instrumentation. |
-| `llm.model_name` | String | `"gpt-3.5-turbo"` | The name of the language model being utilized |
-| `llm.output_messages` | List of objects† | `[{"message.role": "user", "message.content": "hello"}]` | List of messages received from the LLM in a chat API request |
-| `llm.prompt_template.template` | String | `"Weather forecast for {city} on {date}"` | Template used to generate prompts as Python f-strings |
-| `llm.prompt_template.variables` | JSON String | `{ context: "", subject: "math" }` | JSON of key value pairs applied to the prompt template |
-| `llm.prompt_template.version` | String | `"v1.0"` | The version of the prompt template |
-| `llm.token_count.completion` | Integer | `15` | The number of tokens in the completion |
-| `llm.token_count.prompt` | Integer | `5` | The number of tokens in the prompt |
-| `llm.token_count.total` | Integer | `20` | Total number of tokens, including prompt and completion |
-| `llm.tools` | List of objects† | `[{"tool": {"json_schema": "{}"}, ...]` | List of tools that are advertised to the LLM to be able to call |
-| `message.content` | String | `"What's the weather today?"` | The content of a message in a chat |
-| `message.contents` | List of objects† | `[{"message_content.type": "text", "message_content.text": "Hello"}, ...]` | The message contents to the llm, it is an array of `message_content` objects. |
-| `message.function_call_arguments_json` | JSON String | `"{ 'x': 2 }"` | The arguments to the function call in JSON |
-| `message.function_call_name` | String | `"multiply"` or `"subtract"` | Function call function name |
-| `message.role` | String | `"user"` or `"system"` | Role of the entity in a message (e.g., user, system) |
-| `message.tool_calls` | List of objects† | `[{"tool_call.function.name": "get_current_weather"}]` | List of tool calls (e.g. function calls) generated by the LLM |
-| `messagecontent.type` | String | `"text"` or `"image"` | The type of the content, such as "text" or "image". |
-| `messagecontent.text` | String | `"This is a sample text"` | The text content of the message, if the type is "text". |
-| `messagecontent.image` | Image Object | `{"image.url": "https://sample-link-to-image.jpg"}` | The image content of the message, if the type is "image". |
-| `metadata` | JSON String | `"{'author': 'John Doe', 'date': '2023-09-09'}"` | Metadata associated with a span |
-| `openinference.span.kind` | String | `"CHAIN"` | The kind of span (e.g., `CHAIN`, `LLM`, `RETRIEVER`, `RERANKER`) |
-| `output.mime_type` | String | `"text/plain"` or `"application/json"` | MIME type representing the format of `output.value` |
-| `output.value` | String | `"Hello, World!"` | The output value of an operation |
-| `reranker.input_documents` | List of objects† | `[{"document.id": "1", "document.score": 0.9, "document.content": "..."}]` | List of documents as input to the reranker |
-| `reranker.model_name` | String | `"cross-encoder/ms-marco-MiniLM-L-12-v2"` | Model name of the reranker |
-| `reranker.output_documents` | List of objects† | `[{"document.id": "1", "document.score": 0.9, "document.content": "..."}]` | List of documents outputted by the reranker |
-| `reranker.query` | String | `"How to format timestamp?"` | Query parameter of the reranker |
-| `reranker.top_k` | Integer | 3 | Top K parameter of the reranker |
-| `retrieval.documents` | List of objects† | `[{"document.id": "1", "document.score": 0.9, "document.content": "..."}]` | List of retrieved documents |
-| `session.id` | String | `"26bcd3d2-cad2-443d-a23c-625e47f3324a"` | Unique identifier for a session |
-| `tag.tags` | List of strings | ["shopping", "travel"] | List of tags to give the span a category |
-| `tool.description` | String | `"An API to get weather data."` | Description of the tool's purpose and functionality |
-| `tool.json_schema` | JSON String | `"{'type': 'function', 'function': {'name': 'get_weather'}}"` | The json schema of a tool input |
-| `tool.name` | String | `"WeatherAPI"` | The name of the tool being utilized |
-| `tool.parameters` | JSON string | `"{ 'a': 'int' }"` | The parameters definition for invoking the tool |
-| `tool_call.function.arguments` | JSON string | `"{'city': 'London'}"` | The arguments for the function being invoked by a tool call |
-| `tool_call.function.name` | String | `get_current_weather` | The name of the function being invoked by a tool call |
-| `user.id` | String | `"9328ae73-7141-4f45-a044-8e06192aa465"` | Unique identifier for a user |
+| Attribute | Type | Example | Description |
+|----------------------------------------|-----------------------------|----------------------------------------------------------------------------|---------------------------------------------------------------------------------------|
+| `document.content` | String | `"This is a sample document content."` | The content of a retrieved document |
+| `document.id` | String/Integer | `"1234"` or `1` | Unique identifier for a document |
+| `document.metadata` | JSON String | `"{'author': 'John Doe', 'date': '2023-09-09'}"` | Metadata associated with a document |
+| `document.score` | Float | `0.98` | Score representing the relevance of a document |
+| `embedding.embeddings` | List of objects† | `[{"embedding.vector": [...], "embedding.text": "hello"}]` | List of embedding objects including text and vector data |
+| `embedding.model_name` | String | `"BERT-base"` | Name of the embedding model used |
+| `embedding.text` | String | `"hello world"` | The text represented in the embedding |
+| `embedding.vector` | List of floats | `[0.123, 0.456, ...]` | The embedding vector consisting of a list of floats |
+| `exception.escaped` | Boolean | `true` | Indicator if the exception has escaped the span's scope |
+| `exception.message` | String | `"Null value encountered"` | Detailed message describing the exception |
+| `exception.stacktrace` | String | `"at app.main(app.java:16)"` | The stack trace of the exception |
+| `exception.type` | String | `"NullPointerException"` | The type of exception that was thrown |
+| `image.url` | String | `"https://sample-link-to-image.jpg"` | The link to the image or its base64 encoding |
+| `input.mime_type` | String | `"text/plain"` or `"application/json"` | MIME type representing the format of `input.value` |
+| `input.value` | String | `"{'query': 'What is the weather today?'}"` | The input value to an operation |
+| `llm.function_call` | JSON String | `"{function_name: 'add', args: [1, 2]}"` | Object recording details of a function call in models or APIs |
+| `llm.input_messages` | List of objects† | `[{"message.role": "user", "message.content": "hello"}]` | List of messages sent to the LLM in a chat API request |
+| `llm.invocation_parameters` | JSON string | `"{model_name: 'gpt-3', temperature: 0.7}"` | Parameters used during the invocation of an LLM or API |
+| `llm.provider`                          | String                       | `openai`, `azure`                                                          | The hosting provider of the llm, e.g. `azure`                                           |
+| `llm.system` | String | `anthropic`, `openai` | The AI product as identified by the client or server instrumentation. |
+| `llm.model_name` | String | `"gpt-3.5-turbo"` | The name of the language model being utilized |
+| `llm.output_messages` | List of objects† | `[{"message.role": "user", "message.content": "hello"}]` | List of messages received from the LLM in a chat API request |
+| `llm.prompt_template.template` | String | `"Weather forecast for {city} on {date}"` | Template used to generate prompts as Python f-strings |
+| `llm.prompt_template.variables` | JSON String | `{ context: "", subject: "math" }` | JSON of key value pairs applied to the prompt template |
+| `llm.prompt_template.version` | String | `"v1.0"` | The version of the prompt template |
+| `llm.token_count.completion` | Integer | `15` | The number of tokens in the completion |
+| `llm.token_count.prompt` | Integer | `5` | The number of tokens in the prompt |
+| `llm.token_count.total` | Integer | `20` | Total number of tokens, including prompt and completion |
+| `llm.tools` | List of objects† | `[{"tool": {"json_schema": "{}"}, ...]` | List of tools that are advertised to the LLM to be able to call |
+| `message.content` | String | `"What's the weather today?"` | The content of a message in a chat |
+| `message.contents` | List of objects† | `[{"message_content.type": "text", "message_content.text": "Hello"}, ...]` | The message contents to the llm, it is an array of `message_content` objects. |
+| `message.function_call_arguments_json` | JSON String | `"{ 'x': 2 }"` | The arguments to the function call in JSON |
+| `message.function_call_name` | String | `"multiply"` or `"subtract"` | Function call function name |
+| `message.tool_call_id` | String | `"call_62136355"` | Tool call result identifier corresponding to `tool_call.id` |
+| `message.role` | String | `"user"` or `"system"` | Role of the entity in a message (e.g., user, system) |
+| `message.tool_calls` | List of objects† | `[{"tool_call.function.name": "get_current_weather"}]` | List of tool calls (e.g. function calls) generated by the LLM |
+| `message_content.type`                  | String                       | `"text"` or `"image"`                                                      | The type of the content, such as "text" or "image".                                     |
+| `message_content.text`                  | String                       | `"This is a sample text"`                                                  | The text content of the message, if the type is "text".                                 |
+| `message_content.image`                 | Image Object                 | `{"image.url": "https://sample-link-to-image.jpg"}`                        | The image content of the message, if the type is "image".                               |
+| `metadata` | JSON String | `"{'author': 'John Doe', 'date': '2023-09-09'}"` | Metadata associated with a span |
+| `openinference.span.kind` | String | `"CHAIN"` | The kind of span (e.g., `CHAIN`, `LLM`, `RETRIEVER`, `RERANKER`) |
+| `output.mime_type` | String | `"text/plain"` or `"application/json"` | MIME type representing the format of `output.value` |
+| `output.value` | String | `"Hello, World!"` | The output value of an operation |
+| `reranker.input_documents` | List of objects† | `[{"document.id": "1", "document.score": 0.9, "document.content": "..."}]` | List of documents as input to the reranker |
+| `reranker.model_name` | String | `"cross-encoder/ms-marco-MiniLM-L-12-v2"` | Model name of the reranker |
+| `reranker.output_documents` | List of objects† | `[{"document.id": "1", "document.score": 0.9, "document.content": "..."}]` | List of documents outputted by the reranker |
+| `reranker.query` | String | `"How to format timestamp?"` | Query parameter of the reranker |
+| `reranker.top_k` | Integer | 3 | Top K parameter of the reranker |
+| `retrieval.documents` | List of objects† | `[{"document.id": "1", "document.score": 0.9, "document.content": "..."}]` | List of retrieved documents |
+| `session.id` | String | `"26bcd3d2-cad2-443d-a23c-625e47f3324a"` | Unique identifier for a session |
+| `tag.tags` | List of strings | ["shopping", "travel"] | List of tags to give the span a category |
+| `tool.description` | String | `"An API to get weather data."` | Description of the tool's purpose and functionality |
+| `tool.json_schema` | JSON String | `"{'type': 'function', 'function': {'name': 'get_weather'}}"` | The json schema of a tool input |
+| `tool.name` | String | `"WeatherAPI"` | The name of the tool being utilized |
+| `tool.id`                               | String                       | `"call_62136355"`                                                          | The identifier for the result of the tool call (corresponding to `tool_call.id`)        |
+| `tool.parameters` | JSON string | `"{ 'a': 'int' }"` | The parameters definition for invoking the tool |
+| `tool_call.function.arguments` | JSON string | `"{'city': 'London'}"` | The arguments for the function being invoked by a tool call |
+| `tool_call.function.name` | String | `"get_current_weather"` | The name of the function being invoked by a tool call |
+| `tool_call.id`                          | String                       | `"call_62136355"`                                                          | The id of a tool call (useful when there is more than one call at the same time)        |
+| `user.id` | String | `"9328ae73-7141-4f45-a044-8e06192aa465"` | Unique identifier for a user |
† To get a list of objects exported as OpenTelemetry span attributes, flattening of the list is necessary as
shown in the examples below.
-`llm.system` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+`llm.system` has the following list of well-known values. If one of them applies, then the respective value MUST be
+used; otherwise, a custom value MAY be used.
| Value | Description |
-| ----------- | ----------- |
+|-------------|-------------|
| `anthropic` | Anthropic |
| `openai` | OpenAI |
| `vertexai` | Vertex AI |
| `cohere` | Cohere |
| `mistralai` | Mistral AI |
-`llm.provider` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+`llm.provider` has the following list of well-known values. If one of them applies, then the respective value MUST be
+used; otherwise, a custom value MAY be used.
| Value | Description |
-| ----------- | --------------- |
+|-------------|-----------------|
| `anthropic` | Anthropic |
| `openai` | OpenAI |
| `vertexai` | Vertex AI |
@@ -108,17 +113,17 @@ for i, obj in enumerate(messages):
```javascript
const messages = [
- { "message.role": "user", "message.content": "hello" },
- {
- "message.role": "assistant",
- "message.content": "hi",
- },
+ { "message.role": "user", "message.content": "hello" },
+ {
+ "message.role": "assistant",
+ "message.content": "hi",
+ },
];
for (const [i, obj] of messages.entries()) {
- for (const [key, value] of Object.entries(obj)) {
- span.setAttribute(`input.messages.${i}.${key}`, value);
- }
+ for (const [key, value] of Object.entries(obj)) {
+ span.setAttribute(`input.messages.${i}.${key}`, value);
+ }
}
```