diff --git a/airbyte-cdk/python/CHANGELOG.md b/airbyte-cdk/python/CHANGELOG.md
index e83176b4172a..cac9e4e7ab3a 100644
--- a/airbyte-cdk/python/CHANGELOG.md
+++ b/airbyte-cdk/python/CHANGELOG.md
@@ -1,5 +1,8 @@
 # Changelog
 
+## 0.1.24
+Added TypeTransformer class, used to mutate record value types so they adhere to the jsonschema definition.
+
 ## 0.1.23
 Added the ability to use caching for efficient synchronization of nested streams.
 
diff --git a/airbyte-cdk/python/airbyte_cdk/sources/abstract_source.py b/airbyte-cdk/python/airbyte_cdk/sources/abstract_source.py
index c8d2c47883fa..2f28b025db33 100644
--- a/airbyte-cdk/python/airbyte_cdk/sources/abstract_source.py
+++ b/airbyte-cdk/python/airbyte_cdk/sources/abstract_source.py
@@ -26,7 +26,8 @@
 import copy
 from abc import ABC, abstractmethod
 from datetime import datetime
-from typing import Any, Iterator, List, Mapping, MutableMapping, Optional, Tuple
+from functools import lru_cache
+from typing import Any, Dict, Iterator, List, Mapping, MutableMapping, Optional, Tuple
 
 from airbyte_cdk.logger import AirbyteLogger
 from airbyte_cdk.models import (
@@ -35,6 +36,7 @@
     AirbyteMessage,
     AirbyteRecordMessage,
     AirbyteStateMessage,
+    AirbyteStream,
     ConfiguredAirbyteCatalog,
     ConfiguredAirbyteStream,
     Status,
@@ -45,6 +47,7 @@
 from airbyte_cdk.sources.streams import Stream
 from airbyte_cdk.sources.streams.http.http import HttpStream
 from airbyte_cdk.sources.utils.schema_helpers import InternalConfig, split_config
+from airbyte_cdk.sources.utils.transform import TypeTransformer
 
 
 class AbstractSource(Source, ABC):
@@ -70,6 +73,9 @@ def streams(self, config: Mapping[str, Any]) -> List[Stream]:
         :return: A list of the streams in this source connector.
         """
 
+    # Stream name to instance map for applying output object transformation
+    _stream_to_instance_map: Dict[str, AirbyteStream] = {}
+
     @property
     def name(self) -> str:
         """Source name"""
@@ -101,6 +107,7 @@ def read(
         # TODO assert all streams exist in the connector
         # get the streams once in case the connector needs to make any queries to generate them
         stream_instances = {s.name: s for s in self.streams(config)}
+        self._stream_to_instance_map = stream_instances
         for configured_stream in catalog.streams:
             stream_instance = stream_instances.get(configured_stream.stream.name)
             if not stream_instance:
@@ -227,7 +234,25 @@ def _checkpoint_state(self, stream_name, stream_state, connector_state, logger):
         connector_state[stream_name] = stream_state
         return AirbyteMessage(type=MessageType.STATE, state=AirbyteStateMessage(data=connector_state))
 
+    @lru_cache(maxsize=None)
+    def _get_stream_transformer_and_schema(self, stream_name: str) -> Tuple[TypeTransformer, dict]:
+        """
+        Look up a stream's transformer object and jsonschema by stream name.
+        This function is called for every record, so the costly get_json_schema
+        operation is cached.
+        :param stream_name name of the stream from the catalog.
+        :return tuple of the stream's transformer object and its discovered json schema.
+        """
+        stream_instance = self._stream_to_instance_map.get(stream_name)
+        return stream_instance.transformer, stream_instance.get_json_schema()
+
     def _as_airbyte_record(self, stream_name: str, data: Mapping[str, Any]):
         now_millis = int(datetime.now().timestamp()) * 1000
+        transformer, schema = self._get_stream_transformer_and_schema(stream_name)
+        # Transform object fields according to config. Most likely you will
+        # need this to normalize values against the json schema. By default no
+        # action is taken unless configured. See
+        # docs/connector-development/cdk-python/schemas.md for details.
+        transformer.transform(data, schema)
         message = AirbyteRecordMessage(stream=stream_name, data=data, emitted_at=now_millis)
         return AirbyteMessage(type=MessageType.RECORD, record=message)
diff --git a/airbyte-cdk/python/airbyte_cdk/sources/streams/core.py b/airbyte-cdk/python/airbyte_cdk/sources/streams/core.py
index 491452751573..5547e28b6083 100644
--- a/airbyte-cdk/python/airbyte_cdk/sources/streams/core.py
+++ b/airbyte-cdk/python/airbyte_cdk/sources/streams/core.py
@@ -31,6 +31,7 @@
 from airbyte_cdk.logger import AirbyteLogger
 from airbyte_cdk.models import AirbyteStream, SyncMode
 from airbyte_cdk.sources.utils.schema_helpers import ResourceSchemaLoader
+from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer
 
 
 def package_name_from_class(cls: object) -> str:
@@ -47,6 +48,9 @@ class Stream(ABC):
     # Use self.logger in subclasses to log any messages
     logger = AirbyteLogger()  # TODO use native "logging" loggers with custom handlers
 
+    # TypeTransformer object to perform output data transformation
+    transformer: TypeTransformer = TypeTransformer(TransformConfig.NoTransform)
+
     @property
     def name(self) -> str:
         """
diff --git a/airbyte-cdk/python/airbyte_cdk/sources/utils/transform.py b/airbyte-cdk/python/airbyte_cdk/sources/utils/transform.py
new file mode 100644
index 000000000000..607b5b680f14
--- /dev/null
+++ b/airbyte-cdk/python/airbyte_cdk/sources/utils/transform.py
@@ -0,0 +1,196 @@
+#
+# MIT License
+#
+# Copyright (c) 2020 Airbyte
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+from distutils.util import strtobool
+from enum import Flag, auto
+from typing import Any, Callable, Dict
+
+from airbyte_cdk.logger import AirbyteLogger
+from jsonschema import Draft7Validator, validators
+
+logger = AirbyteLogger()
+
+
+class TransformConfig(Flag):
+    """
+    TypeTransformer class config. Configs can be combined using the bitwise or operator, e.g.
+    ```
+    TransformConfig.DefaultSchemaNormalization | TransformConfig.CustomSchemaNormalization
+    ```
+    """
+
+    # No action taken, default behaviour. Cannot be combined with any other options.
+    NoTransform = auto()
+    # Applies default type casting with the default_convert method, which converts
+    # values by simple type casting to the specified jsonschema type.
+    DefaultSchemaNormalization = auto()
+    # Allows registering a custom type transformation callback. Can be combined
+    # with DefaultSchemaNormalization; in that case default type casting is
+    # applied before the custom one.
+    CustomSchemaNormalization = auto()
+
+
+class TypeTransformer:
+    """
+    Class for transforming an object before output.
+    """
+
+    _custom_normalizer: Callable[[Any, Dict[str, Any]], Any] = None
+
+    def __init__(self, config: TransformConfig):
+        """
+        Initialize a TypeTransformer instance.
+        :param config transform config to apply to objects.
+        """
+        if TransformConfig.NoTransform in config and config != TransformConfig.NoTransform:
+            raise Exception("NoTransform option cannot be combined with other flags.")
+        self._config = config
+        all_validators = {
+            key: self.__get_normalizer(key, orig_validator)
+            for key, orig_validator in Draft7Validator.VALIDATORS.items()
+            # Do not validate fields we do not transform, for maximum performance.
+            if key in ["type", "array", "$ref", "properties", "items"]
+        }
+        self._normalizer = validators.create(meta_schema=Draft7Validator.META_SCHEMA, validators=all_validators)
+
+    def registerCustomTransform(self, normalization_callback: Callable[[Any, Dict[str, Any]], Any]) -> Callable:
+        """
+        Register a custom normalization callback.
+        :param normalization_callback function to be used for value
+        normalization. Takes the original value and the field's type schema and
+        returns the normalized value. See docs/connector-development/cdk-python/schemas.md
+        for details.
+        :return the same callback, which makes registerCustomTransform usable as a decorator.
+        """
+        if TransformConfig.CustomSchemaNormalization not in self._config:
+            raise Exception("Please set TransformConfig.CustomSchemaNormalization config before registering custom normalizer")
+        self._custom_normalizer = normalization_callback
+        return normalization_callback
+
+    def __normalize(self, original_item: Any, subschema: Dict[str, Any]) -> Any:
+        """
+        Apply the configured transform functions to an object's field.
+        :param original_item original value of the field.
+        :param subschema part of the jsonschema containing the field's type/format data.
+        :return final field value.
+        """
+        if TransformConfig.DefaultSchemaNormalization in self._config:
+            original_item = self.default_convert(original_item, subschema)
+
+        if self._custom_normalizer:
+            original_item = self._custom_normalizer(original_item, subschema)
+        return original_item
+
+    @staticmethod
+    def default_convert(original_item: Any, subschema: Dict[str, Any]) -> Any:
+        """
+        Default transform function, used when the TransformConfig.DefaultSchemaNormalization flag is set.
+        :param original_item original value of the field.
+        :param subschema part of the jsonschema containing the field's type/format data.
+        :return transformed field value.
+        """
+        target_type = subschema.get("type")
+        if original_item is None and "null" in target_type:
+            return None
+        if isinstance(target_type, list):
+            # A jsonschema type can be either a single string or an array of type
+            # strings. If the type is ambiguous, i.e. there is more than one type
+            # besides "null", do no conversion and return the original value. If
+            # the type array holds a single type plus "null", e.g.
+            # {"type": ["integer", "null"]}, convert the value to the specified type.
+            target_type = [t for t in target_type if t != "null"]
+            if len(target_type) != 1:
+                return original_item
+            target_type = target_type[0]
+        try:
+            if target_type == "string":
+                return str(original_item)
+            elif target_type == "number":
+                return float(original_item)
+            elif target_type == "integer":
+                return int(original_item)
+            elif target_type == "boolean":
+                if isinstance(original_item, str):
+                    return strtobool(original_item) == 1
+                return bool(original_item)
+        except ValueError:
+            return original_item
+        return original_item
+
+    def __get_normalizer(self, schema_key: str, original_validator: Callable):
+        """
+        Traverse an object's fields using the native jsonschema validator and apply the normalization function.
+        :param schema_key related json schema key that is currently being validated/normalized.
+        :param original_validator native jsonschema validator callback.
+        """
+
+        def normalizer(validator_instance: Callable, val: Any, instance: Any, schema: Dict[str, Any]):
+            """
+            Jsonschema validator callable used to validate an instance. We
+            override the default Draft7Validator to perform value transformation
+            before validation takes place. We take no action when an object does
+            not conform to the json schema other than logging a warning; the
+            jsonschema algorithm is only used to traverse the object's fields.
+            See the description of the validators parameter at
+            https://python-jsonschema.readthedocs.io/en/stable/creating/?highlight=validators.create#jsonschema.validators.create
+            for details.
+            """
+
+            def resolve(subschema):
+                if "$ref" in subschema:
+                    _, resolved = validator_instance.resolver.resolve(subschema["$ref"])
+                    return resolved
+                return subschema
+
+            if schema_key == "type" and instance is not None:
+                if "object" in val and isinstance(instance, dict):
+                    for k, subschema in schema.get("properties", {}).items():
+                        if k in instance:
+                            subschema = resolve(subschema)
+                            instance[k] = self.__normalize(instance[k], subschema)
+                elif "array" in val and isinstance(instance, list):
+                    subschema = schema.get("items", {})
+                    subschema = resolve(subschema)
+                    for index, item in enumerate(instance):
+                        instance[index] = self.__normalize(item, subschema)
+            # Run the native jsonschema traverse algorithm after field normalization is done.
+            yield from original_validator(validator_instance, val, instance, schema)
+
+        return normalizer
+
+    def transform(self, record: Dict[str, Any], schema: Dict[str, Any]):
+        """
+        Normalize and validate according to config.
+        :param record record instance for normalization/transformation. All modifications are done in place on the existing object.
+        :param schema object's jsonschema for normalization.
+        """
+        if TransformConfig.NoTransform in self._config:
+            return
+        normalizer = self._normalizer(schema)
+        for e in normalizer.iter_errors(record):
+            # Just calling normalizer.validate() would throw an exception on the
+            # first validation error and stop processing the rest of the record,
+ """ + logger.warn(e.message) diff --git a/airbyte-cdk/python/setup.py b/airbyte-cdk/python/setup.py index 47b58281d845..85595c665726 100644 --- a/airbyte-cdk/python/setup.py +++ b/airbyte-cdk/python/setup.py @@ -35,7 +35,7 @@ setup( name="airbyte-cdk", - version="0.1.23", + version="0.1.24", description="A framework for writing Airbyte Connectors.", long_description=README, long_description_content_type="text/markdown", diff --git a/airbyte-cdk/python/unit_tests/sources/test_source.py b/airbyte-cdk/python/unit_tests/sources/test_source.py index 9a297c201549..d8913353dc26 100644 --- a/airbyte-cdk/python/unit_tests/sources/test_source.py +++ b/airbyte-cdk/python/unit_tests/sources/test_source.py @@ -34,6 +34,7 @@ from airbyte_cdk.sources import AbstractSource, Source from airbyte_cdk.sources.streams.core import Stream from airbyte_cdk.sources.streams.http.http import HttpStream +from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer class MockSource(Source): @@ -81,6 +82,7 @@ def abstract_source(mocker): class MockHttpStream(MagicMock, HttpStream): url_base = "http://example.com" path = "/dummy/path" + get_json_schema = MagicMock() def supports_incremental(self): return True @@ -92,6 +94,7 @@ def __init__(self, *args, **kvargs): class MockStream(MagicMock, Stream): page_size = None + get_json_schema = MagicMock() def __init__(self, *args, **kvargs): MagicMock.__init__(self) @@ -145,8 +148,7 @@ def test_read_catalog(source): def test_internal_config(abstract_source, catalog): streams = abstract_source.streams(None) assert len(streams) == 2 - http_stream = streams[0] - non_http_stream = streams[1] + http_stream, non_http_stream = streams assert isinstance(http_stream, HttpStream) assert not isinstance(non_http_stream, HttpStream) http_stream.read_records.return_value = [{}] * 3 @@ -216,3 +218,44 @@ def test_internal_config_limit(abstract_source, catalog): logger_info_args = [call[0][0] for call in logger_mock.info.call_args_list] read_log_record = [_l for _l in logger_info_args if _l.startswith("Read")] assert read_log_record[0].startswith(f"Read {STREAM_LIMIT} ") + + +SCHEMA = {"type": "object", "properties": {"value": {"type": "string"}}} + + +def test_source_config_no_transform(abstract_source, catalog): + logger_mock = MagicMock() + streams = abstract_source.streams(None) + http_stream, non_http_stream = streams + http_stream.get_json_schema.return_value = non_http_stream.get_json_schema.return_value = SCHEMA + http_stream.read_records.return_value, non_http_stream.read_records.return_value = [[{"value": 23}] * 5] * 2 + records = [r for r in abstract_source.read(logger=logger_mock, config={}, catalog=catalog, state={})] + assert len(records) == 2 * 5 + assert [r.record.data for r in records] == [{"value": 23}] * 2 * 5 + assert http_stream.get_json_schema.call_count == 1 + assert non_http_stream.get_json_schema.call_count == 1 + + +def test_source_config_transform(abstract_source, catalog): + logger_mock = MagicMock() + streams = abstract_source.streams(None) + http_stream, non_http_stream = streams + http_stream.transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization) + non_http_stream.transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization) + http_stream.get_json_schema.return_value = non_http_stream.get_json_schema.return_value = SCHEMA + http_stream.read_records.return_value, non_http_stream.read_records.return_value = [{"value": 23}], [{"value": 23}] + records = [r for r in abstract_source.read(logger=logger_mock, config={}, 
+    assert len(records) == 2
+    assert [r.record.data for r in records] == [{"value": "23"}] * 2
+
+
+def test_source_config_transform_and_no_transform(abstract_source, catalog):
+    logger_mock = MagicMock()
+    streams = abstract_source.streams(None)
+    http_stream, non_http_stream = streams
+    http_stream.transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
+    http_stream.get_json_schema.return_value = non_http_stream.get_json_schema.return_value = SCHEMA
+    http_stream.read_records.return_value, non_http_stream.read_records.return_value = [{"value": 23}], [{"value": 23}]
+    records = [r for r in abstract_source.read(logger=logger_mock, config={}, catalog=catalog, state={})]
+    assert len(records) == 2
+    assert [r.record.data for r in records] == [{"value": "23"}, {"value": 23}]
diff --git a/airbyte-cdk/python/unit_tests/sources/utils/test_transform.py b/airbyte-cdk/python/unit_tests/sources/utils/test_transform.py
new file mode 100644
index 000000000000..d2d7b70668b8
--- /dev/null
+++ b/airbyte-cdk/python/unit_tests/sources/utils/test_transform.py
@@ -0,0 +1,239 @@
+#
+# MIT License
+#
+# Copyright (c) 2020 Airbyte
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+import json
+
+import pytest
+from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer
+
+SIMPLE_SCHEMA = {"type": "object", "properties": {"value": {"type": "string"}}}
+COMPLEX_SCHEMA = {
+    "type": "object",
+    "properties": {
+        "value": {"type": "boolean", "format": "even", "is_positive": True},
+        "prop": {"type": "string"},
+        "prop_with_null": {"type": ["string", "null"]},
+        "number_prop": {"type": "number"},
+        "int_prop": {"type": ["integer", "null"]},
+        "too_many_types": {"type": ["boolean", "null", "string"]},
+        "def": {
+            "type": "object",
+            "properties": {"dd": {"$ref": "#/definitions/my_type"}},
+        },
+        "array": {"type": "array", "items": {"$ref": "#/definitions/str_type"}},
+        "nested": {"$ref": "#/definitions/nested_type"},
+        "list_of_lists": {
+            "type": "array",
+            "items": {"type": "array", "items": {"type": "string"}},
+        },
+    },
+    "definitions": {
+        "str_type": {"type": "string"},
+        "nested_type": {"type": "object", "properties": {"a": {"type": "string"}}},
+    },
+}
+VERY_NESTED_SCHEMA = {
+    "type": ["null", "object"],
+    "properties": {
+        "very_nested_value": {
+            "type": ["null", "object"],
+            "properties": {
+                "very_nested_value": {
+                    "type": ["null", "object"],
+                    "properties": {
+                        "very_nested_value": {
+                            "type": ["null", "object"],
+                            "properties": {
+                                "very_nested_value": {
+                                    "type": ["null", "object"],
+                                    "properties": {"very_nested_value": {"type": ["null", "number"]}},
+                                }
+                            },
+                        }
+                    },
+                }
+            },
+        }
+    },
+}
+
+
+@pytest.mark.parametrize(
+    "schema, actual, expected",
+    [
+        (
+            SIMPLE_SCHEMA,
+            {"value": 12},
+            {"value": "12"},
+        ),
+        (
+            COMPLEX_SCHEMA,
+            {"value": 1, "array": ["111", 111, {1: 111}]},
+            {"value": True, "array": ["111", "111", "{1: 111}"]},
+        ),
+        (
+            COMPLEX_SCHEMA,
+            {"value": 1, "list_of_lists": [["111"], [111], [11], [{1: 1}]]},
+            {"value": True, "list_of_lists": [["111"], ["111"], ["11"], ["{1: 1}"]]},
+        ),
+        (
+            COMPLEX_SCHEMA,
+            {"value": 1, "nested": {"a": [1, 2, 3]}},
+            {"value": True, "nested": {"a": "[1, 2, 3]"}},
+        ),
+        (
+            COMPLEX_SCHEMA,
+            {"value": "false", "nested": {"a": [1, 2, 3]}},
+            {"value": False, "nested": {"a": "[1, 2, 3]"}},
+        ),
+        (COMPLEX_SCHEMA, {}, {}),
+        (COMPLEX_SCHEMA, {"int_prop": "12"}, {"int_prop": 12}),
+        # Skip invalidly formatted fields and process the other fields.
+        (
+            COMPLEX_SCHEMA,
+            {"prop": 12, "number_prop": "aa12", "array": [12]},
+            {"prop": "12", "number_prop": "aa12", "array": ["12"]},
+        ),
+        # Field too_many_types has an ambiguous type, skip formatting
+        (
+            COMPLEX_SCHEMA,
+            {"prop": 12, "too_many_types": 1212, "array": [12]},
+            {"prop": "12", "too_many_types": 1212, "array": ["12"]},
+        ),
+        # Test null field
+        (
+            COMPLEX_SCHEMA,
+            {"prop": None, "array": [12]},
+            {"prop": "None", "array": ["12"]},
+        ),
+        # If the field can be null, do not convert
+        (
+            COMPLEX_SCHEMA,
+            {"prop_with_null": None, "array": [12]},
+            {"prop_with_null": None, "array": ["12"]},
+        ),
+        (
+            VERY_NESTED_SCHEMA,
+            {"very_nested_value": {"very_nested_value": {"very_nested_value": {"very_nested_value": {"very_nested_value": "2"}}}}},
+            {"very_nested_value": {"very_nested_value": {"very_nested_value": {"very_nested_value": {"very_nested_value": 2.0}}}}},
+        ),
+        (
+            VERY_NESTED_SCHEMA,
+            {"very_nested_value": {"very_nested_value": None}},
+            {"very_nested_value": {"very_nested_value": None}},
+        ),
+        # Object without properties
+        (
+            {"type": "object"},
+            {"value": 12},
+            {"value": 12},
+        ),
+        (
+            # Array without items
+            {"type": "object", "properties": {"value": {"type": "array"}}},
+            {"value": [12]},
+            {"value": [12]},
+        ),
+        (
+            # Array without items and the value is not an array
+            {"type": "object", "properties": {"value": {"type": "array"}}},
+            {"value": "12"},
+            {"value": "12"},
+        ),
+        (
+            # Schema root is not an object, no conversion should happen
+            {"type": "integer"},
+            {"value": "12"},
+            {"value": "12"},
+        ),
+        (
+            # More than one type besides null, no conversion should happen
+            {"type": "object", "properties": {"value": {"type": ["string", "boolean", "null"]}}},
+            {"value": 12},
+            {"value": 12},
+        ),
+        (
+            # oneOf is not supported, no conversion for one_of_value should happen
+            {"type": "object", "properties": {"one_of_value": {"oneOf": ["string", "boolean", "null"]}, "value_2": {"type": "string"}}},
+            {"one_of_value": 12, "value_2": 12},
+            {"one_of_value": 12, "value_2": "12"},
+        ),
+    ],
+)
+def test_transform(schema, actual, expected):
+    t = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
+    t.transform(actual, schema)
+    assert json.dumps(actual) == json.dumps(expected)
+
+
+def test_transform_wrong_config():
+    with pytest.raises(Exception, match="NoTransform option cannot be combined with other flags."):
+        TypeTransformer(TransformConfig.NoTransform | TransformConfig.DefaultSchemaNormalization)
+
+    with pytest.raises(Exception, match="Please set TransformConfig.CustomSchemaNormalization config before registering custom normalizer"):
+
+        class NotAStream:
+            transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
+
+            @transformer.registerCustomTransform
+            def transform_cb(instance, schema):
+                pass
+
+
+def test_custom_transform():
+    class NotAStream:
+        transformer = TypeTransformer(TransformConfig.CustomSchemaNormalization)
+
+        @transformer.registerCustomTransform
+        def transform_cb(instance, schema):
+            # Check that no default conversion is applied
+            assert instance == 12
+            assert schema == SIMPLE_SCHEMA["properties"]["value"]
+            return "transformed"
+
+    s = NotAStream()
+    obj = {"value": 12}
+    s.transformer.transform(obj, SIMPLE_SCHEMA)
+    assert obj == {"value": "transformed"}
+
+
+def test_custom_transform_with_default_normalization():
+    class NotAStream:
+        transformer = TypeTransformer(TransformConfig.CustomSchemaNormalization | TransformConfig.DefaultSchemaNormalization)
+
+        @transformer.registerCustomTransform
+        def transform_cb(instance, schema):
+            # Check that the default conversion is applied
+            assert instance == "12"
+            assert schema == SIMPLE_SCHEMA["properties"]["value"]
+            return "transformed"
+
+    s = NotAStream()
+    obj = {"value": 12}
+    s.transformer.transform(obj, SIMPLE_SCHEMA)
+    assert obj == {"value": "transformed"}
diff --git a/docs/connector-development/cdk-python/schemas.md b/docs/connector-development/cdk-python/schemas.md
index 204b303423df..71c5e2e68b0e 100644
--- a/docs/connector-development/cdk-python/schemas.md
+++ b/docs/connector-development/cdk-python/schemas.md
@@ -25,3 +25,86 @@ def get_json_schema(self):
     return schema
 ```
 
+## Type transformation
+
+It is important to ensure that output data conforms to the declared json schema, because the destination loading this data into tables may enforce the schema strictly (e.g. when data is stored in a SQL database, you can't put a CHAR value into an INTEGER column). If the API output changes (which is almost guaranteed to happen over time) or there is a minor mistake in the jsonschema definition, data syncs could break because of mismatched datatypes.
+
+To remain robust in operation, the CDK can automatically mutate an object to align it with the declared schema before outputting it to the destination. All streams that inherit from the airbyte_cdk.sources.streams.core.Stream class have this transform configuration available. It is _disabled_ by default and can be configured per stream within a source connector.
+
+### Default type transformation
+Here's how you can configure the TypeTransformer:
+
+```python
+from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer
+from airbyte_cdk.sources.streams.core import Stream
+
+class MyStream(Stream):
+    ...
+    transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
+    ...
+```
+In this case the default transformation is applied. For example, if you have a schema like this
+```json
+{"type": "object", "properties": {"value": {"type": "string"}}}
+```
+and the source API returns an object with a non-string type, the value is cast to string automatically:
+```json
+{"value": 12} -> {"value": "12"}
+```
+It also works on complex types:
+```json
+{"value": {"unexpected_object": "value"}} -> {"value": "{'unexpected_object': 'value'}"}
+```
+and on objects inside arrays and on types referenced by the $ref attribute.
+
+If a value cannot be cast (e.g. the string "asdf" cannot be cast to an integer), the field retains its original value. Schema type transformation supports all jsonschema types, nested objects/arrays and reference types. Types described as an array of more than one type (besides "null") and types under the oneOf/anyOf keywords won't be transformed.
+
+*Note:* This transformation is done by the source, not the stream itself, i.e. overriding the "read_records" method in your stream doesn't affect object transformation. All transformations are done in place by modifying the output object before passing it to the "get_updated_state" method, so "get_updated_state" receives the transformed object.
+
+### Custom schema type transformation
+The default schema type transformation performs simple type casting. Sometimes you want a more sophisticated transform, like making a "date-time" field compliant with the RFC 3339 standard (see the sketch at the end of this page). In this case you can use a custom schema type transformation:
+```python
+class MyStream(Stream):
+    ...
+    transformer = TypeTransformer(TransformConfig.CustomSchemaNormalization)
+    ...
+
+    @transformer.registerCustomTransform
+    def transform_function(original_value: Any, field_schema: Dict[str, Any]) -> Any:
+        # transformed_value = ...
+        return transformed_value
+```
+Here original_value is the initial field value and field_schema is the part of the jsonschema describing the field's type. For the schema
+```json
+{"type": "object", "properties": {"value": {"type": "string", "format": "date-time"}}}
+```
+the field_schema variable would be equal to
+```json
+{"type": "string", "format": "date-time"}
+```
+In this case the default transformation is skipped and only the custom transformation applies. If you want to run both the default and the custom transformation, configure the transformer object by combining the config flags:
+```python
+transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization | TransformConfig.CustomSchemaNormalization)
+```
+In this case the custom transformation is applied after the default type transformation. Note that the order of the flags doesn't matter; the default transformation always runs before the custom one.
+
+### Performance consideration
+
+Transforming each object on the fly adds some processing time per object. How much depends on the object/schema complexity and on the hardware configuration.
+
+We benchmarked with the Facebook ads_insights schema (a complex schema with objects nested inside arrays of objects and a lot of references) and a sample object.
+Here is the average transform time per single object, in seconds:
+```
+regular transform:
+0.0008423403530008121
+
+transform without type casting (but values still written to dict/array):
+0.000776215762666349
+
+transform without actual value setting (but iterating through object properties):
+0.0006788729513330812
+
+just traverse/validate through json schema and object fields:
+0.0006139181846665452
+```
+On an AMD Ryzen 7 5800X this comes to 0.8 milliseconds per object. As you can see, most of the time (~75%) is spent in the jsonschema traverse/validation routine and very little (less than 10%) in the actual conversion. Processing time could be reduced by skipping the jsonschema type checking, but then there would be no warnings about possible object/jsonschema inconsistencies.
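+
+If you want to gauge the overhead on your own schema, here is a minimal sketch using the standard timeit module. The schema and record are toy stand-ins rather than the ads_insights fixtures used for the numbers above, so expect different absolute timings:
+
+```python
+import copy
+import timeit
+
+from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer
+
+SCHEMA = {"type": "object", "properties": {"value": {"type": "string"}}}
+RECORD = {"value": 12}
+
+transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
+
+# transform() mutates the record in place, so give each run a fresh copy.
+elapsed = timeit.timeit(lambda: transformer.transform(copy.deepcopy(RECORD), SCHEMA), number=10000)
+print(f"{elapsed / 10000:.7f} seconds per object")
+```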
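+
+### Example: custom date-time normalization
+
+Putting the pieces together, here is a minimal sketch of a custom transform that rewrites "date-time" fields into RFC 3339 strings. It assumes, purely for illustration, that the source returns timestamps as epoch seconds; adjust the parsing to whatever your API actually returns:
+
+```python
+from datetime import datetime, timezone
+from typing import Any, Dict
+
+from airbyte_cdk.sources.streams.core import Stream
+from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer
+
+
+class MyStream(Stream):
+    # Run the default type casting first, then the custom callback.
+    transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization | TransformConfig.CustomSchemaNormalization)
+
+    @transformer.registerCustomTransform
+    def transform_function(original_value: Any, field_schema: Dict[str, Any]) -> Any:
+        # Only touch fields declared as date-time; leave everything else as-is.
+        if field_schema.get("format") == "date-time" and original_value is not None:
+            # Assumption: the source emits epoch seconds (as int or numeric string).
+            return datetime.fromtimestamp(int(original_value), tz=timezone.utc).isoformat()
+        return original_value
+```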