From a9c8bc0e0a480c5ba86d5914a3e1cdcf98f3de43 Mon Sep 17 00:00:00 2001 From: Eve Johns <105383716+eve-johns@users.noreply.github.com> Date: Mon, 17 Oct 2022 13:58:04 -0700 Subject: [PATCH 001/156] f-string cleanup #6068 (#6082) * fix f string issue * removed one space * Add changelog * fixed return format Co-authored-by: Leah Antkiewicz --- .changes/unreleased/Under the Hood-20221017-155844.yaml | 7 +++++++ core/dbt/config/project.py | 2 +- core/dbt/context/providers.py | 6 +++--- core/dbt/contracts/graph/manifest.py | 2 +- core/dbt/exceptions.py | 4 +--- core/dbt/graph/selector.py | 2 +- core/dbt/parser/base.py | 2 +- core/dbt/parser/generic_test_builders.py | 2 +- core/dbt/parser/sources.py | 2 +- core/dbt/task/runnable.py | 2 +- 10 files changed, 18 insertions(+), 13 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221017-155844.yaml diff --git a/.changes/unreleased/Under the Hood-20221017-155844.yaml b/.changes/unreleased/Under the Hood-20221017-155844.yaml new file mode 100644 index 00000000000..84e6675351c --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221017-155844.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Clean up string formatting +time: 2022-10-17T15:58:44.676549-04:00 +custom: + Author: eve-johns + Issue: "6068" + PR: "6082" diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index d2aaee699a3..6e9770026c4 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -668,7 +668,7 @@ def hashed_name(self): def get_selector(self, name: str) -> Union[SelectionSpec, bool]: if name not in self.selectors: raise RuntimeException( - f"Could not find selector named {name}, expected one of " f"{list(self.selectors)}" + f"Could not find selector named {name}, expected one of {list(self.selectors)}" ) return self.selectors[name]["definition"] diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index c053d28d1df..597b526e384 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -182,7 +182,7 @@ def dispatch( return macro searched = ", ".join(repr(a) for a in attempts) - msg = f"In dispatch: No macro named '{macro_name}' found\n" f" Searched for: {searched}" + msg = f"In dispatch: No macro named '{macro_name}' found\n Searched for: {searched}" raise CompilationException(msg) @@ -220,12 +220,12 @@ def _repack_args(self, name: str, package: Optional[str]) -> List[str]: def validate_args(self, name: str, package: Optional[str]): if not isinstance(name, str): raise CompilationException( - f"The name argument to ref() must be a string, got " f"{type(name)}" + f"The name argument to ref() must be a string, got {type(name)}" ) if package is not None and not isinstance(package, str): raise CompilationException( - f"The package argument to ref() must be a string or None, got " f"{type(package)}" + f"The package argument to ref() must be a string or None, got {type(package)}" ) def __call__(self, *args: str) -> RelationProxy: diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index 7e4c42fce76..a2d22e6e315 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -499,7 +499,7 @@ def _update_into(dest: MutableMapping[str, T], new_item: T): existing = dest[unique_id] if new_item.original_file_path != existing.original_file_path: raise dbt.exceptions.RuntimeException( - f"cannot update a {new_item.resource_type} to have a new file " f"path!" + f"cannot update a {new_item.resource_type} to have a new file path!" 
) dest[unique_id] = new_item diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index b9539ea19bd..db824e19bf1 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -976,9 +976,7 @@ def raise_patch_targets_not_found(patches): def _fix_dupe_msg(path_1: str, path_2: str, name: str, type_name: str) -> str: if path_1 == path_2: - return ( - f"remove one of the {type_name} entries for {name} in this file:\n" f" - {path_1!s}\n" - ) + return f"remove one of the {type_name} entries for {name} in this file:\n - {path_1!s}\n" else: return ( f"remove the {type_name} entry for {name} in one of these files:\n" diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index 3cb5f415be9..49b73fc71c4 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -26,7 +26,7 @@ def get_package_names(nodes): def alert_non_existence(raw_spec, nodes): if len(nodes) == 0: - warn_or_error(f"The selection criterion '{str(raw_spec)}' does not match" f" any nodes") + warn_or_error(f"The selection criterion '{str(raw_spec)}' does not match any nodes") def can_select_indirectly(node): diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 2786a7c5744..4b9e666a421 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -347,7 +347,7 @@ def initial_config(self, fqn: List[str]) -> ContextConfig: ) else: raise InternalException( - f"Got an unexpected project version={config_version}, " f"expected 2" + f"Got an unexpected project version={config_version}, expected 2" ) def config_dict( diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index a0617f1689f..3dfb541cb8f 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -435,7 +435,7 @@ def tags(self) -> List[str]: tags = [tags] if not isinstance(tags, list): raise_compiler_error( - f"got {tags} ({type(tags)}) for tags, expected a list of " f"strings" + f"got {tags} ({type(tags)}) for tags, expected a list of strings" ) for tag in tags: if not isinstance(tag, str): diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 4757edab31e..57320c87f2e 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -150,7 +150,7 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinit if not isinstance(config, SourceConfig): raise InternalException( - f"Calculated a {type(config)} for a source, but expected " f"a SourceConfig" + f"Calculated a {type(config)} for a source, but expected a SourceConfig" ) default_database = self.root_project.credentials.database diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 39289b3cacb..64413df82c1 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -174,7 +174,7 @@ def _runtime_initialize(self): self._flattened_nodes.append(self.manifest.sources[uid]) else: raise InternalException( - f"Node selection returned {uid}, expected a node or a " f"source" + f"Node selection returned {uid}, expected a node or a source" ) self.num_nodes = len([n for n in self._flattened_nodes if not n.is_ephemeral_model]) From 35f7975d8f9e76c0ac324da1222c2fdb832054e2 Mon Sep 17 00:00:00 2001 From: Luke Bassett Date: Mon, 17 Oct 2022 15:58:31 -0500 Subject: [PATCH 002/156] Updated string formatting on non-f-strings. (#6086) * Updated string formatting on non-f-strings. Found all cases of strings separated by white space on a single line and removed white space separation. 
EX: "hello " "world" -> "hello world". * add changelog entry --- .changes/unreleased/Under the Hood-20221017-151511.yaml | 7 +++++++ core/dbt/adapters/base/impl.py | 2 +- core/dbt/clients/_jinja_blocks.py | 6 +++--- core/dbt/config/project.py | 2 +- core/dbt/context/base.py | 2 +- core/dbt/contracts/connection.py | 2 +- core/dbt/contracts/results.py | 2 +- core/dbt/graph/graph.py | 2 +- core/dbt/logger.py | 4 +--- core/dbt/parser/schemas.py | 2 +- core/dbt/parser/sources.py | 2 +- core/dbt/task/base.py | 2 +- core/dbt/task/compile.py | 4 ++-- core/dbt/task/freshness.py | 2 +- core/dbt/task/runnable.py | 2 +- core/dbt/utils.py | 4 ++-- tests/functional/artifacts/expected_manifest.py | 2 +- 17 files changed, 27 insertions(+), 22 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221017-151511.yaml diff --git a/.changes/unreleased/Under the Hood-20221017-151511.yaml b/.changes/unreleased/Under the Hood-20221017-151511.yaml new file mode 100644 index 00000000000..cbdcf04beb3 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221017-151511.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Fixed extra whitespace in strings introduced by black. +time: 2022-10-17T15:15:11.499246-05:00 +custom: + Author: luke-bassett + Issue: "1350" + PR: "6086" diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 3f8a1e6f78f..3c301c2e7f4 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -581,7 +581,7 @@ def list_relations_without_caching(self, schema_relation: BaseRelation) -> List[ :rtype: List[self.Relation] """ raise NotImplementedException( - "`list_relations_without_caching` is not implemented for this " "adapter!" + "`list_relations_without_caching` is not implemented for this adapter!" ) ### diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py index 761c6dfcb4d..c1ef31acf44 100644 --- a/core/dbt/clients/_jinja_blocks.py +++ b/core/dbt/clients/_jinja_blocks.py @@ -367,9 +367,9 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): if self.current: linecount = self.data[: self.current.end].count("\n") + 1 dbt.exceptions.raise_compiler_error( - ( - "Reached EOF without finding a close tag for " "{} (searched from line {})" - ).format(self.current.block_type_name, linecount) + ("Reached EOF without finding a close tag for {} (searched from line {})").format( + self.current.block_type_name, linecount + ) ) if collect_raw_data: diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 6e9770026c4..9521dd29882 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -248,7 +248,7 @@ class PartialProject(RenderComponents): project_name: Optional[str] = field( metadata=dict( description=( - "The name of the project. This should always be set and will not " "be rendered" + "The name of the project. 
This should always be set and will not be rendered" ) ) ) diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index bf334a7d11f..68b5edb98c1 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -126,7 +126,7 @@ def __new__(mcls, name, bases, dct): class Var: - UndefinedVarError = "Required var '{}' not found in config:\nVars " "supplied to {} = {}" + UndefinedVarError = "Required var '{}' not found in config:\nVars supplied to {} = {}" _VAR_NOTSET = object() def __init__( diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py index 831230d661d..a32bb443099 100644 --- a/core/dbt/contracts/connection.py +++ b/core/dbt/contracts/connection.py @@ -94,7 +94,7 @@ def handle(self): self._handle.resolve(self) except RecursionError as exc: raise InternalException( - "A connection's open() method attempted to read the " "handle value" + "A connection's open() method attempted to read the handle value" ) from exc return self._handle diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index cb0a6e2a67e..a3b7ce2b506 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -339,7 +339,7 @@ def process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput criteria = result.node.freshness if criteria is None: raise InternalException( - "Somehow evaluated a freshness result for a source " "that has no freshness criteria!" + "Somehow evaluated a freshness result for a source that has no freshness criteria!" ) return SourceFreshnessOutput( unique_id=unique_id, diff --git a/core/dbt/graph/graph.py b/core/dbt/graph/graph.py index acfc43c2142..2dda596e073 100644 --- a/core/dbt/graph/graph.py +++ b/core/dbt/graph/graph.py @@ -90,7 +90,7 @@ def get_subset_graph(self, selected: Iterable[UniqueId]) -> "Graph": for node in include_nodes: if node not in new_graph: raise ValueError( - "Couldn't find model '{}' -- does it exist or is " "it disabled?".format(node) + "Couldn't find model '{}' -- does it exist or is it disabled?".format(node) ) return Graph(new_graph) diff --git a/core/dbt/logger.py b/core/dbt/logger.py index 3787b9a769b..4bbcfca4c06 100644 --- a/core/dbt/logger.py +++ b/core/dbt/logger.py @@ -28,9 +28,7 @@ colorama.init(wrap=True) STDOUT_LOG_FORMAT = "{record.message}" -DEBUG_LOG_FORMAT = ( - "{record.time:%Y-%m-%d %H:%M:%S.%f%z} " "({record.thread_name}): " "{record.message}" -) +DEBUG_LOG_FORMAT = "{record.time:%Y-%m-%d %H:%M:%S.%f%z} ({record.thread_name}): {record.message}" def get_secret_env() -> List[str]: diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index b73722952ce..8b22427cb39 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -298,7 +298,7 @@ def _parse_generic_test( except ParsingException as exc: context = _trimmed(str(target)) - msg = "Invalid test config given in {}:" "\n\t{}\n\t@: {}".format( + msg = "Invalid test config given in {}:\n\t{}\n\t@: {}".format( target.original_file_path, exc.msg, context ) raise ParsingException(msg) from exc diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 57320c87f2e..1c55281db56 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -317,7 +317,7 @@ def get_unused_msg( unused_tables: Dict[SourceKey, Optional[Set[str]]], ) -> str: msg = [ - "During parsing, dbt encountered source overrides that had no " "target:", + "During parsing, dbt encountered source overrides that had no target:", ] for key, table_names in unused_tables.items(): patch = 
self.manifest.source_patches[key] diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index b20dd76b10d..1b067d79af8 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -461,7 +461,7 @@ def on_skip(self): print_run_result_error(result=self.skip_cause, newline=False) if self.skip_cause is None: # mypy appeasement raise InternalException( - "Skip cause not set but skip was somehow caused by " "an ephemeral failure" + "Skip cause not set but skip was somehow caused by an ephemeral failure" ) # set an error so dbt will exit with an error code error_message = ( diff --git a/core/dbt/task/compile.py b/core/dbt/task/compile.py index b091ae76099..740d35d37e9 100644 --- a/core/dbt/task/compile.py +++ b/core/dbt/task/compile.py @@ -64,7 +64,7 @@ def _get_deferred_manifest(self) -> Optional[WritableManifest]: state = self.previous_state if state is None: raise RuntimeException( - "Received a --defer argument, but no value was provided " "to --state" + "Received a --defer argument, but no value was provided to --state" ) if state.manifest is None: @@ -77,7 +77,7 @@ def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]): return if self.manifest is None: raise InternalException( - "Expected to defer to manifest, but there is no runtime " "manifest to defer from!" + "Expected to defer to manifest, but there is no runtime manifest to defer from!" ) self.manifest.merge_from_artifact( adapter=adapter, diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index fa16bc5dd80..ab256334271 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -135,7 +135,7 @@ def execute(self, compiled_node, manifest): # broken, raise! if compiled_node.loaded_at_field is None: raise InternalException( - "Got to execute for source freshness of a source that has no " "loaded_at_field!" + "Got to execute for source freshness of a source that has no loaded_at_field!" ) relation = self.adapter.Relation.create_from_source(compiled_node) diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 64413df82c1..af0de610c98 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -459,7 +459,7 @@ def run(self): if len(self._flattened_nodes) == 0: with TextOnly(): fire_event(EmptyLine()) - msg = "Nothing to do. Try checking your model " "configs and model specification args" + msg = "Nothing to do. 
Try checking your model configs and model specification args" warn_or_error(msg, log_fmt=warning_tag("{}")) result = self.get_result( results=[], diff --git a/core/dbt/utils.py b/core/dbt/utils.py index ccae3601446..b7cc6475319 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -491,11 +491,11 @@ def submit(*args, **kwargs): self, fn, *args = args elif not args: raise TypeError( - "descriptor 'submit' of 'SingleThreadedExecutor' object needs " "an argument" + "descriptor 'submit' of 'SingleThreadedExecutor' object needs an argument" ) else: raise TypeError( - "submit expected at least 1 positional argument, " "got %d" % (len(args) - 1) + "submit expected at least 1 positional argument, got %d" % (len(args) - 1) ) fut = concurrent.futures.Future() try: diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 23e396400e9..32c9dcfbfa1 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -1275,7 +1275,7 @@ def expected_references_manifest(project): }, "test.view_summary": { "block_contents": ( - "A view of the summary of the ephemeral copy of the " "seed data" + "A view of the summary of the ephemeral copy of the seed data" ), "name": "view_summary", "original_file_path": docs_path, From ff2f1f42c394d27ff8d49ca7e285fcf2b76faa05 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Tue, 18 Oct 2022 12:20:30 -0700 Subject: [PATCH 003/156] Working solution serialization bug. (#5874) * Create functors to initialize event types with str-type member attributes. Before this change, the spec of various classes expected base_msg and msg params to be str's. This assumption did not always hold true. post_init hooks ensures the spec is obeyed. * Add new changelog. * Add msg type change functor to a few other events that could use it. Co-authored-by: Mila Page --- .../unreleased/Fixes-20221016-173742.yaml | 8 +++++ core/dbt/events/adapter_endpoint.py | 1 + core/dbt/events/base_types.py | 17 +++++++++++ core/dbt/events/types.py | 30 +++++++++++-------- 4 files changed, 43 insertions(+), 13 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221016-173742.yaml diff --git a/.changes/unreleased/Fixes-20221016-173742.yaml b/.changes/unreleased/Fixes-20221016-173742.yaml new file mode 100644 index 00000000000..11d4a8c85f4 --- /dev/null +++ b/.changes/unreleased/Fixes-20221016-173742.yaml @@ -0,0 +1,8 @@ +kind: Fixes +body: Add functors to ensure event types with str-type attributes are initialized + to spec, even when provided non-str type params. +time: 2022-10-16T17:37:42.846683-07:00 +custom: + Author: versusfacit + Issue: "5436" + PR: "5874" diff --git a/core/dbt/events/adapter_endpoint.py b/core/dbt/events/adapter_endpoint.py index aff157ab611..68a73d8aecb 100644 --- a/core/dbt/events/adapter_endpoint.py +++ b/core/dbt/events/adapter_endpoint.py @@ -9,6 +9,7 @@ ) +# N.B. No guarantees for what type param msg is. @dataclass class AdapterLogger: name: str diff --git a/core/dbt/events/base_types.py b/core/dbt/events/base_types.py index 489b70cb1ad..cd3275c02a9 100644 --- a/core/dbt/events/base_types.py +++ b/core/dbt/events/base_types.py @@ -99,6 +99,23 @@ def level_tag(self) -> str: return "error" +# Included to ensure classes with str-type message members are initialized correctly. 
+@dataclass # type: ignore[misc] +class AdapterEventStringFunctor: + def __post_init__(self): + super().__post_init__() + if not isinstance(self.base_msg, str): + self.base_msg = str(self.base_msg) + + +@dataclass # type: ignore[misc] +class EventStringFunctor: + def __post_init__(self): + super().__post_init__() + if not isinstance(self.msg, str): + self.msg = str(self.msg) + + # prevents an event from going to the file # This should rarely be used in core code. It is currently # only used in integration tests and for the 'clean' command. diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 60204138c36..f6e66f941d2 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -7,6 +7,8 @@ WarnLevel, ErrorLevel, Cache, + AdapterEventStringFunctor, + EventStringFunctor, ) from dbt.events.format import format_fancy_output_line, pluralize @@ -309,7 +311,7 @@ def message(self) -> str: @dataclass -class AdapterEventDebug(DebugLevel, pt.AdapterEventDebug): # noqa +class AdapterEventDebug(DebugLevel, AdapterEventStringFunctor, pt.AdapterEventDebug): # noqa def code(self): return "E001" @@ -318,7 +320,7 @@ def message(self): @dataclass -class AdapterEventInfo(InfoLevel, pt.AdapterEventInfo): # noqa +class AdapterEventInfo(InfoLevel, AdapterEventStringFunctor, pt.AdapterEventInfo): # noqa def code(self): return "E002" @@ -327,7 +329,7 @@ def message(self): @dataclass -class AdapterEventWarning(WarnLevel, pt.AdapterEventWarning): # noqa +class AdapterEventWarning(WarnLevel, AdapterEventStringFunctor, pt.AdapterEventWarning): # noqa def code(self): return "E003" @@ -336,7 +338,7 @@ def message(self): @dataclass -class AdapterEventError(ErrorLevel, pt.AdapterEventError): # noqa +class AdapterEventError(ErrorLevel, AdapterEventStringFunctor, pt.AdapterEventError): # noqa def code(self): return "E004" @@ -1218,7 +1220,9 @@ def message(self) -> str: # TODO: switch to storing structured info and calling get_target_failure_msg @dataclass -class InvalidDisabledSourceInTestNode(WarnLevel, pt.InvalidDisabledSourceInTestNode): +class InvalidDisabledSourceInTestNode( + WarnLevel, EventStringFunctor, pt.InvalidDisabledSourceInTestNode +): def code(self): return "I050" @@ -1227,7 +1231,7 @@ def message(self) -> str: @dataclass -class InvalidRefInTestNode(DebugLevel, pt.InvalidRefInTestNode): +class InvalidRefInTestNode(DebugLevel, EventStringFunctor, pt.InvalidRefInTestNode): def code(self): return "I051" @@ -1334,7 +1338,7 @@ def message(self) -> str: @dataclass -class MacroEventInfo(InfoLevel, pt.MacroEventInfo): +class MacroEventInfo(InfoLevel, EventStringFunctor, pt.MacroEventInfo): def code(self): return "M011" @@ -1343,7 +1347,7 @@ def message(self) -> str: @dataclass -class MacroEventDebug(DebugLevel, pt.MacroEventDebug): +class MacroEventDebug(DebugLevel, EventStringFunctor, pt.MacroEventDebug): def code(self): return "M012" @@ -2261,7 +2265,7 @@ def message(self) -> str: @dataclass -class RunResultError(ErrorLevel, pt.RunResultError): +class RunResultError(ErrorLevel, EventStringFunctor, pt.RunResultError): def code(self): return "Z024" @@ -2299,7 +2303,7 @@ def message(self) -> str: @dataclass -class FirstRunResultError(ErrorLevel, pt.FirstRunResultError): +class FirstRunResultError(ErrorLevel, EventStringFunctor, pt.FirstRunResultError): def code(self): return "Z028" @@ -2308,7 +2312,7 @@ def message(self) -> str: @dataclass -class AfterFirstRunResultError(ErrorLevel, pt.AfterFirstRunResultError): +class AfterFirstRunResultError(ErrorLevel, EventStringFunctor, 
pt.AfterFirstRunResultError): def code(self): return "Z029" @@ -2446,7 +2450,7 @@ def message(self) -> str: @dataclass -class GeneralWarningMsg(WarnLevel, pt.GeneralWarningMsg): +class GeneralWarningMsg(WarnLevel, EventStringFunctor, pt.GeneralWarningMsg): def code(self): return "Z046" @@ -2476,7 +2480,7 @@ def message(self) -> str: @dataclass -class RunResultWarningMessage(WarnLevel, pt.RunResultWarningMessage): +class RunResultWarningMessage(WarnLevel, EventStringFunctor, pt.RunResultWarningMessage): def code(self): return "Z049" From a7670a3ab956cfb53b7828de42b69f946296cea3 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Wed, 19 Oct 2022 22:52:32 -0700 Subject: [PATCH 004/156] Add unit tests for recent stringifier functors added to events library. (#6095) Co-authored-by: Mila Page --- tests/unit/test_events.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 4f403d04473..c2064b84c1a 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -75,6 +75,15 @@ def test_formatting(self): event = AdapterEventDebug(name="dbt_tests", base_msg="boop{x}boop", args=()) assert "boop{x}boop" in event.message() + # ensure AdapterLogger and subclasses makes all base_msg members + # of type string; when someone writes logger.debug(a) where a is + # any non-string object + event = AdapterEventDebug(name="dbt_tests", base_msg=[1,2,3], args=(3,)) + assert isinstance(event.base_msg, str) + + event = MacroEventDebug(msg=[1,2,3]) + assert isinstance(event.msg, str) + class TestEventCodes: From 53ae3255760255c4efbef5142cd86008619c86d2 Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Thu, 20 Oct 2022 12:43:30 -0400 Subject: [PATCH 005/156] CT-1099: Migrate test 071_commented_yaml_regression_3568_tests (#6106) --- .../models/schema.yml | 3 --- .../test_all_comment_yml_files.py | 20 ------------------- tests/functional/schema_tests/fixtures.py | 4 ++++ .../schema_tests/test_schema_v2_tests.py | 19 ++++++++++++++++++ 4 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 test/integration/071_commented_yaml_regression_3568_tests/models/schema.yml delete mode 100644 test/integration/071_commented_yaml_regression_3568_tests/test_all_comment_yml_files.py diff --git a/test/integration/071_commented_yaml_regression_3568_tests/models/schema.yml b/test/integration/071_commented_yaml_regression_3568_tests/models/schema.yml deleted file mode 100644 index 35ab0fade65..00000000000 --- a/test/integration/071_commented_yaml_regression_3568_tests/models/schema.yml +++ /dev/null @@ -1,3 +0,0 @@ -# models/schema.yml -# only comments here -# https://github.com/dbt-labs/dbt-core/issues/3568 \ No newline at end of file diff --git a/test/integration/071_commented_yaml_regression_3568_tests/test_all_comment_yml_files.py b/test/integration/071_commented_yaml_regression_3568_tests/test_all_comment_yml_files.py deleted file mode 100644 index c9f6b2d99f1..00000000000 --- a/test/integration/071_commented_yaml_regression_3568_tests/test_all_comment_yml_files.py +++ /dev/null @@ -1,20 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestAllCommentYMLIsOk(DBTIntegrationTest): - @property - def schema(self): - return "071_commented_yaml" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_parses_with_all_comment_yml(self): - try: - self.run_dbt(['parse']) - except TypeError: - assert False, '`dbt parse` failed with 
a yaml file that is all comments with the same exception as 3568' - except: - assert False, '`dbt parse` failed with a yaml file that is all comments' diff --git a/tests/functional/schema_tests/fixtures.py b/tests/functional/schema_tests/fixtures.py index 40fe9f5a086..7e0dfbaca58 100644 --- a/tests/functional/schema_tests/fixtures.py +++ b/tests/functional/schema_tests/fixtures.py @@ -536,6 +536,10 @@ """ +all_quotes_schema__schema_yml = """# models/schema.yml +# only comments here, which should be okay! +# https://github.com/dbt-labs/dbt-core/issues/3568""" + models_v2__render_test_cli_arg_models__schema_yml = """ version: 2 diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py index 07c3b87e63e..00c14cd711b 100644 --- a/tests/functional/schema_tests/test_schema_v2_tests.py +++ b/tests/functional/schema_tests/test_schema_v2_tests.py @@ -93,6 +93,7 @@ macro_resolution_order_models__config_yml, macro_resolution_order_models__my_model_sql, alt_local_utils__macros__type_timestamp_sql, + all_quotes_schema__schema_yml, ) from dbt.exceptions import ParsingException, CompilationException from dbt.contracts.results import TestStatus @@ -991,6 +992,24 @@ def test_invalid_schema_file( assert re.search(r"'models' is not a list", str(exc)) +class TestCommentedSchema: + @pytest.fixture(scope="class") + def models(self): + return { + "schema.yml": all_quotes_schema__schema_yml, + "model.sql": invalid_schema_models__model_sql, + } + + def test_quoted_schema_file(self, project): + try: + # A schema file consisting entirely of quotes should not be a problem + run_dbt(['parse']) + except TypeError: + assert False, '`dbt parse` failed with a yaml file that is all comments with the same exception as 3568' + except Exception: + assert False, '`dbt parse` failed with a yaml file that is all comments' + + class TestWrongSpecificationBlock: @pytest.fixture(scope="class") def models(self): From 6c8609499ae3b6603ba89b61cde4013acc937ad7 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Thu, 20 Oct 2022 14:41:41 -0400 Subject: [PATCH 006/156] Add 'michelleark' to changie's core_team list (#6084) --- .changie.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changie.yaml b/.changie.yaml index 9d938a9c519..0744c5bb9c7 100644 --- a/.changie.yaml +++ b/.changie.yaml @@ -44,7 +44,7 @@ custom: footerFormat: | {{- $contributorDict := dict }} {{- /* any names added to this list should be all lowercase for later matching purposes */}} - {{- $core_team := list "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} + {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} {{- range $change := .Changes }} {{- $authorList := splitList " " $change.Custom.Author }} {{- /* loop through all authors for a PR */}} From 17b82661d24e4b94cc31dadd2ab2d39d4de764ee Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 21 Oct 2022 11:41:51 -0400 Subject: [PATCH 007/156] convert 027 cycle test (#6094) * convert 027 cycle test * remove no-op expect_pass=False * remove postgres from test names --- .../complex_cycle_models/model_a.sql | 2 - .../complex_cycle_models/model_b.sql | 4 -- .../complex_cycle_models/model_c.sql | 2 - .../complex_cycle_models/model_d.sql | 2 
- .../complex_cycle_models/model_e.sql | 2 - .../complex_cycle_models/readme | 7 -- .../simple_cycle_models/model_a.sql | 2 - .../simple_cycle_models/model_b.sql | 2 - .../027_cycle_tests/test_cycles.py | 37 ---------- tests/functional/cycles/test_cycles.py | 71 +++++++++++++++++++ 10 files changed, 71 insertions(+), 60 deletions(-) delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_a.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_b.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_c.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_d.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/model_e.sql delete mode 100644 test/integration/027_cycle_tests/complex_cycle_models/readme delete mode 100644 test/integration/027_cycle_tests/simple_cycle_models/model_a.sql delete mode 100644 test/integration/027_cycle_tests/simple_cycle_models/model_b.sql delete mode 100644 test/integration/027_cycle_tests/test_cycles.py create mode 100644 tests/functional/cycles/test_cycles.py diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_a.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_a.sql deleted file mode 100644 index 2cd691ea7b4..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_a.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select 1 as id diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_b.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_b.sql deleted file mode 100644 index da16daedfdb..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_b.sql +++ /dev/null @@ -1,4 +0,0 @@ - -select * from {{ ref('model_a') }} -union all -select * from {{ ref('model_e') }} diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_c.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_c.sql deleted file mode 100644 index 741b6cce028..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_c.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_b') }} diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_d.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_d.sql deleted file mode 100644 index 954ca668936..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_d.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_c') }} diff --git a/test/integration/027_cycle_tests/complex_cycle_models/model_e.sql b/test/integration/027_cycle_tests/complex_cycle_models/model_e.sql deleted file mode 100644 index 9f689ae55df..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/model_e.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_e') }} diff --git a/test/integration/027_cycle_tests/complex_cycle_models/readme b/test/integration/027_cycle_tests/complex_cycle_models/readme deleted file mode 100644 index 5f95aba0473..00000000000 --- a/test/integration/027_cycle_tests/complex_cycle_models/readme +++ /dev/null @@ -1,7 +0,0 @@ - -The cycle in this graph looks like: - -A -> B -> C -> D - ^ | - | | - +--- E <--+ diff --git a/test/integration/027_cycle_tests/simple_cycle_models/model_a.sql b/test/integration/027_cycle_tests/simple_cycle_models/model_a.sql deleted file mode 100644 index 741b6cce028..00000000000 --- a/test/integration/027_cycle_tests/simple_cycle_models/model_a.sql +++ /dev/null @@ -1,2 +0,0 @@ - 
-select * from {{ ref('model_b') }} diff --git a/test/integration/027_cycle_tests/simple_cycle_models/model_b.sql b/test/integration/027_cycle_tests/simple_cycle_models/model_b.sql deleted file mode 100644 index 67176d4b2b4..00000000000 --- a/test/integration/027_cycle_tests/simple_cycle_models/model_b.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('model_a') }} diff --git a/test/integration/027_cycle_tests/test_cycles.py b/test/integration/027_cycle_tests/test_cycles.py deleted file mode 100644 index 9312a76ac84..00000000000 --- a/test/integration/027_cycle_tests/test_cycles.py +++ /dev/null @@ -1,37 +0,0 @@ -from freezegun import freeze_time -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestSimpleCycle(DBTIntegrationTest): - - @property - def schema(self): - return "cycles_simple_027" - - @property - def models(self): - return "simple_cycle_models" - - @property - @use_profile('postgres') - def test_postgres_simple_cycle(self): - message = "Found a cycle.*" - with self.assertRaisesRegex(Exception, message): - self.run_dbt(["run"]) - -class TestComplexCycle(DBTIntegrationTest): - - @property - def schema(self): - return "cycles_complex_027" - - @property - def models(self): - return "complex_cycle_models" - - @property - @use_profile('postgres') - def test_postgres_simple_cycle(self): - message = "Found a cycle.*" - with self.assertRaisesRegex(Exception, message): - self.run_dbt(["run"]) diff --git a/tests/functional/cycles/test_cycles.py b/tests/functional/cycles/test_cycles.py new file mode 100644 index 00000000000..0e2cdcaf911 --- /dev/null +++ b/tests/functional/cycles/test_cycles.py @@ -0,0 +1,71 @@ +import pytest + +from dbt.tests.util import run_dbt + +model_a_sql = """ +select * from {{ ref('model_b') }} +""" + +model_b_sql = """ +select * from {{ ref('model_a') }} +""" + +complex_cycle__model_a_sql = """ +select 1 as id +""" + +complex_cycle__model_b_sql = """ +select * from {{ ref('model_a') }}s +union all +select * from {{ ref('model_e') }} +""" + +complex_cycle__model_c_sql = """ +select * from {{ ref('model_b') }} +""" + +complex_cycle__model_d_sql = """ +select * from {{ ref('model_c') }} +""" + +complex_cycle__model_e_sql = """ +select * from {{ ref('model_e') }} +""" + + +class TestSimpleCycle: + @pytest.fixture(scope="class") + def models(self): + return { + "model_a.sql": model_a_sql, + "model_b.sql": model_b_sql + } + + def test_simple_cycle(self, project): + with pytest.raises(RuntimeError) as exc: + run_dbt(["run"]) + expected_msg = "Found a cycle" + assert expected_msg in str(exc.value) + + +class TestComplexCycle: + @pytest.fixture(scope="class") + def models(self): + # The cycle in this graph looks like: + # A -> B -> C -> D + # ^ | + # | | + # +--- E <--+ + return { + "model_a.sql": complex_cycle__model_a_sql, + "model_b.sql": complex_cycle__model_b_sql, + "model_c.sql": complex_cycle__model_c_sql, + "model_d.sql": complex_cycle__model_d_sql, + "model_e.sql": complex_cycle__model_e_sql, + } + + def test_complex_cycle(self, project): + with pytest.raises(RuntimeError) as exc: + run_dbt(["run"]) + expected_msg = "Found a cycle" + assert expected_msg in str(exc.value) From c2856017a12b947f9d61901b187bfab177516579 Mon Sep 17 00:00:00 2001 From: Ian Knox <81931810+iknox-fa@users.noreply.github.com> Date: Tue, 25 Oct 2022 13:01:38 -0500 Subject: [PATCH 008/156] [BUGFIX] Force `tox` to update `pip` (fixes `psycopg2-binary @ 2.9.5`) (#6134) --- tox.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tox.ini b/tox.ini index 
89f2ac41204..109e8b4f62f 100644 --- a/tox.ini +++ b/tox.ini @@ -4,6 +4,7 @@ envlist = unit,integration [testenv:{unit,py37,py38,py39,py310,py}] description = unit testing +download = true skip_install = true passenv = DBT_* PYTEST_ADDOPTS commands = @@ -15,6 +16,7 @@ deps = [testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py-integration}] description = adapter plugin integration testing +download = true skip_install = true passenv = DBT_* POSTGRES_TEST_* PYTEST_ADDOPTS commands = From 19167842873ea882c2dbb70aeb525953812d8ef3 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:37:44 -0700 Subject: [PATCH 009/156] Ct 1167/030 statement tests conversion (#6109) * Convert test to functional set. * Remove old statement tests from integration test set. * Nix whitespace Co-authored-by: Mila Page --- .../models/statement_actual.sql | 23 ---------- .../seed/statement_expected.csv | 3 -- .../030_statement_tests/test_statements.py | 36 ---------------- .../functional/statements/fixtures.py | 39 ++++++++++++++++- .../functional/statements/test_statements.py | 43 +++++++++++++++++++ 5 files changed, 81 insertions(+), 63 deletions(-) delete mode 100644 test/integration/030_statement_tests/models/statement_actual.sql delete mode 100644 test/integration/030_statement_tests/seed/statement_expected.csv delete mode 100644 test/integration/030_statement_tests/test_statements.py rename test/integration/030_statement_tests/seed/seed.csv => tests/functional/statements/fixtures.py (89%) create mode 100644 tests/functional/statements/test_statements.py diff --git a/test/integration/030_statement_tests/models/statement_actual.sql b/test/integration/030_statement_tests/models/statement_actual.sql deleted file mode 100644 index 8c550bc5dc1..00000000000 --- a/test/integration/030_statement_tests/models/statement_actual.sql +++ /dev/null @@ -1,23 +0,0 @@ - --- {{ ref('seed') }} - -{%- call statement('test_statement', fetch_result=True) -%} - - select - count(*) as "num_records" - - from {{ ref('seed') }} - -{%- endcall -%} - -{% set result = load_result('test_statement') %} - -{% set res_table = result['table'] %} -{% set res_matrix = result['data'] %} - -{% set matrix_value = res_matrix[0][0] %} -{% set table_value = res_table[0]['num_records'] %} - -select 'matrix' as source, {{ matrix_value }} as value -union all -select 'table' as source, {{ table_value }} as value diff --git a/test/integration/030_statement_tests/seed/statement_expected.csv b/test/integration/030_statement_tests/seed/statement_expected.csv deleted file mode 100644 index cf9d9af15ac..00000000000 --- a/test/integration/030_statement_tests/seed/statement_expected.csv +++ /dev/null @@ -1,3 +0,0 @@ -source,value -matrix,100 -table,100 diff --git a/test/integration/030_statement_tests/test_statements.py b/test/integration/030_statement_tests/test_statements.py deleted file mode 100644 index 4278f394580..00000000000 --- a/test/integration/030_statement_tests/test_statements.py +++ /dev/null @@ -1,36 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestStatements(DBTIntegrationTest): - - @property - def schema(self): - return "statements_030" - - @staticmethod - def dir(path): - return path.lstrip("/") - - @property - def models(self): - return self.dir("models") - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'quote_columns': False, - } - } - - @use_profile("postgres") - 
def test_postgres_statements(self): - self.use_default_project({"seed-paths": [self.dir("seed")]}) - - results = self.run_dbt(["seed"]) - self.assertEqual(len(results), 2) - results = self.run_dbt() - self.assertEqual(len(results), 1) - - self.assertTablesEqual("statement_actual", "statement_expected") diff --git a/test/integration/030_statement_tests/seed/seed.csv b/tests/functional/statements/fixtures.py similarity index 89% rename from test/integration/030_statement_tests/seed/seed.csv rename to tests/functional/statements/fixtures.py index 640af6c4ee6..e05f697644a 100644 --- a/test/integration/030_statement_tests/seed/seed.csv +++ b/tests/functional/statements/fixtures.py @@ -1,4 +1,12 @@ -id,first_name,last_name,email,gender,ip_address +# +# Seeds +# +seeds__statement_expected = """source,value +matrix,100 +table,100 +""" + +seeds__statement_actual = """id,first_name,last_name,email,gender,ip_address 1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 @@ -99,3 +107,32 @@ 98,Angela,Brooks,abrooks2p@mtv.com,Female,10.63.249.126 99,Harold,Foster,hfoster2q@privacy.gov.au,Male,139.214.40.244 100,Carl,Meyer,cmeyer2r@disqus.com,Male,204.117.7.88 +""" + +# +# Models +# +models__statement_actual = """ +-- {{ ref('seed') }} + +{%- call statement('test_statement', fetch_result=True) -%} + + select + count(*) as "num_records" + + from {{ ref('seed') }} + +{%- endcall -%} + +{% set result = load_result('test_statement') %} + +{% set res_table = result['table'] %} +{% set res_matrix = result['data'] %} + +{% set matrix_value = res_matrix[0][0] %} +{% set table_value = res_table[0]['num_records'] %} + +select 'matrix' as source, {{ matrix_value }} as value +union all +select 'table' as source, {{ table_value }} as value +""" diff --git a/tests/functional/statements/test_statements.py b/tests/functional/statements/test_statements.py new file mode 100644 index 00000000000..4b8640b8066 --- /dev/null +++ b/tests/functional/statements/test_statements.py @@ -0,0 +1,43 @@ +import pathlib +import pytest + +from dbt.tests.util import ( + run_dbt, + check_relations_equal, + write_file +) +from tests.functional.statements.fixtures import ( + models__statement_actual, + seeds__statement_actual, + seeds__statement_expected, +) + + +class TestStatements: + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project): + # put seeds in 'seed' not 'seeds' directory + (pathlib.Path(project.project_root) / "seed").mkdir(parents=True, exist_ok=True) + write_file(seeds__statement_actual, project.project_root, "seed", "seed.csv") + write_file(seeds__statement_expected, project.project_root, "seed", "statement_expected.csv") + + @pytest.fixture(scope="class") + def models(self): + return {"statement_actual.sql": models__statement_actual} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + "seed-paths": ["seed"], + } + + def test_postgres_statements(self, project): + results = run_dbt(["seed"]) + assert len(results) == 2 + results = run_dbt() + assert len(results) == 1 + + check_relations_equal(project.adapter, ["statement_actual", "statement_expected"]) From 7b73264ec823dbe7ee12bb998907f4c1aca3b6fd Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Thu, 27 Oct 2022 08:33:15 -0500 Subject: [PATCH 010/156] switch out to use internal action for triage labels (#6120) * switch out to use our action * point to main --- 
.github/workflows/triage-labels.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/.github/workflows/triage-labels.yml b/.github/workflows/triage-labels.yml index a71dc5e1f74..26f41f74db8 100644 --- a/.github/workflows/triage-labels.yml +++ b/.github/workflows/triage-labels.yml @@ -23,11 +23,5 @@ permissions: jobs: triage_label: - if: contains(github.event.issue.labels.*.name, 'awaiting_response') - runs-on: ubuntu-latest - steps: - - name: initial labeling - uses: andymckay/labeler@master - with: - add-labels: "triage" - remove-labels: "awaiting_response" + uses: dbt-labs/actions/.github/workflows/triage-labels.yml@main + secrets: inherit From 77dfec7214e8aaa867a18363c1fcc6f373b103b3 Mon Sep 17 00:00:00 2001 From: Ian Knox <81931810+iknox-fa@users.noreply.github.com> Date: Thu, 27 Oct 2022 10:49:27 -0500 Subject: [PATCH 011/156] more ergonomic profile name handling (#6157) --- core/dbt/config/runtime.py | 91 ++++++++++++++++++++++++++------------ 1 file changed, 63 insertions(+), 28 deletions(-) diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index a8edb9b096a..7a3e475ae54 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -3,31 +3,42 @@ from copy import deepcopy from dataclasses import dataclass, field from pathlib import Path -from typing import Dict, Any, Optional, Mapping, Iterator, Iterable, Tuple, List, MutableSet, Type +from typing import ( + Any, + Dict, + Iterable, + Iterator, + List, + Mapping, + MutableSet, + Optional, + Tuple, + Type, + Union, +) -from .profile import Profile -from .project import Project -from .renderer import DbtProjectYamlRenderer, ProfileRenderer -from .utils import parse_cli_vars from dbt import flags -from dbt.adapters.factory import get_relation_class_by_name, get_include_paths -from dbt.helper_types import FQNPath, PathSet, DictDefaultEmptyStr +from dbt.adapters.factory import get_include_paths, get_relation_class_by_name from dbt.config.profile import read_user_config from dbt.contracts.connection import AdapterRequiredConfig, Credentials from dbt.contracts.graph.manifest import ManifestMetadata -from dbt.contracts.relation import ComponentName -from dbt.ui import warning_tag - from dbt.contracts.project import Configuration, UserConfig +from dbt.contracts.relation import ComponentName +from dbt.dataclass_schema import ValidationError from dbt.exceptions import ( - RuntimeException, DbtProjectError, + RuntimeException, + raise_compiler_error, validator_error_message, warn_or_error, - raise_compiler_error, ) +from dbt.helper_types import DictDefaultEmptyStr, FQNPath, PathSet +from dbt.ui import warning_tag -from dbt.dataclass_schema import ValidationError +from .profile import Profile +from .project import Project, PartialProject +from .renderer import DbtProjectYamlRenderer, ProfileRenderer +from .utils import parse_cli_vars def _project_quoting_dict(proj: Project, profile: Profile) -> Dict[ComponentName, bool]: @@ -190,28 +201,52 @@ def _get_rendered_profile( @classmethod def collect_parts(cls: Type["RuntimeConfig"], args: Any) -> Tuple[Project, Profile]: - # profile_name from the project - project_root = args.project_dir if args.project_dir else os.getcwd() - version_check = bool(flags.VERSION_CHECK) - partial = Project.partial_load(project_root, verify_version=version_check) - # build the profile using the base renderer and the one fact we know - # Note: only the named profile section is rendered. The rest of the - # profile is ignored. 
+ cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) + + profile = cls.collect_profile(args=args) + project_renderer = DbtProjectYamlRenderer(profile, cli_vars) + project = cls.collect_project(args=args, project_renderer=project_renderer) + assert type(project) is Project + return (project, profile) + + @classmethod + def collect_profile( + cls: Type["RuntimeConfig"], args: Any, profile_name: Optional[str] = None + ) -> Profile: + cli_vars: Dict[str, Any] = parse_cli_vars(getattr(args, "vars", "{}")) profile_renderer = ProfileRenderer(cli_vars) - profile_name = partial.render_profile_name(profile_renderer) + + # build the profile using the base renderer and the one fact we know + if profile_name is None: + # Note: only the named profile section is rendered here. The rest of the + # profile is ignored. + partial = cls.collect_project(args) + assert type(partial) is PartialProject + profile_name = partial.render_profile_name(profile_renderer) + profile = cls._get_rendered_profile(args, profile_renderer, profile_name) # Save env_vars encountered in rendering for partial parsing profile.profile_env_vars = profile_renderer.ctx_obj.env_vars + return profile - # get a new renderer using our target information and render the - # project - project_renderer = DbtProjectYamlRenderer(profile, cli_vars) - project = partial.render(project_renderer) - # Save env_vars encountered in rendering for partial parsing - project.project_env_vars = project_renderer.ctx_obj.env_vars - return (project, profile) + @classmethod + def collect_project( + cls: Type["RuntimeConfig"], + args: Any, + project_renderer: Optional[DbtProjectYamlRenderer] = None, + ) -> Union[Project, PartialProject]: + + project_root = args.project_dir if args.project_dir else os.getcwd() + version_check = bool(flags.VERSION_CHECK) + partial = Project.partial_load(project_root, verify_version=version_check) + if project_renderer is None: + return partial + else: + project = partial.render(project_renderer) + project.project_env_vars = project_renderer.ctx_obj.env_vars + return project # Called in main.py, lib.py, task/base.py @classmethod From fc00239f369342f2045d42c0f19881e32f81d10f Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Thu, 27 Oct 2022 14:05:09 -0500 Subject: [PATCH 012/156] point to correct workflow (#6161) * point to correct workflow * add inputs --- .github/workflows/triage-labels.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/triage-labels.yml b/.github/workflows/triage-labels.yml index 26f41f74db8..dc387de70dd 100644 --- a/.github/workflows/triage-labels.yml +++ b/.github/workflows/triage-labels.yml @@ -23,5 +23,8 @@ permissions: jobs: triage_label: - uses: dbt-labs/actions/.github/workflows/triage-labels.yml@main + uses: dbt-labs/actions/.github/workflows/replace-label.yml@main + with: + original_label: "awaiting_response" + new_label: "triage" secrets: inherit From 8145eed603266951ce35858f7eef3836012090bd Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Thu, 27 Oct 2022 16:10:58 -0500 Subject: [PATCH 013/156] revert to community action (#6163) --- .github/workflows/triage-labels.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/triage-labels.yml b/.github/workflows/triage-labels.yml index dc387de70dd..a71dc5e1f74 100644 --- a/.github/workflows/triage-labels.yml +++ b/.github/workflows/triage-labels.yml @@ -23,8 +23,11 @@ permissions: jobs: triage_label: - uses: 
dbt-labs/actions/.github/workflows/replace-label.yml@main - with: - original_label: "awaiting_response" - new_label: "triage" - secrets: inherit + if: contains(github.event.issue.labels.*.name, 'awaiting_response') + runs-on: ubuntu-latest + steps: + - name: initial labeling + uses: andymckay/labeler@master + with: + add-labels: "triage" + remove-labels: "awaiting_response" From be4a91a0fe35a619587b7a0145e190690e3771c6 Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Mon, 31 Oct 2022 12:04:56 -0500 Subject: [PATCH 014/156] Convert messages to struct logs (#6064) * Initial structured logging changes * remove "this" from core/dbt/events/functions.py * CT-1047: Fix execution_time definitions to use float * CT-1047: Revert unintended checking of changes to functions.py * WIP * first pass to resolve circular deps * more circular dep resolution * remove a bunch of duplication * move message into log line * update comments * fix field that wen missing during rebase * remove double import * remove some comments and extra code * fix pre-commit * rework deprecations * WIP converting messages * WIP converting messages * remove stray comment * WIP more message conversion * WIP more message conversion * tweak the messages * convert last message * rename * remove warn_or_raise as never used * add fake calls to all new events * fix some tests * put back deprecation * restore deprecation fully * fix unit test * fix log levels * remove some skipped ids * fix macro log function * fix how messages are built to match expected outcome * fix expected test message * small fixes from reviews * fix conflict resolution in UI Co-authored-by: Gerda Shank Co-authored-by: Peter Allen Webb --- core/dbt/adapters/base/impl.py | 6 +- core/dbt/config/runtime.py | 35 +- core/dbt/constants.py | 7 + core/dbt/context/providers.py | 10 +- core/dbt/contracts/files.py | 6 +- core/dbt/contracts/graph/parsed.py | 42 +- core/dbt/contracts/project.py | 4 +- core/dbt/deprecations.py | 84 ++-- core/dbt/deps/git.py | 29 +- core/dbt/events/functions.py | 36 +- core/dbt/events/helpers.py | 16 + core/dbt/events/proto_types.py | 228 +++++++++-- core/dbt/events/types.proto | 186 +++++++-- core/dbt/events/types.py | 484 ++++++++++++++++++++--- core/dbt/exceptions.py | 124 +----- core/dbt/graph/selector.py | 12 +- core/dbt/parser/manifest.py | 160 ++++---- core/dbt/parser/schemas.py | 29 +- core/dbt/parser/sources.py | 26 +- core/dbt/task/list.py | 6 +- core/dbt/task/printer.py | 2 + core/dbt/task/runnable.py | 8 +- test/unit/test_config.py | 56 +-- test/unit/test_graph_selector_methods.py | 8 +- tests/unit/test_events.py | 12 +- 25 files changed, 1055 insertions(+), 561 deletions(-) create mode 100644 core/dbt/events/helpers.py diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 3c301c2e7f4..33b7c45a3c4 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -41,13 +41,13 @@ from dbt.contracts.graph.compiled import CompileResultNode, CompiledSeedNode from dbt.contracts.graph.manifest import Manifest, MacroManifest from dbt.contracts.graph.parsed import ParsedSeedNode -from dbt.exceptions import warn_or_error -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( CacheMiss, ListRelations, CodeExecution, CodeExecutionStatus, + CatalogGenerationError, ) from dbt.utils import filter_null_values, executor, cast_to_str @@ -1327,7 +1327,7 @@ def catch_as_completed( elif isinstance(exc, KeyboardInterrupt) or not 
isinstance(exc, Exception): raise exc else: - warn_or_error(f"Encountered an error while generating catalog: {str(exc)}") + warn_or_error(CatalogGenerationError(exc=str(exc))) # exc is not None, derives from Exception, and isn't ctrl+c exceptions.append(exc) return merge_tables(tables), exceptions diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index 7a3e475ae54..236baf497a6 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -8,7 +8,6 @@ Dict, Iterable, Iterator, - List, Mapping, MutableSet, Optional, @@ -30,10 +29,10 @@ RuntimeException, raise_compiler_error, validator_error_message, - warn_or_error, ) +from dbt.events.functions import warn_or_error +from dbt.events.types import UnusedResourceConfigPath from dbt.helper_types import DictDefaultEmptyStr, FQNPath, PathSet -from dbt.ui import warning_tag from .profile import Profile from .project import Project, PartialProject @@ -315,11 +314,11 @@ def get_resource_config_paths(self) -> Dict[str, PathSet]: "exposures": self._get_config_paths(self.exposures), } - def get_unused_resource_config_paths( + def warn_for_unused_resource_config_paths( self, resource_fqns: Mapping[str, PathSet], disabled: PathSet, - ) -> List[FQNPath]: + ) -> None: """Return a list of lists of strings, where each inner list of strings represents a type + FQN path of a resource configuration that is not used. @@ -333,23 +332,13 @@ def get_unused_resource_config_paths( for config_path in config_paths: if not _is_config_used(config_path, fqns): - unused_resource_config_paths.append((resource_type,) + config_path) - return unused_resource_config_paths + resource_path = ".".join(i for i in ((resource_type,) + config_path)) + unused_resource_config_paths.append(resource_path) - def warn_for_unused_resource_config_paths( - self, - resource_fqns: Mapping[str, PathSet], - disabled: PathSet, - ) -> None: - unused = self.get_unused_resource_config_paths(resource_fqns, disabled) - if len(unused) == 0: + if len(unused_resource_config_paths) == 0: return - msg = UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE.format( - len(unused), "\n".join("- {}".format(".".join(u)) for u in unused) - ) - - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error(UnusedResourceConfigPath(unused_config_paths=unused_resource_config_paths)) def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: if self.dependencies is None: @@ -626,14 +615,6 @@ def from_args(cls: Type[RuntimeConfig], args: Any) -> "RuntimeConfig": return cls.from_parts(project=project, profile=profile, args=args) -UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE = """\ -Configuration paths exist in your dbt_project.yml file which do not \ -apply to any resources. 
-There are {} unused configuration paths: -{} -""" - - def _is_config_used(path, fqns): if fqns: for fqn in fqns: diff --git a/core/dbt/constants.py b/core/dbt/constants.py index 1599df3e335..63213476e54 100644 --- a/core/dbt/constants.py +++ b/core/dbt/constants.py @@ -1,3 +1,10 @@ SECRET_ENV_PREFIX = "DBT_ENV_SECRET_" DEFAULT_ENV_PLACEHOLDER = "DBT_DEFAULT_PLACEHOLDER" METADATA_ENV_PREFIX = "DBT_ENV_CUSTOM_ENV_" + +MAXIMUM_SEED_SIZE = 1 * 1024 * 1024 +MAXIMUM_SEED_SIZE_NAME = "1MB" + +PIN_PACKAGE_URL = ( + "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" +) diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 597b526e384..280b272d553 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -53,7 +53,6 @@ raise_compiler_error, ref_invalid_args, metric_invalid_args, - ref_target_not_found, target_not_found, ref_bad_context, wrapped_exports, @@ -476,10 +475,11 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Rel ) if target_model is None or isinstance(target_model, Disabled): - ref_target_not_found( - self.model, - target_name, - target_package, + target_not_found( + node=self.model, + target_name=target_name, + target_kind="node", + target_package=target_package, disabled=isinstance(target_model, Disabled), ) self.validate(target_model, target_name, target_package) diff --git a/core/dbt/contracts/files.py b/core/dbt/contracts/files.py index b915a0d1197..93f12a1411e 100644 --- a/core/dbt/contracts/files.py +++ b/core/dbt/contracts/files.py @@ -1,18 +1,16 @@ import hashlib import os from dataclasses import dataclass, field + from mashumaro.types import SerializableType from typing import List, Optional, Union, Dict, Any +from dbt.constants import MAXIMUM_SEED_SIZE from dbt.dataclass_schema import dbtClassMixin, StrEnum from .util import SourceKey -MAXIMUM_SEED_SIZE = 1 * 1024 * 1024 -MAXIMUM_SEED_SIZE_NAME = "1MB" - - class ParseFileType(StrEnum): Macro = "macro" Model = "model" diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/parsed.py index 860f3fdf662..f4de6e6155d 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/parsed.py @@ -18,7 +18,7 @@ from dbt.dataclass_schema import dbtClassMixin, ExtensibleDbtClassMixin from dbt.clients.system import write_file -from dbt.contracts.files import FileHash, MAXIMUM_SEED_SIZE_NAME +from dbt.contracts.files import FileHash from dbt.contracts.graph.unparsed import ( UnparsedNode, UnparsedDocumentation, @@ -41,7 +41,13 @@ ) from dbt.contracts.util import Replaceable, AdditionalPropertiesMixin from dbt.events.proto_types import NodeInfo -from dbt.exceptions import warn_or_error +from dbt.events.functions import warn_or_error +from dbt.events.types import ( + SeedIncreased, + SeedExceedsLimitSamePath, + SeedExceedsLimitAndPathChanged, + SeedExceedsLimitChecksumChanged, +) from dbt import flags from dbt.node_types import ModelLanguage, NodeType @@ -375,30 +381,28 @@ def same_seeds(first: ParsedNode, second: ParsedNode) -> bool: if first.checksum.name == "path": msg: str if second.checksum.name != "path": - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. 
The previous file was " - f"<={MAXIMUM_SEED_SIZE_NAME}, so it has changed" + warn_or_error( + SeedIncreased(package_name=first.package_name, name=first.name), node=first ) elif result: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size at the same path, dbt " - f"cannot tell if it has changed: assuming they are the same" + warn_or_error( + SeedExceedsLimitSamePath(package_name=first.package_name, name=first.name), + node=first, ) elif not result: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was in " - f"a different location, assuming it has changed" + warn_or_error( + SeedExceedsLimitAndPathChanged(package_name=first.package_name, name=first.name), + node=first, ) else: - msg = ( - f"Found a seed ({first.package_name}.{first.name}) " - f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file had a " - f"checksum type of {second.checksum.name}, so it has changed" + warn_or_error( + SeedExceedsLimitChecksumChanged( + package_name=first.package_name, + name=first.name, + checksum_name=second.checksum.name, + ), + node=first, ) - warn_or_error(msg, node=first) return result diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index b56aeddaf17..17523a40bdb 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -12,9 +12,7 @@ from typing import Optional, List, Dict, Union, Any from mashumaro.types import SerializableType -PIN_PACKAGE_URL = ( - "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" # noqa -) + DEFAULT_SEND_ANONYMOUS_USAGE_STATS = True diff --git a/core/dbt/deprecations.py b/core/dbt/deprecations.py index 223091dea60..f7cee59df5a 100644 --- a/core/dbt/deprecations.py +++ b/core/dbt/deprecations.py @@ -1,14 +1,14 @@ +import abc from typing import Optional, Set, List, Dict, ClassVar import dbt.exceptions -from dbt import ui import dbt.tracking class DBTDeprecation: _name: ClassVar[Optional[str]] = None - _description: ClassVar[Optional[str]] = None + _event: ClassVar[Optional[str]] = None @property def name(self) -> str: @@ -21,66 +21,50 @@ def track_deprecation_warn(self) -> None: dbt.tracking.track_deprecation_warn({"deprecation_name": self.name}) @property - def description(self) -> str: - if self._description is not None: - return self._description - raise NotImplementedError("description not implemented for {}".format(self)) + def event(self) -> abc.ABCMeta: + if self._event is not None: + module_path = dbt.events.types + class_name = self._event + + try: + return getattr(module_path, class_name) + except AttributeError: + msg = f"Event Class `{class_name}` is not defined in `{module_path}`" + raise NameError(msg) + raise NotImplementedError("event not implemented for {}".format(self._event)) def show(self, *args, **kwargs) -> None: if self.name not in active_deprecations: - desc = self.description.format(**kwargs) - msg = ui.line_wrap_message(desc, prefix="Deprecated functionality\n\n") - dbt.exceptions.warn_or_error(msg, log_fmt=ui.warning_tag("{}")) + event = self.event(**kwargs) + dbt.events.functions.warn_or_error(event) self.track_deprecation_warn() active_deprecations.add(self.name) class PackageRedirectDeprecation(DBTDeprecation): _name = "package-redirect" - _description = """\ - The `{old_name}` package is deprecated in favor of `{new_name}`. Please update - your `packages.yml` configuration to use `{new_name}` instead. 
- """ + _event = "PackageRedirectDeprecation" class PackageInstallPathDeprecation(DBTDeprecation): _name = "install-packages-path" - _description = """\ - The default package install path has changed from `dbt_modules` to `dbt_packages`. - Please update `clean-targets` in `dbt_project.yml` and check `.gitignore` as well. - Or, set `packages-install-path: dbt_modules` if you'd like to keep the current value. - """ + _event = "PackageInstallPathDeprecation" -class ConfigPathDeprecation(DBTDeprecation): - _description = """\ - The `{deprecated_path}` config has been renamed to `{exp_path}`. - Please update your `dbt_project.yml` configuration to reflect this change. - """ - - -class ConfigSourcePathDeprecation(ConfigPathDeprecation): +class ConfigSourcePathDeprecation(DBTDeprecation): _name = "project-config-source-paths" + _event = "ConfigSourcePathDeprecation" -class ConfigDataPathDeprecation(ConfigPathDeprecation): +class ConfigDataPathDeprecation(DBTDeprecation): _name = "project-config-data-paths" - - -_adapter_renamed_description = """\ -The adapter function `adapter.{old_name}` is deprecated and will be removed in -a future release of dbt. Please use `adapter.{new_name}` instead. - -Documentation for {new_name} can be found here: - - https://docs.getdbt.com/docs/adapter -""" + _event = "ConfigDataPathDeprecation" def renamed_method(old_name: str, new_name: str): class AdapterDeprecationWarning(DBTDeprecation): _name = "adapter:{}".format(old_name) - _description = _adapter_renamed_description.format(old_name=old_name, new_name=new_name) + _event = "AdapterDeprecationWarning" dep = AdapterDeprecationWarning() deprecations_list.append(dep) @@ -89,26 +73,12 @@ class AdapterDeprecationWarning(DBTDeprecation): class MetricAttributesRenamed(DBTDeprecation): _name = "metric-attr-renamed" - _description = """\ -dbt-core v1.3 renamed attributes for metrics: -\n 'sql' -> 'expression' -\n 'type' -> 'calculation_method' -\n 'type: expression' -> 'calculation_method: derived' -\nThe old metric parameter names will be fully deprecated in v1.4. -\nPlease remove them from the metric definition of metric '{metric_name}' -\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849 -""" + _event = "MetricAttributesRenamed" class ExposureNameDeprecation(DBTDeprecation): _name = "exposure-name" - _description = """\ - Starting in v1.3, the 'name' of an exposure should contain only letters, numbers, and underscores. - Exposures support a new property, 'label', which may contain spaces, capital letters, and special characters. - {exposure} does not follow this pattern. - Please update the 'name', and use the 'label' property for a human-friendly title. - This will raise an error in a future version of dbt-core. 
- """ + _event = "ExposureNameDeprecation" def warn(name, *args, **kwargs): @@ -125,12 +95,12 @@ def warn(name, *args, **kwargs): active_deprecations: Set[str] = set() deprecations_list: List[DBTDeprecation] = [ - ExposureNameDeprecation(), + PackageRedirectDeprecation(), + PackageInstallPathDeprecation(), ConfigSourcePathDeprecation(), ConfigDataPathDeprecation(), - PackageInstallPathDeprecation(), - PackageRedirectDeprecation(), MetricAttributesRenamed(), + ExposureNameDeprecation(), ] deprecations: Dict[str, DBTDeprecation] = {d.name: d for d in deprecations_list} diff --git a/core/dbt/deps/git.py b/core/dbt/deps/git.py index 9e86367acc4..e6dcc479a80 100644 --- a/core/dbt/deps/git.py +++ b/core/dbt/deps/git.py @@ -9,14 +9,9 @@ GitPackage, ) from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path -from dbt.exceptions import ExecutableError, warn_or_error, raise_dependency_error -from dbt.events.functions import fire_event -from dbt.events.types import EnsureGitInstalled -from dbt import ui - -PIN_PACKAGE_URL = ( - "https://docs.getdbt.com/docs/package-management#section-specifying-package-versions" # noqa -) +from dbt.exceptions import ExecutableError, raise_dependency_error +from dbt.events.functions import fire_event, warn_or_error +from dbt.events.types import EnsureGitInstalled, DepsUnpinned def md5sum(s: str): @@ -62,14 +57,6 @@ def nice_version_name(self): else: return "revision {}".format(self.revision) - def unpinned_msg(self): - if self.revision == "HEAD": - return "not pinned, using HEAD (default branch)" - elif self.revision in ("main", "master"): - return f'pinned to the "{self.revision}" branch' - else: - return None - def _checkout(self): """Performs a shallow clone of the repository into the downloads directory. This function can be called repeatedly. 
If the project has @@ -92,14 +79,8 @@ def _checkout(self): def _fetch_metadata(self, project, renderer) -> ProjectPackageMetadata: path = self._checkout() - if self.unpinned_msg() and self.warn_unpinned: - warn_or_error( - 'The git package "{}" \n\tis {}.\n\tThis can introduce ' - "breaking changes into your project without warning!\n\nSee {}".format( - self.git, self.unpinned_msg(), PIN_PACKAGE_URL - ), - log_fmt=ui.yellow("WARNING: {}"), - ) + if (self.revision == "HEAD" or self.revision in ("main", "master")) and self.warn_unpinned: + warn_or_error(DepsUnpinned(git=self.git)) loaded = Project.from_project_root(path, renderer) return ProjectPackageMetadata.from_project(loaded) diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index 122171bc8bf..2425f0abd7f 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -1,9 +1,16 @@ import betterproto from colorama import Style + from dbt.events.base_types import NoStdOut, BaseEvent, NoFile, Cache -from dbt.events.types import EventBufferFull, MainReportVersion, EmptyLine +from dbt.events.helpers import env_secrets, scrub_secrets +from dbt.events.types import ( + EventBufferFull, + MainReportVersion, + EmptyLine, +) import dbt.flags as flags -from dbt.constants import SECRET_ENV_PREFIX, METADATA_ENV_PREFIX + +from dbt.constants import METADATA_ENV_PREFIX from dbt.logger import make_log_dir_if_missing, GLOBAL_LOGGER from datetime import datetime @@ -18,7 +25,8 @@ import os import uuid import threading -from typing import List, Optional, Union, Callable, Dict +from typing import Optional, Union, Callable, Dict + from collections import deque LOG_VERSION = 3 @@ -108,19 +116,6 @@ def stop_capture_stdout_logs() -> None: ] -def env_secrets() -> List[str]: - return [v for k, v in os.environ.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()] - - -def scrub_secrets(msg: str, secrets: List[str]) -> str: - scrubbed = msg - - for secret in secrets: - scrubbed = scrubbed.replace(secret, "*****") - - return scrubbed - - # returns a dictionary representation of the event fields. # the message may contain secrets which must be scrubbed at the usage site. def event_to_json( @@ -220,6 +215,15 @@ def send_to_logger(l: Union[Logger, logbook.Logger], level_tag: str, log_line: s ) +def warn_or_error(event, node=None): + if flags.WARN_ERROR: + from dbt.exceptions import raise_compiler_error + + raise_compiler_error(scrub_secrets(event.info.msg, env_secrets()), node) + else: + fire_event(event) + + # an alternative to fire_event which only creates and logs the event value # if the condition is met. Does nothing otherwise. 
def fire_event_if(conditional: bool, lazy_e: Callable[[], BaseEvent]) -> None: diff --git a/core/dbt/events/helpers.py b/core/dbt/events/helpers.py new file mode 100644 index 00000000000..2570c8653c9 --- /dev/null +++ b/core/dbt/events/helpers.py @@ -0,0 +1,16 @@ +import os +from typing import List +from dbt.constants import SECRET_ENV_PREFIX + + +def env_secrets() -> List[str]: + return [v for k, v in os.environ.items() if k.startswith(SECRET_ENV_PREFIX) and v.strip()] + + +def scrub_secrets(msg: str, secrets: List[str]) -> str: + scrubbed = msg + + for secret in secrets: + scrubbed = scrubbed.replace(secret, "*****") + + return scrubbed diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index d75713285db..53ad7620bd3 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -281,6 +281,65 @@ class ProjectCreated(betterproto.Message): slack_url: str = betterproto.string_field(4) +@dataclass +class PackageRedirectDeprecation(betterproto.Message): + """D001""" + + info: "EventInfo" = betterproto.message_field(1) + old_name: str = betterproto.string_field(2) + new_name: str = betterproto.string_field(3) + + +@dataclass +class PackageInstallPathDeprecation(betterproto.Message): + """D002""" + + info: "EventInfo" = betterproto.message_field(1) + + +@dataclass +class ConfigSourcePathDeprecation(betterproto.Message): + """D003""" + + info: "EventInfo" = betterproto.message_field(1) + deprecated_path: str = betterproto.string_field(2) + exp_path: str = betterproto.string_field(3) + + +@dataclass +class ConfigDataPathDeprecation(betterproto.Message): + """D004""" + + info: "EventInfo" = betterproto.message_field(1) + deprecated_path: str = betterproto.string_field(2) + exp_path: str = betterproto.string_field(3) + + +@dataclass +class AdapterDeprecationWarning(betterproto.Message): + """D005""" + + info: "EventInfo" = betterproto.message_field(1) + old_name: str = betterproto.string_field(2) + new_name: str = betterproto.string_field(3) + + +@dataclass +class MetricAttributesRenamed(betterproto.Message): + """D006""" + + info: "EventInfo" = betterproto.message_field(1) + metric_name: str = betterproto.string_field(2) + + +@dataclass +class ExposureNameDeprecation(betterproto.Message): + """D007""" + + info: "EventInfo" = betterproto.message_field(1) + exposure: str = betterproto.string_field(2) + + @dataclass class AdapterEventDebug(betterproto.Message): """E001""" @@ -629,6 +688,14 @@ class CodeExecutionStatus(betterproto.Message): elapsed: float = betterproto.float_field(3) +@dataclass +class CatalogGenerationError(betterproto.Message): + """E040""" + + info: "EventInfo" = betterproto.message_field(1) + exc: str = betterproto.string_field(2) + + @dataclass class WriteCatalogFailure(betterproto.Message): """E041""" @@ -1066,17 +1133,119 @@ class PartialParsingDeletedExposure(betterproto.Message): @dataclass -class InvalidDisabledSourceInTestNode(betterproto.Message): +class InvalidDisabledTargetInTestNode(betterproto.Message): """I050""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + resource_type_title: str = betterproto.string_field(2) + unique_id: str = betterproto.string_field(3) + original_file_path: str = betterproto.string_field(4) + target_kind: str = betterproto.string_field(5) + target_name: str = betterproto.string_field(6) + target_package: str = betterproto.string_field(7) @dataclass -class InvalidRefInTestNode(betterproto.Message): +class UnusedResourceConfigPath(betterproto.Message): 
"""I051""" + info: "EventInfo" = betterproto.message_field(1) + unused_config_paths: List[str] = betterproto.string_field(2) + + +@dataclass +class SeedIncreased(betterproto.Message): + """I052""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitSamePath(betterproto.Message): + """I053""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitAndPathChanged(betterproto.Message): + """I054""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + + +@dataclass +class SeedExceedsLimitChecksumChanged(betterproto.Message): + """I055""" + + info: "EventInfo" = betterproto.message_field(1) + package_name: str = betterproto.string_field(2) + name: str = betterproto.string_field(3) + checksum_name: str = betterproto.string_field(4) + + +@dataclass +class UnusedTables(betterproto.Message): + """I056""" + + info: "EventInfo" = betterproto.message_field(1) + unused_tables: List[str] = betterproto.string_field(2) + + +@dataclass +class WrongResourceSchemaFile(betterproto.Message): + """I057""" + + info: "EventInfo" = betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + resource_type: str = betterproto.string_field(3) + plural_resource_type: str = betterproto.string_field(4) + yaml_key: str = betterproto.string_field(5) + file_path: str = betterproto.string_field(6) + + +@dataclass +class NoNodeForYamlKey(betterproto.Message): + """I058""" + + info: "EventInfo" = betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + yaml_key: str = betterproto.string_field(3) + file_path: str = betterproto.string_field(4) + + +@dataclass +class MacroPatchNotFound(betterproto.Message): + """I059""" + + info: "EventInfo" = betterproto.message_field(1) + patch_name: str = betterproto.string_field(2) + + +@dataclass +class NodeNotFoundOrDisabled(betterproto.Message): + """I060""" + + info: "EventInfo" = betterproto.message_field(1) + original_file_path: str = betterproto.string_field(2) + unique_id: str = betterproto.string_field(3) + resource_type_title: str = betterproto.string_field(4) + target_name: str = betterproto.string_field(5) + target_kind: str = betterproto.string_field(6) + target_package: str = betterproto.string_field(7) + disabled: str = betterproto.string_field(8) + + +@dataclass +class GeneralMacroWarning(betterproto.Message): + """I061""" + info: "EventInfo" = betterproto.message_field(1) msg: str = betterproto.string_field(2) @@ -1309,6 +1478,23 @@ class DepsSetDownloadDirectory(betterproto.Message): path: str = betterproto.string_field(2) +@dataclass +class DepsUnpinned(betterproto.Message): + """M029""" + + info: "EventInfo" = betterproto.message_field(1) + revision: str = betterproto.string_field(2) + git: str = betterproto.string_field(3) + + +@dataclass +class NoNodesForSelectionCriteria(betterproto.Message): + """M030""" + + info: "EventInfo" = betterproto.message_field(1) + spec_raw: str = betterproto.string_field(2) + + @dataclass class RunningOperationCaughtError(betterproto.Message): """Q001""" @@ -1678,6 +1864,13 @@ class SkippingDetails(betterproto.Message): total: int = betterproto.int32_field(7) +@dataclass +class NothingToDo(betterproto.Message): + """Q035""" + + info: "EventInfo" = 
betterproto.message_field(1) + + @dataclass class RunningOperationUncaughtError(betterproto.Message): """Q036""" @@ -1697,6 +1890,13 @@ class EndRunResult(betterproto.Message): success: bool = betterproto.bool_field(5) +@dataclass +class NoNodesSelected(betterproto.Message): + """Q038""" + + info: "EventInfo" = betterproto.message_field(1) + + @dataclass class CatchableExceptionOnRun(betterproto.Message): """W002""" @@ -2066,34 +2266,16 @@ class TrackingInitializeFailure(betterproto.Message): exc_info: str = betterproto.string_field(2) -@dataclass -class GeneralWarningMsg(betterproto.Message): - """Z046""" - - info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) - log_fmt: str = betterproto.string_field(3) - - -@dataclass -class GeneralWarningException(betterproto.Message): - """Z047""" - - info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) - log_fmt: str = betterproto.string_field(3) - - @dataclass class EventBufferFull(betterproto.Message): - """Z048""" + """Z045""" info: "EventInfo" = betterproto.message_field(1) @dataclass class RunResultWarningMessage(betterproto.Message): - """Z049""" + """Z046""" info: "EventInfo" = betterproto.message_field(1) msg: str = betterproto.string_field(2) diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index eaa05b4f93d..8f7e1e94fc4 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -213,6 +213,53 @@ message ProjectCreated { string slack_url = 4; } +// D - Deprecation + +// D001 +message PackageRedirectDeprecation { + EventInfo info = 1; + string old_name = 2; + string new_name = 3; +} + +// D002 +message PackageInstallPathDeprecation { + EventInfo info = 1; +} + +// D003 +message ConfigSourcePathDeprecation { + EventInfo info = 1; + string deprecated_path = 2; + string exp_path = 3; +} + +// D004 +message ConfigDataPathDeprecation { + EventInfo info = 1; + string deprecated_path = 2; + string exp_path = 3; +} + +//D005 +message AdapterDeprecationWarning { + EventInfo info = 1; + string old_name = 2; + string new_name = 3; +} + +//D006 +message MetricAttributesRenamed { + EventInfo info = 1; + string metric_name = 2; +} + +//D007 +message ExposureNameDeprecation { + EventInfo info = 1; + string exposure = 2; +} + // E - DB Adapter // E001 @@ -455,7 +502,6 @@ message AdapterImportError { message PluginLoadError { EventInfo info = 1; string exc_info = 2; - } // E037 @@ -478,7 +524,11 @@ message CodeExecutionStatus { float elapsed = 3; } -// Skipped E040 +// E040 +message CatalogGenerationError { + EventInfo info = 1; + string exc = 2; +} // E041 message WriteCatalogFailure { @@ -806,17 +856,98 @@ message PartialParsingDeletedExposure { } // I050 -message InvalidDisabledSourceInTestNode { +message InvalidDisabledTargetInTestNode { EventInfo info = 1; - string msg = 2; + string resource_type_title = 2; + string unique_id = 3; + string original_file_path = 4; + string target_kind = 5; + string target_name = 6; + string target_package = 7; } // I051 -message InvalidRefInTestNode { +message UnusedResourceConfigPath { EventInfo info = 1; - string msg = 2; + repeated string unused_config_paths = 2; +} + +// I052 +message SeedIncreased { + EventInfo info = 1; + string package_name = 2; + string name = 3; +} + +// I053 +message SeedExceedsLimitSamePath { + EventInfo info = 1; + string package_name = 2; + string name = 3; +} + +// I054 +message SeedExceedsLimitAndPathChanged { + EventInfo info = 1; + string package_name = 2; + string name = 3; 
+} + +// I055 +message SeedExceedsLimitChecksumChanged { + EventInfo info = 1; + string package_name = 2; + string name = 3; + string checksum_name = 4; +} + +// I056 +message UnusedTables { + EventInfo info = 1; + repeated string unused_tables = 2; } +// I057 +message WrongResourceSchemaFile { + EventInfo info = 1; + string patch_name = 2; + string resource_type = 3; + string plural_resource_type = 4; + string yaml_key = 5; + string file_path = 6; +} + +// I058 +message NoNodeForYamlKey { + EventInfo info = 1; + string patch_name = 2; + string yaml_key = 3; + string file_path = 4; +} + +// I059 +message MacroPatchNotFound { + EventInfo info = 1; + string patch_name = 2; +} + +// I060 +message NodeNotFoundOrDisabled { + EventInfo info = 1; + string original_file_path = 2; + string unique_id = 3; + string resource_type_title = 4; + string target_name = 5; + string target_kind = 6; + string target_package = 7; + string disabled = 8; +} + +// I061 +message GeneralMacroWarning { + EventInfo info = 1; + string msg = 2; +} // M - Deps generation @@ -992,6 +1123,19 @@ message DepsSetDownloadDirectory { string path = 2; } +// M029 +message DepsUnpinned { + EventInfo info = 1; + string revision = 2; + string git = 3; +} + +// M030 +message NoNodesForSelectionCriteria { + EventInfo info = 1; + string spec_raw = 2; +} + // Q - Node execution // Q001 @@ -1291,7 +1435,10 @@ message SkippingDetails { int32 total = 7; } -// Skipped Q035 +// Q035 +message NothingToDo { + EventInfo info = 1; +} // Q036 message RunningOperationUncaughtError { @@ -1308,6 +1455,11 @@ message EndRunResult { bool success = 5; } +// Q038 +message NoNodesSelected { + EventInfo info = 1; +} + // W - Node testing // Skipped W001 @@ -1593,28 +1745,12 @@ message TrackingInitializeFailure { string exc_info = 2; } -// Skipped Z045 - -// Z046 -message GeneralWarningMsg { - EventInfo info = 1; - string msg = 2; - string log_fmt = 3; -} - -// Z047 -message GeneralWarningException { - EventInfo info = 1; - string exc = 2; - string log_fmt = 3; -} - -// Z048 +// Z045 message EventBufferFull { EventInfo info = 1; } -// Z049 +// Z046 message RunResultWarningMessage { EventInfo info = 1; string msg = 2; diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index f6e66f941d2..de562fb62aa 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -1,5 +1,6 @@ from dataclasses import dataclass -from dbt import ui +from dbt.ui import line_wrap_message, warning_tag, red, green, yellow +from dbt.constants import MAXIMUM_SEED_SIZE_NAME, PIN_PACKAGE_URL from dbt.events.base_types import ( NoFile, DebugLevel, @@ -32,10 +33,11 @@ # | Code | Description | # |:----:|:-------------------:| # | A | Pre-project loading | +# | D | Deprecations | # | E | DB adapter | # | I | Project parsing | # | M | Deps generation | -# | Q | Node execution | +# | Q | Node execution | # | W | Node testing | # | Z | Misc | # | T | Test only | @@ -305,6 +307,114 @@ def message(self) -> str: """ +# ======================================================= +# D - Deprecations +# ======================================================= + + +@dataclass +class PackageRedirectDeprecation(WarnLevel, pt.PackageRedirectDeprecation): # noqa + def code(self): + return "D001" + + def message(self): + description = ( + f"The `{self.old_name}` package is deprecated in favor of `{self.new_name}`. Please " + f"update your `packages.yml` configuration to use `{self.new_name}` instead." 
+        )
+        return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}"))
+
+
+@dataclass
+class PackageInstallPathDeprecation(WarnLevel, pt.PackageInstallPathDeprecation):  # noqa
+    def code(self):
+        return "D002"
+
+    def message(self):
+        description = """\
+        The default package install path has changed from `dbt_modules` to `dbt_packages`.
+        Please update `clean-targets` in `dbt_project.yml` and check `.gitignore` as well.
+        Or, set `packages-install-path: dbt_modules` if you'd like to keep the current value.
+        """
+        return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}"))
+
+
+@dataclass
+class ConfigSourcePathDeprecation(WarnLevel, pt.ConfigSourcePathDeprecation):  # noqa
+    def code(self):
+        return "D003"
+
+    def message(self):
+        description = (
+            f"The `{self.deprecated_path}` config has been renamed to `{self.exp_path}`. "
+            "Please update your `dbt_project.yml` configuration to reflect this change."
+        )
+        return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}"))
+
+
+@dataclass
+class ConfigDataPathDeprecation(WarnLevel, pt.ConfigDataPathDeprecation):  # noqa
+    def code(self):
+        return "D004"
+
+    def message(self):
+        description = (
+            f"The `{self.deprecated_path}` config has been renamed to `{self.exp_path}`. "
+            "Please update your `dbt_project.yml` configuration to reflect this change."
+        )
+        return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}"))
+
+
+@dataclass
+class AdapterDeprecationWarning(WarnLevel, pt.AdapterDeprecationWarning):  # noqa
+    def code(self):
+        return "D005"
+
+    def message(self):
+        description = (
+            f"The adapter function `adapter.{self.old_name}` is deprecated and will be removed in "
+            f"a future release of dbt. Please use `adapter.{self.new_name}` instead. "
+            f"\n\nDocumentation for {self.new_name} can be found here:"
+            f"\n\nhttps://docs.getdbt.com/docs/adapter"
+        )
+        return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}"))
+
+
+@dataclass
+class MetricAttributesRenamed(WarnLevel, pt.MetricAttributesRenamed):  # noqa
+    def code(self):
+        return "D006"
+
+    def message(self):
+        description = (
+            "dbt-core v1.3 renamed attributes for metrics:"
+            "\n 'sql' -> 'expression'"
+            "\n 'type' -> 'calculation_method'"
+            "\n 'type: expression' -> 'calculation_method: derived'"
+            "\nThe old metric parameter names will be fully deprecated in v1.4."
+            f"\nPlease remove them from the metric definition of metric '{self.metric_name}'"
+            "\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849"
+        )
+
+        return warning_tag(f"Deprecated functionality\n\n{description}")
+
+
+@dataclass
+class ExposureNameDeprecation(WarnLevel, pt.ExposureNameDeprecation):  # noqa
+    def code(self):
+        return "D007"
+
+    def message(self):
+        description = (
+            "Starting in v1.3, the 'name' of an exposure should contain only letters, "
+            "numbers, and underscores. Exposures support a new property, 'label', which may "
+            f"contain spaces, capital letters, and special characters. {self.exposure} does not "
+            "follow this pattern. Please update the 'name', and use the 'label' property for a "
+            "human-friendly title. This will raise an error in a future version of dbt-core."
+ ) + return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) + + # ======================================================= # E - DB Adapter # ======================================================= @@ -675,7 +785,13 @@ def message(self) -> str: return f"Execution status: {self.status} in {self.elapsed} seconds" -# Skipped E040 +@dataclass +class CatalogGenerationError(WarnLevel, pt.CatalogGenerationError): + def code(self): + return "E040" + + def message(self) -> str: + return f"Encountered an error while generating catalog: {self.exc}" @dataclass @@ -1218,23 +1334,194 @@ def message(self) -> str: return f"Partial parsing: deleted exposure {self.unique_id}" -# TODO: switch to storing structured info and calling get_target_failure_msg @dataclass -class InvalidDisabledSourceInTestNode( - WarnLevel, EventStringFunctor, pt.InvalidDisabledSourceInTestNode -): +class InvalidDisabledTargetInTestNode(WarnLevel, pt.InvalidDisabledTargetInTestNode): def code(self): return "I050" def message(self) -> str: - return ui.warning_tag(self.msg) + + target_package_string = "" + if self.target_package != target_package_string: + target_package_string = "in package '{}' ".format(self.target_package) + + msg = "{} '{}' ({}) depends on a {} named '{}' {}which is disabled".format( + self.resource_type_title, + self.unique_id, + self.original_file_path, + self.target_kind, + self.target_name, + target_package_string, + ) + + return warning_tag(msg) @dataclass -class InvalidRefInTestNode(DebugLevel, EventStringFunctor, pt.InvalidRefInTestNode): +class UnusedResourceConfigPath(WarnLevel, pt.UnusedResourceConfigPath): def code(self): return "I051" + def message(self) -> str: + path_list = "\n".join(f"- {u}" for u in self.unused_config_paths) + msg = ( + "Configuration paths exist in your dbt_project.yml file which do not " + "apply to any resources.\n" + f"There are {len(self.unused_config_paths)} unused configuration paths:\n{path_list}" + ) + return warning_tag(msg) + + +@dataclass +class SeedIncreased(WarnLevel, pt.SeedIncreased): + def code(self): + return "I052" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was " + f"<={MAXIMUM_SEED_SIZE_NAME}, so it has changed" + ) + return msg + + +@dataclass +class SeedExceedsLimitSamePath(WarnLevel, pt.SeedExceedsLimitSamePath): + def code(self): + return "I053" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size at the same path, dbt " + f"cannot tell if it has changed: assuming they are the same" + ) + return msg + + +@dataclass +class SeedExceedsLimitAndPathChanged(WarnLevel, pt.SeedExceedsLimitAndPathChanged): + def code(self): + return "I054" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. The previous file was in " + f"a different location, assuming it has changed" + ) + return msg + + +@dataclass +class SeedExceedsLimitChecksumChanged(WarnLevel, pt.SeedExceedsLimitChecksumChanged): + def code(self): + return "I055" + + def message(self) -> str: + msg = ( + f"Found a seed ({self.package_name}.{self.name}) " + f">{MAXIMUM_SEED_SIZE_NAME} in size. 
The previous file had a "
+            f"checksum type of {self.checksum_name}, so it has changed"
+        )
+        return msg
+
+
+@dataclass
+class UnusedTables(WarnLevel, pt.UnusedTables):
+    def code(self):
+        return "I056"
+
+    def message(self) -> str:
+        msg = [
+            "During parsing, dbt encountered source overrides that had no target:",
+        ]
+        msg += self.unused_tables
+        msg.append("")
+        return warning_tag("\n".join(msg))
+
+
+@dataclass
+class WrongResourceSchemaFile(WarnLevel, pt.WrongResourceSchemaFile):
+    def code(self):
+        return "I057"
+
+    def message(self) -> str:
+        msg = line_wrap_message(
+            f"""\
+            '{self.patch_name}' is a {self.resource_type} node, but it is
+            specified in the {self.yaml_key} section of
+            {self.file_path}.
+            To fix this error, place the `{self.patch_name}`
+            specification under the {self.plural_resource_type} key instead.
+            """
+        )
+        return warning_tag(msg)
+
+
+@dataclass
+class NoNodeForYamlKey(WarnLevel, pt.NoNodeForYamlKey):
+    def code(self):
+        return "I058"
+
+    def message(self) -> str:
+        msg = (
+            f"Did not find matching node for patch with name '{self.patch_name}' "
+            f"in the '{self.yaml_key}' section of "
+            f"file '{self.file_path}'"
+        )
+        return warning_tag(msg)
+
+
+@dataclass
+class MacroPatchNotFound(WarnLevel, pt.MacroPatchNotFound):
+    def code(self):
+        return "I059"
+
+    def message(self) -> str:
+        msg = f'Found patch for macro "{self.patch_name}" which was not found'
+        return warning_tag(msg)
+
+
+@dataclass
+class NodeNotFoundOrDisabled(WarnLevel, pt.NodeNotFoundOrDisabled):
+    def code(self):
+        return "I060"
+
+    def message(self) -> str:
+        # this is duplicated logic from exceptions.get_not_found_or_disabled_msg
+        # when we convert exceptions to be structured maybe it can be combined?
+        # converting the bool to a string since None is also valid
+        if self.disabled == "None":
+            reason = "was not found or is disabled"
+        elif self.disabled == "True":
+            reason = "is disabled"
+        else:
+            reason = "was not found"
+
+        target_package_string = ""
+        if self.target_package is not None:
+            target_package_string = "in package '{}' ".format(self.target_package)
+
+        msg = "{} '{}' ({}) depends on a {} named '{}' {}which {}".format(
+            self.resource_type_title,
+            self.unique_id,
+            self.original_file_path,
+            self.target_kind,
+            self.target_name,
+            target_package_string,
+            reason,
+        )
+
+        return warning_tag(msg)
+
+
+@dataclass
+class GeneralMacroWarning(WarnLevel, pt.GeneralMacroWarning):
+    def code(self):
+        return "I061"
+
     def message(self) -> str:
         return self.msg
 
@@ -1343,6 +1630,7 @@ def code(self):
         return "M011"
 
     def message(self) -> str:
+        # This is for the log method used in macros so msg cannot be built here
         return self.msg
 
 
@@ -1352,6 +1640,7 @@ def code(self):
         return "M012"
 
     def message(self) -> str:
+        # This is for the log method used in macros so msg cannot be built here
         return self.msg
 
 
@@ -1505,6 +1794,35 @@ def message(self) -> str:
         return f"Set downloads directory='{self.path}'"
 
 
+@dataclass
+class DepsUnpinned(WarnLevel, pt.DepsUnpinned):
+    def code(self):
+        return "M029"
+
+    def message(self) -> str:
+        if self.revision == "HEAD":
+            unpinned_msg = "not pinned, using HEAD (default branch)"
+        elif self.revision in ("main", "master"):
+            unpinned_msg = f'pinned to the "{self.revision}" branch'
+        else:
+            unpinned_msg = None
+
+        msg = (
+            f'The git package "{self.git}" \n\tis {unpinned_msg}.\n\tThis can introduce '
+            f"breaking changes into your project without warning!\n\nSee {PIN_PACKAGE_URL}"
+        )
+        return yellow(f"WARNING: {msg}")
+
+
+@dataclass
+class 
NoNodesForSelectionCriteria(WarnLevel, pt.NoNodesForSelectionCriteria): + def code(self): + return "M030" + + def message(self) -> str: + return f"The selection criterion '{self.spec_raw}' does not match any nodes" + + # ======================================================= # Q - Node execution # ======================================================= @@ -1575,7 +1893,7 @@ def message(self) -> str: msg = f"{info} {self.name}" return format_fancy_output_line( msg=msg, - status=ui.red(info), + status=red(info), index=self.index, total=self.num_models, execution_time=self.execution_time, @@ -1592,7 +1910,7 @@ def message(self) -> str: msg = f"{info} {self.name}" return format_fancy_output_line( msg=msg, - status=ui.green(info), + status=green(info), index=self.index, total=self.num_models, execution_time=self.execution_time, @@ -1609,7 +1927,7 @@ def message(self) -> str: msg = f"{info} {self.name}" return format_fancy_output_line( msg=msg, - status=ui.yellow(info), + status=yellow(info), index=self.index, total=self.num_models, execution_time=self.execution_time, @@ -1626,7 +1944,7 @@ def message(self) -> str: msg = f"{info} {self.name}" return format_fancy_output_line( msg=msg, - status=ui.red(info), + status=red(info), index=self.index, total=self.num_models, execution_time=self.execution_time, @@ -1653,7 +1971,7 @@ def message(self) -> str: msg = f"{info} {self.description}" return format_fancy_output_line( msg=msg, - status=ui.green(self.status), + status=green(self.status), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1670,7 +1988,7 @@ def message(self) -> str: msg = f"{info} {self.description}" return format_fancy_output_line( msg=msg, - status=ui.red(self.status.upper()), + status=red(self.status.upper()), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1687,7 +2005,7 @@ def message(self) -> str: msg = "{info} {description}".format(info=info, description=self.description, **self.cfg) return format_fancy_output_line( msg=msg, - status=ui.red(self.status.upper()), + status=red(self.status.upper()), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1704,7 +2022,7 @@ def message(self) -> str: msg = "{info} {description}".format(info=info, description=self.description, **self.cfg) return format_fancy_output_line( msg=msg, - status=ui.green(self.status), + status=green(self.status), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1721,7 +2039,7 @@ def message(self) -> str: msg = f"{info} seed file {self.schema}.{self.relation}" return format_fancy_output_line( msg=msg, - status=ui.red(self.status.upper()), + status=red(self.status.upper()), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1738,7 +2056,7 @@ def message(self) -> str: msg = f"{info} seed file {self.schema}.{self.relation}" return format_fancy_output_line( msg=msg, - status=ui.green(self.status), + status=green(self.status), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1755,7 +2073,7 @@ def message(self) -> str: msg = f"{info} freshness of {self.source_name}.{self.table_name}" return format_fancy_output_line( msg=msg, - status=ui.red(info), + status=red(info), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1772,7 +2090,7 @@ def message(self) -> str: msg = f"{info} freshness of {self.source_name}.{self.table_name}" return format_fancy_output_line( msg=msg, - status=ui.red(info), + status=red(info), index=self.index, 
total=self.total, execution_time=self.execution_time, @@ -1789,7 +2107,7 @@ def message(self) -> str: msg = f"{info} freshness of {self.source_name}.{self.table_name}" return format_fancy_output_line( msg=msg, - status=ui.yellow(info), + status=yellow(info), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1806,7 +2124,7 @@ def message(self) -> str: msg = f"{info} freshness of {self.source_name}.{self.table_name}" return format_fancy_output_line( msg=msg, - status=ui.green(info), + status=green(info), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1820,7 +2138,7 @@ def code(self): def message(self) -> str: msg = "CANCEL query {}".format(self.conn_name) - return format_fancy_output_line(msg=msg, status=ui.red("CANCEL"), index=None, total=None) + return format_fancy_output_line(msg=msg, status=red("CANCEL"), index=None, total=None) @dataclass @@ -1861,7 +2179,7 @@ def message(self) -> str: "cancellation. Some queries may still be " "running!" ) - return ui.yellow(msg) + return yellow(msg) @dataclass @@ -1930,7 +2248,7 @@ def message(self) -> str: msg = "OK hook: {}".format(self.statement) return format_fancy_output_line( msg=msg, - status=ui.green(self.status), + status=green(self.status), index=self.index, total=self.total, execution_time=self.execution_time, @@ -1949,11 +2267,17 @@ def message(self) -> str: else: msg = f"SKIP {self.resource_type} {self.node_name}" return format_fancy_output_line( - msg=msg, status=ui.yellow("SKIP"), index=self.index, total=self.total + msg=msg, status=yellow("SKIP"), index=self.index, total=self.total ) -# Skipped Q035 +@dataclass +class NothingToDo(WarnLevel, pt.NothingToDo): + def code(self): + return "Q035" + + def message(self) -> str: + return "Nothing to do. Try checking your model configs and model specification args" @dataclass @@ -1974,6 +2298,15 @@ def message(self) -> str: return "Command end result" +@dataclass +class NoNodesSelected(WarnLevel, pt.NoNodesSelected): + def code(self): + return "Q038" + + def message(self) -> str: + return "No nodes selected!" 
+
+
 # =======================================================
 # W - Node testing
 # =======================================================
@@ -2003,7 +2336,7 @@ def message(self) -> str:
         """.strip()
 
         return "{prefix}\n{error}\n\n{note}".format(
-            prefix=ui.red(prefix), error=str(self.exc).strip(), note=internal_error_string
+            prefix=red(prefix), error=str(self.exc).strip(), note=internal_error_string
         )
 
 
@@ -2017,7 +2350,7 @@ def message(self) -> str:
         if node_description is None:
             node_description = self.unique_id
         prefix = "Unhandled error while executing {}".format(node_description)
-        return "{prefix}\n{error}".format(prefix=ui.red(prefix), error=str(self.exc).strip())
+        return "{prefix}\n{error}".format(prefix=red(prefix), error=str(self.exc).strip())
 
 
@@ -2241,7 +2574,7 @@ def code(self):
 
     def message(self) -> str:
         info = "Warning"
-        return ui.yellow(f"{info} in {self.resource_type} {self.node_name} ({self.path})")
+        return yellow(f"{info} in {self.resource_type} {self.node_name} ({self.path})")
 
 
@@ -2251,7 +2584,7 @@ def code(self):
 
     def message(self) -> str:
         info = "Failure"
-        return ui.red(f"{info} in {self.resource_type} {self.node_name} ({self.path})")
+        return red(f"{info} in {self.resource_type} {self.node_name} ({self.path})")
 
 
@@ -2270,6 +2603,7 @@ def code(self):
         return "Z024"
 
     def message(self) -> str:
+        # This is the message on the result object, cannot be built here
         return f" {self.msg}"
 
 
@@ -2302,13 +2636,16 @@ def message(self) -> str:
         return f" See test failures:\n {border}\n {msg}\n {border}"
 
 
+# FirstRunResultError and AfterFirstRunResultError are just splitting the message from the result
+# object into multiple log lines
+# TODO: is this really needed? See printer.py
 @dataclass
 class FirstRunResultError(ErrorLevel, EventStringFunctor, pt.FirstRunResultError):
     def code(self):
         return "Z028"
 
     def message(self) -> str:
-        return ui.yellow(self.msg)
+        return yellow(self.msg)
 
 
 @dataclass
@@ -2329,13 +2666,13 @@ def message(self) -> str:
         error_plural = pluralize(self.num_errors, "error")
         warn_plural = pluralize(self.num_warnings, "warning")
         if self.keyboard_interrupt:
-            message = ui.yellow("Exited because of keyboard interrupt.")
+            message = yellow("Exited because of keyboard interrupt.")
         elif self.num_errors > 0:
-            message = ui.red("Completed with {} and {}:".format(error_plural, warn_plural))
+            message = red("Completed with {} and {}:".format(error_plural, warn_plural))
         elif self.num_warnings > 0:
-            message = ui.yellow("Completed with {}:".format(warn_plural))
+            message = yellow("Completed with {}:".format(warn_plural))
         else:
-            message = ui.green("Completed successfully")
+            message = green("Completed successfully")
         return message
 
 
@@ -2350,7 +2687,7 @@ def code(self):
     def message(self) -> str:
         msg = f"SKIP relation {self.schema}.{self.relation} due to ephemeral model error"
         return format_fancy_output_line(
-            msg=msg, status=ui.red("ERROR SKIP"), index=self.index, total=self.total
+            msg=msg, status=red("ERROR SKIP"), index=self.index, total=self.total
         )
 
 
@@ -2446,31 +2783,10 @@ def message(self) -> str:
         return "Got an exception trying to initialize tracking"
 
 
-# Skipped Z045
-
-
-@dataclass
-class GeneralWarningMsg(WarnLevel, EventStringFunctor, pt.GeneralWarningMsg):
-    def code(self):
-        return "Z046"
-
-    def message(self) -> str:
-        return self.log_fmt.format(self.msg) if self.log_fmt is not None else self.msg
-
-
-@dataclass
-class GeneralWarningException(WarnLevel, pt.GeneralWarningException):
-    def code(self):
-        return "Z047"
-
-    def message(self) -> 
str: - return self.log_fmt.format(str(self.exc)) if self.log_fmt is not None else str(self.exc) - - @dataclass class EventBufferFull(WarnLevel, pt.EventBufferFull): def code(self): - return "Z048" + return "Z045" def message(self) -> str: return ( @@ -2479,12 +2795,14 @@ def message(self) -> str: ) +# this is the message from the result object @dataclass class RunResultWarningMessage(WarnLevel, EventStringFunctor, pt.RunResultWarningMessage): def code(self): - return "Z049" + return "Z046" def message(self) -> str: + # This is the message on the result object, cannot be formatted in event return self.msg @@ -2522,6 +2840,15 @@ def message(self) -> str: ProjectNameAlreadyExists(name="") ProjectCreated(project_name="") + # D - Deprecations ====================== + PackageRedirectDeprecation(old_name="", new_name="") + PackageInstallPathDeprecation() + ConfigSourcePathDeprecation(deprecated_path="", exp_path="") + ConfigDataPathDeprecation(deprecated_path="", exp_path="") + AdapterDeprecationWarning(old_name="", new_name="") + MetricAttributesRenamed(metric_name="") + ExposureNameDeprecation(exposure="") + # E - DB Adapter ====================== AdapterEventDebug() AdapterEventInfo() @@ -2580,6 +2907,7 @@ def message(self) -> str: NewConnectionOpening(connection_state="") CodeExecution(conn_name="", code_content="") CodeExecutionStatus(status="", elapsed=0.1) + CatalogGenerationError(exc="") WriteCatalogFailure(num_exceptions=0) CatalogWritten(path="") CannotGenerateDocs() @@ -2638,8 +2966,32 @@ def message(self) -> str: PartialParsingUpdateSchemaFile(file_id="") PartialParsingDeletedSource(unique_id="") PartialParsingDeletedExposure(unique_id="") - InvalidDisabledSourceInTestNode(msg="") - InvalidRefInTestNode(msg="") + InvalidDisabledTargetInTestNode( + resource_type_title="", + unique_id="", + original_file_path="", + target_kind="", + target_name="", + target_package="", + ) + UnusedResourceConfigPath(unused_config_paths=[]) + SeedIncreased(package_name="", name="") + SeedExceedsLimitSamePath(package_name="", name="") + SeedExceedsLimitAndPathChanged(package_name="", name="") + SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name="") + UnusedTables(unused_tables=[]) + WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type="") + NoNodeForYamlKey(patch_name="", yaml_key="", file_path="") + MacroPatchNotFound(patch_name="") + NodeNotFoundOrDisabled( + original_file_path="", + unique_id="", + resource_type_title="", + target_name="", + target_kind="", + target_package="", + disabled="", + ) # M - Deps generation ====================== @@ -2810,8 +3162,12 @@ def message(self) -> str: index=0, total=0, ) + NothingToDo() RunningOperationUncaughtError(exc="") EndRunResult() + NoNodesSelected() + DepsUnpinned(revision="", git="") + NoNodesForSelectionCriteria(spec_raw="") # W - Node testing ====================== @@ -2863,6 +3219,4 @@ def message(self) -> str: FlushEvents() FlushEventsFailure() TrackingInitializeFailure() - GeneralWarningMsg(msg="", log_fmt="") - GeneralWarningException(exc="", log_fmt="") EventBufferFull() diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index db824e19bf1..f0eeb4f6d4f 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -2,11 +2,9 @@ import functools from typing import NoReturn, Optional, Mapping, Any -from dbt.events.functions import fire_event, scrub_secrets, env_secrets -from dbt.events.types import GeneralWarningMsg, GeneralWarningException +from dbt.events.helpers import 
env_secrets, scrub_secrets +from dbt.events.types import GeneralMacroWarning from dbt.node_types import NodeType -from dbt import flags -from dbt.ui import line_wrap_message, warning_tag import dbt.dataclass_schema @@ -570,39 +568,13 @@ def doc_target_not_found( raise_compiler_error(msg, model) -def _get_target_failure_msg( +def get_not_found_or_disabled_msg( original_file_path, unique_id, resource_type_title, target_name: str, - target_model_package: Optional[str], - include_path: bool, - reason: str, target_kind: str, -) -> str: - target_package_string = "" - if target_model_package is not None: - target_package_string = "in package '{}' ".format(target_model_package) - - source_path_string = "" - if include_path: - source_path_string = " ({})".format(original_file_path) - - return "{} '{}'{} depends on a {} named '{}' {}which {}".format( - resource_type_title, - unique_id, - source_path_string, - target_kind, - target_name, - target_package_string, - reason, - ) - - -def get_target_not_found_or_disabled_msg( - node, - target_name: str, - target_package: Optional[str], + target_package: Optional[str] = None, disabled: Optional[bool] = None, ) -> str: if disabled is None: @@ -611,52 +583,19 @@ def get_target_not_found_or_disabled_msg( reason = "is disabled" else: reason = "was not found" - return _get_target_failure_msg( - node.original_file_path, - node.unique_id, - node.resource_type.title(), - target_name, - target_package, - include_path=True, - reason=reason, - target_kind="node", - ) - - -def ref_target_not_found( - model, - target_model_name: str, - target_model_package: Optional[str], - disabled: Optional[bool] = None, -) -> NoReturn: - msg = get_target_not_found_or_disabled_msg( - model, target_model_name, target_model_package, disabled - ) - raise_compiler_error(msg, model) + target_package_string = "" + if target_package is not None: + target_package_string = "in package '{}' ".format(target_package) -def get_not_found_or_disabled_msg( - node, - target_name: str, - target_kind: str, - target_package: Optional[str] = None, - disabled: Optional[bool] = None, -) -> str: - if disabled is None: - reason = "was not found or is disabled" - elif disabled is True: - reason = "is disabled" - else: - reason = "was not found" - return _get_target_failure_msg( - node.original_file_path, - node.unique_id, - node.resource_type.title(), + return "{} '{}' ({}) depends on a {} named '{}' {}which {}".format( + resource_type_title, + unique_id, + original_file_path, + target_kind, target_name, - target_package, - include_path=True, - reason=reason, - target_kind=target_kind, + target_package_string, + reason, ) @@ -668,7 +607,9 @@ def target_not_found( disabled: Optional[bool] = None, ) -> NoReturn: msg = get_not_found_or_disabled_msg( - node=node, + original_file_path=node.original_file_path, + unique_id=node.unique_id, + resource_type_title=node.resource_type.title(), target_name=target_name, target_kind=target_kind, target_package=target_package, @@ -1041,19 +982,6 @@ def raise_unrecognized_credentials_type(typename, supported_types): ) -def warn_invalid_patch(patch, resource_type): - msg = line_wrap_message( - f"""\ - '{patch.name}' is a {resource_type} node, but it is - specified in the {patch.yaml_key} section of - {patch.original_file_path}. - To fix this error, place the `{patch.name}` - specification under the {resource_type.pluralize()} key instead. 
- """ - ) - warn_or_error(msg, log_fmt=warning_tag("{}")) - - def raise_not_implemented(msg): raise NotImplementedException("ERROR: {}".format(msg)) @@ -1067,24 +995,8 @@ def raise_duplicate_alias( raise AliasException(f'Got duplicate keys: ({key_names}) all map to "{canonical_key}"') -def warn_or_error(msg, node=None, log_fmt=None): - if flags.WARN_ERROR: - raise_compiler_error(scrub_secrets(msg, env_secrets()), node) - else: - fire_event(GeneralWarningMsg(msg=msg, log_fmt=log_fmt)) - - -def warn_or_raise(exc, log_fmt=None): - if flags.WARN_ERROR: - raise exc - else: - fire_event(GeneralWarningException(exc=str(exc), log_fmt=log_fmt)) - - def warn(msg, node=None): - # there's no reason to expose log_fmt to macros - it's only useful for - # handling colors - warn_or_error(msg, node=node) + dbt.events.functions.warn_or_error(GeneralMacroWarning(msg=msg), node=node) return "" diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index 49b73fc71c4..89de27b3697 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -5,13 +5,12 @@ from .selector_methods import MethodManager from .selector_spec import SelectionCriteria, SelectionSpec, IndirectSelection -from dbt.events.functions import fire_event -from dbt.events.types import SelectorReportInvalidSelector +from dbt.events.functions import fire_event, warn_or_error +from dbt.events.types import SelectorReportInvalidSelector, NoNodesForSelectionCriteria from dbt.node_types import NodeType from dbt.exceptions import ( InternalException, InvalidSelectorException, - warn_or_error, ) from dbt.contracts.graph.compiled import GraphMemberNode from dbt.contracts.graph.manifest import Manifest @@ -24,11 +23,6 @@ def get_package_names(nodes): return set([node.split(".")[1] for node in nodes]) -def alert_non_existence(raw_spec, nodes): - if len(nodes) == 0: - warn_or_error(f"The selection criterion '{str(raw_spec)}' does not match any nodes") - - def can_select_indirectly(node): """If a node is not selected itself, but its parent(s) are, it may qualify for indirect selection. 
@@ -143,7 +137,7 @@ def select_nodes_recursively(self, spec: SelectionSpec) -> Tuple[Set[UniqueId],
         direct_nodes = self.incorporate_indirect_nodes(initial_direct, indirect_nodes)
 
-        if spec.expect_exists:
-            alert_non_existence(spec.raw, direct_nodes)
+        if spec.expect_exists and len(direct_nodes) == 0:
+            warn_or_error(NoNodesForSelectionCriteria(spec_raw=str(spec.raw)))
 
         return direct_nodes, indirect_nodes
 
diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py
index 29f93b5bae2..80fac715178 100644
--- a/core/dbt/parser/manifest.py
+++ b/core/dbt/parser/manifest.py
@@ -18,7 +18,7 @@
     get_adapter_package_names,
 )
 from dbt.helper_types import PathSet
-from dbt.events.functions import fire_event, get_invocation_id
+from dbt.events.functions import fire_event, get_invocation_id, warn_or_error
 from dbt.events.types import (
     PartialParsingFullReparseBecauseOfError,
     PartialParsingExceptionFile,
@@ -35,10 +35,10 @@
     PartialParsingNotEnabled,
     ParsedFileLoadFailed,
     PartialParseSaveFileNotFound,
-    InvalidDisabledSourceInTestNode,
-    InvalidRefInTestNode,
+    InvalidDisabledTargetInTestNode,
     PartialParsingProjectEnvVarsChanged,
     PartialParsingProfileEnvVarsChanged,
+    NodeNotFoundOrDisabled,
 )
 from dbt.logger import DbtProcessState
 from dbt.node_types import NodeType
@@ -71,11 +71,7 @@
 )
 from dbt.contracts.util import Writable
 from dbt.exceptions import (
-    ref_target_not_found,
-    get_target_not_found_or_disabled_msg,
     target_not_found,
-    get_not_found_or_disabled_msg,
-    warn_or_error,
 )
 from dbt.parser.base import Parser
 from dbt.parser.analysis import AnalysisParser
@@ -90,7 +86,6 @@
 from dbt.parser.seeds import SeedParser
 from dbt.parser.snapshots import SnapshotParser
 from dbt.parser.sources import SourcePatcher
-from dbt.ui import warning_tag
 from dbt.version import __version__
 from dbt.dataclass_schema import StrEnum, dbtClassMixin
 
@@ -955,65 +950,43 @@ def process_nodes(self):
         self.manifest.rebuild_ref_lookup()
 
 
-def invalid_ref_fail_unless_test(node, target_model_name, target_model_package, disabled):
-
-    if node.resource_type == NodeType.Test:
-        msg = get_target_not_found_or_disabled_msg(
-            node=node,
-            target_name=target_model_name,
-            target_package=target_model_package,
-            disabled=disabled,
-        )
-        if disabled:
-            fire_event(InvalidRefInTestNode(msg=msg))
-        else:
-            warn_or_error(msg, log_fmt=warning_tag("{}"))
-    else:
-        ref_target_not_found(
-            node,
-            target_model_name,
-            target_model_package,
-            disabled=disabled,
-        )
-
-
-def invalid_source_fail_unless_test(node, target_name, target_table_name, disabled):
+def invalid_target_fail_unless_test(
+    node,
+    target_name: str,
+    target_kind: str,
+    target_package: Optional[str] = None,
+    disabled: Optional[bool] = None,
+):
 
     if node.resource_type == 
NodeType.Test: - msg = get_target_not_found_or_disabled_msg( - node=node, - target_name=target_metric_name, - target_package=target_metric_package, - disabled=disabled, - ) - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error( + NodeNotFoundOrDisabled( + original_file_path=node.original_file_path, + unique_id=node.unique_id, + resource_type_title=node.resource_type.title(), + target_name=target_name, + target_kind=target_kind, + target_package=target_package if target_package else "", + disabled=str(disabled), + ) + ) else: target_not_found( node=node, - target_name=target_metric_name, - target_kind="metric", - target_package=target_metric_package, + target_name=target_name, + target_kind=target_kind, + target_package=target_package, disabled=disabled, ) @@ -1121,11 +1094,6 @@ def _process_docs_for_metrics(context: Dict[str, Any], metric: ParsedMetric) -> metric.description = get_rendered(metric.description, context) -# TODO: this isn't actually referenced anywhere? -def _process_derived_metrics(context: Dict[str, Any], metric: ParsedMetric) -> None: - metric.description = get_rendered(metric.description, context) - - def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: ParsedExposure): """Given a manifest and exposure in that manifest, process its refs""" for ref in exposure.refs: @@ -1153,10 +1121,11 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur # This may raise. Even if it doesn't, we don't want to add # this exposure to the graph b/c there is no destination exposure exposure.config.enabled = False - invalid_ref_fail_unless_test( - exposure, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=exposure, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) @@ -1195,13 +1164,13 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P # This may raise. Even if it doesn't, we don't want to add # this metric to the graph b/c there is no destination metric metric.config.enabled = False - invalid_ref_fail_unless_test( - metric, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=metric, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) - continue target_model_id = target_model.unique_id @@ -1239,13 +1208,13 @@ def _process_metrics_for_node( # This may raise. Even if it doesn't, we don't want to add # this node to the graph b/c there is no destination node node.config.enabled = False - invalid_metric_fail_unless_test( - node, - target_metric_name, - target_metric_package, + invalid_target_fail_unless_test( + node=node, + target_name=target_metric_name, + target_kind="source", + target_package=target_metric_package, disabled=(isinstance(target_metric, Disabled)), ) - continue target_metric_id = target_metric.unique_id @@ -1280,13 +1249,13 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif # This may raise. 
Even if it doesn't, we don't want to add # this node to the graph b/c there is no destination node node.config.enabled = False - invalid_ref_fail_unless_test( - node, - target_model_name, - target_model_package, + invalid_target_fail_unless_test( + node=node, + target_name=target_model_name, + target_kind="node", + target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), ) - continue target_model_id = target_model.unique_id @@ -1312,8 +1281,11 @@ def _process_sources_for_exposure( ) if target_source is None or isinstance(target_source, Disabled): exposure.config.enabled = False - invalid_source_fail_unless_test( - exposure, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=exposure, + target_name=f"{source_name}.{table_name}", + target_kind="source", + disabled=(isinstance(target_source, Disabled)), ) continue target_source_id = target_source.unique_id @@ -1332,8 +1304,11 @@ def _process_sources_for_metric(manifest: Manifest, current_project: str, metric ) if target_source is None or isinstance(target_source, Disabled): metric.config.enabled = False - invalid_source_fail_unless_test( - metric, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=metric, + target_name=f"{source_name}.{table_name}", + target_kind="source", + disabled=(isinstance(target_source, Disabled)), ) continue target_source_id = target_source.unique_id @@ -1354,8 +1329,11 @@ def _process_sources_for_node(manifest: Manifest, current_project: str, node: Ma if target_source is None or isinstance(target_source, Disabled): # this follows the same pattern as refs node.config.enabled = False - invalid_source_fail_unless_test( - node, source_name, table_name, disabled=(isinstance(target_source, Disabled)) + invalid_target_fail_unless_test( + node=node, + target_name=f"{source_name}.{table_name}", + target_kind="source", + disabled=(isinstance(target_source, Disabled)), ) continue target_source_id = target_source.unique_id diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 8b22427cb39..d47c2a29684 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -50,7 +50,6 @@ UnparsedSourceDefinition, ) from dbt.exceptions import ( - warn_invalid_patch, validator_error_message, JSONValidationException, raise_invalid_property_yml_version, @@ -60,9 +59,10 @@ raise_duplicate_macro_patch_name, InternalException, raise_duplicate_source_patch_name, - warn_or_error, CompilationException, ) +from dbt.events.functions import warn_or_error +from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroPatchNotFound from dbt.node_types import NodeType from dbt.parser.base import SimpleParser from dbt.parser.search import FileBlock @@ -74,7 +74,6 @@ TestBlock, Testable, ) -from dbt.ui import warning_tag from dbt.utils import get_pseudo_test_path, coerce_dict_str @@ -873,7 +872,15 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: if unique_id: resource_type = NodeType(unique_id.split(".")[0]) if resource_type.pluralize() != patch.yaml_key: - warn_invalid_patch(patch, resource_type) + warn_or_error( + WrongResourceSchemaFile( + patch_name=patch.name, + resource_type=resource_type, + plural_resource_type=resource_type.pluralize(), + yaml_key=patch.yaml_key, + file_path=patch.original_file_path, + ) + ) return elif patch.yaml_key == "analyses": @@ -912,12 +919,13 @@ def parse_patch(self, block: 
TargetBlock[NodeTarget], refs: ParserRef) -> None: node.patch(patch) else: - msg = ( - f"Did not find matching node for patch with name '{patch.name}' " - f"in the '{patch.yaml_key}' section of " - f"file '{source_file.path.original_file_path}'" + warn_or_error( + NoNodeForYamlKey( + patch_name=patch.name, + yaml_key=patch.yaml_key, + file_path=source_file.path.original_file_path, + ) ) - warn_or_error(msg, log_fmt=warning_tag("{}")) return # patches can't be overwritten @@ -977,8 +985,7 @@ def parse_patch(self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef) unique_id = f"macro.{patch.package_name}.{patch.name}" macro = self.manifest.macros.get(unique_id) if not macro: - msg = f'Found patch for macro "{patch.name}" ' f"which was not found" - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error(MacroPatchNotFound(patch_name=patch.name)) return if macro.patch_path: package_name, existing_file_path = macro.patch_path.split("://") diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 1c55281db56..30440076440 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -1,6 +1,6 @@ import itertools from pathlib import Path -from typing import Iterable, Dict, Optional, Set, Any +from typing import Iterable, Dict, Optional, Set, Any, List from dbt.adapters.factory import get_adapter from dbt.config import RuntimeConfig from dbt.context.context_config import ( @@ -24,11 +24,12 @@ UnparsedColumn, Time, ) -from dbt.exceptions import warn_or_error, InternalException +from dbt.events.functions import warn_or_error +from dbt.events.types import UnusedTables +from dbt.exceptions import InternalException from dbt.node_types import NodeType from dbt.parser.schemas import SchemaParser, ParserRef -from dbt import ui # An UnparsedSourceDefinition is taken directly from the yaml @@ -307,28 +308,27 @@ def warn_unused(self) -> None: unused_tables[key] = unused if unused_tables: - msg = self.get_unused_msg(unused_tables) - warn_or_error(msg, log_fmt=ui.warning_tag("{}")) + unused_tables_formatted = self.get_unused_msg(unused_tables) + warn_or_error(UnusedTables(unused_tables=unused_tables_formatted)) self.manifest.source_patches = {} def get_unused_msg( self, unused_tables: Dict[SourceKey, Optional[Set[str]]], - ) -> str: - msg = [ - "During parsing, dbt encountered source overrides that had no target:", - ] + ) -> List: + unused_tables_formatted = [] for key, table_names in unused_tables.items(): patch = self.manifest.source_patches[key] patch_name = f"{patch.overrides}.{patch.name}" if table_names is None: - msg.append(f" - Source {patch_name} (in {patch.path})") + unused_tables_formatted.append(f" - Source {patch_name} (in {patch.path})") else: for table_name in sorted(table_names): - msg.append(f" - Source table {patch_name}.{table_name} " f"(in {patch.path})") - msg.append("") - return "\n".join(msg) + unused_tables_formatted.append( + f" - Source table {patch_name}.{table_name} " f"(in {patch.path})" + ) + return unused_tables_formatted def merge_freshness_time_thresholds( diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index df0a181ba5c..43cd8e3f8fe 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -5,7 +5,9 @@ from dbt.task.runnable import GraphRunnableTask, ManifestTask from dbt.task.test import TestSelector from dbt.node_types import NodeType -from dbt.exceptions import RuntimeException, InternalException, warn_or_error +from dbt.events.functions import warn_or_error +from dbt.events.types import NoNodesSelected +from 
dbt.exceptions import RuntimeException, InternalException from dbt.logger import log_manager import logging import dbt.events.functions as event_logger @@ -69,7 +71,7 @@ def _iterate_selected_nodes(self): spec = self.get_selection_spec() nodes = sorted(selector.get_selected(spec)) if not nodes: - warn_or_error("No nodes selected!") + warn_or_error(NoNodesSelected()) return if self.manifest is None: raise InternalException("manifest is None in _iterate_selected_nodes") diff --git a/core/dbt/task/printer.py b/core/dbt/task/printer.py index 3861b41bef2..edb2592d194 100644 --- a/core/dbt/task/printer.py +++ b/core/dbt/task/printer.py @@ -120,6 +120,8 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals elif result.message is not None: first = True for line in result.message.split("\n"): + # TODO: why do we format like this? Is there a reason this needs to + # be split instead of sending it as a single log line? if first: fire_event(FirstRunResultError(msg=line)) first = False diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index af0de610c98..f12ce94f830 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -26,7 +26,7 @@ ModelMetadata, NodeCount, ) -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( EmptyLine, PrintCancelLine, @@ -36,6 +36,7 @@ QueryCancelationUnsupported, ConcurrencyLine, EndRunResult, + NothingToDo, ) from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.manifest import Manifest @@ -47,7 +48,6 @@ NotImplementedException, RuntimeException, FailFastException, - warn_or_error, ) from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, parse_difference, Graph @@ -57,7 +57,6 @@ import dbt.exceptions from dbt import flags import dbt.utils -from dbt.ui import warning_tag RESULT_FILE_NAME = "run_results.json" MANIFEST_FILE_NAME = "manifest.json" @@ -459,8 +458,7 @@ def run(self): if len(self._flattened_nodes) == 0: with TextOnly(): fire_event(EmptyLine()) - msg = "Nothing to do. 
Try checking your model configs and model specification args" - warn_or_error(msg, log_fmt=warning_tag("{}")) + warn_or_error(NothingToDo()) result = self.get_result( results=[], generated_at=datetime.utcnow(), diff --git a/test/unit/test_config.py b/test/unit/test_config.py index 8ca8238a7d0..2f4c7b45ca1 100644 --- a/test/unit/test_config.py +++ b/test/unit/test_config.py @@ -1084,35 +1084,6 @@ def test_archive_not_allowed(self): with self.assertRaises(dbt.exceptions.DbtProjectError): self.get_project() - def test__no_unused_resource_config_paths(self): - self.default_project_data.update({ - 'models': model_config, - 'seeds': {}, - }) - project = self.from_parts() - - resource_fqns = {'models': model_fqns} - unused = project.get_unused_resource_config_paths(resource_fqns, []) - self.assertEqual(len(unused), 0) - - def test__unused_resource_config_paths(self): - self.default_project_data.update({ - 'models': model_config['my_package_name'], - 'seeds': {}, - }) - project = self.from_parts() - - resource_fqns = {'models': model_fqns} - unused = project.get_unused_resource_config_paths(resource_fqns, []) - self.assertEqual(len(unused), 3) - - def test__get_unused_resource_config_paths_empty(self): - project = self.from_parts() - unused = project.get_unused_resource_config_paths({'models': frozenset(( - ('my_test_project', 'foo', 'bar'), - ('my_test_project', 'foo', 'baz'), - ))}, []) - self.assertEqual(len(unused), 0) def test__warn_for_unused_resource_config_paths_empty(self): project = self.from_parts() @@ -1172,26 +1143,17 @@ def from_parts(self, exc=None): else: return err - def test__get_unused_resource_config_paths(self): - project = self.from_parts() - unused = project.get_unused_resource_config_paths(self.used, []) - self.assertEqual(len(unused), 1) - self.assertEqual(unused[0], ('models', 'my_test_project', 'baz')) - @mock.patch.object(dbt.config.runtime, 'warn_or_error') - def test__warn_for_unused_resource_config_paths(self, warn_or_error): + def test__warn_for_unused_resource_config_paths(self): project = self.from_parts() - project.warn_for_unused_resource_config_paths(self.used, []) - warn_or_error.assert_called_once() - - def test__warn_for_unused_resource_config_paths_disabled(self): - project = self.from_parts() - unused = project.get_unused_resource_config_paths( - self.used, - frozenset([('my_test_project', 'baz')]) - ) - - self.assertEqual(len(unused), 0) + with mock.patch('dbt.config.runtime.warn_or_error') as warn_or_error_patch: + project.warn_for_unused_resource_config_paths(self.used, []) + warn_or_error_patch.assert_called_once() + event = warn_or_error_patch.call_args[0][0] + assert event.info.name == 'UnusedResourceConfigPath' + msg = event.info.msg + expected_msg = "- models.my_test_project.baz" + assert expected_msg in msg class TestRuntimeConfigFiles(BaseFileTest): diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py index e32267e2d6f..55559b13e17 100644 --- a/test/unit/test_graph_selector_methods.py +++ b/test/unit/test_graph_selector_methods.py @@ -981,7 +981,9 @@ def test_select_state_changed_seed_checksum_path_to_path(manifest, previous_stat with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'modified') warn_or_error_patch.assert_called_once() - msg = warn_or_error_patch.call_args[0][0] + event = warn_or_error_patch.call_args[0][0] + assert event.info.name == 'SeedExceedsLimitSamePath' + msg = event.info.msg assert 
msg.startswith('Found a seed (pkg.seed) >1MB in size') with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') @@ -996,7 +998,9 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state assert search_manifest_using_method( manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_called_once() - msg = warn_or_error_patch.call_args[0][0] + event = warn_or_error_patch.call_args[0][0] + assert event.info.name == 'SeedIncreased' + msg = event.info.msg assert msg.startswith('Found a seed (pkg.seed) >1MB in size') with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index c2064b84c1a..6ba1b1ba69c 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -289,8 +289,14 @@ def MockNode(): PartialParsingUpdateSchemaFile(file_id=""), PartialParsingDeletedSource(unique_id=""), PartialParsingDeletedExposure(unique_id=""), - InvalidDisabledSourceInTestNode(msg=""), - InvalidRefInTestNode(msg=""), + InvalidDisabledTargetInTestNode( + resource_type_title="", + unique_id="", + original_file_path="", + target_kind="", + target_name="", + target_package="", + ), RunningOperationCaughtError(exc=""), RunningOperationUncaughtError(exc=""), DbtProjectError(), @@ -420,8 +426,6 @@ def MockNode(): FlushEventsFailure(), TrackingInitializeFailure(), RetryExternalCall(attempt=0, max=0), - GeneralWarningMsg(msg="", log_fmt=""), - GeneralWarningException(exc="", log_fmt=""), PartialParsingProfileEnvVarsChanged(), AdapterEventDebug(name="", base_msg="", args=()), AdapterEventInfo(name="", base_msg="", args=()), From a42748433d7ad963296bb1ce0251b0b1d0875e08 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Tue, 1 Nov 2022 16:26:12 -0400 Subject: [PATCH 015/156] converting 023_exit_codes_tests (#6105) * converting 023_exit_codes_tests * use packages fixture, clean up test names --- .../023_exit_codes_tests/models/bad.sql | 2 - .../023_exit_codes_tests/models/dupe.sql | 8 - .../023_exit_codes_tests/models/good.sql | 8 - .../023_exit_codes_tests/models/schema.yml | 17 -- .../023_exit_codes_tests/seeds-bad/data.csv | 2 - .../023_exit_codes_tests/seeds-good/data.csv | 2 - .../023_exit_codes_tests/snapshots-bad/b.sql | 4 - .../023_exit_codes_tests/snapshots-good/g.sql | 4 - .../023_exit_codes_tests/test_exit_codes.py | 200 ------------------ tests/CONVERTING.md | 2 +- tests/functional/exit_codes/fixtures.py | 78 +++++++ .../functional/exit_codes/test_exit_codes.py | 123 +++++++++++ 12 files changed, 202 insertions(+), 248 deletions(-) delete mode 100644 test/integration/023_exit_codes_tests/models/bad.sql delete mode 100644 test/integration/023_exit_codes_tests/models/dupe.sql delete mode 100644 test/integration/023_exit_codes_tests/models/good.sql delete mode 100644 test/integration/023_exit_codes_tests/models/schema.yml delete mode 100644 test/integration/023_exit_codes_tests/seeds-bad/data.csv delete mode 100644 test/integration/023_exit_codes_tests/seeds-good/data.csv delete mode 100644 test/integration/023_exit_codes_tests/snapshots-bad/b.sql delete mode 100644 test/integration/023_exit_codes_tests/snapshots-good/g.sql delete mode 100644 test/integration/023_exit_codes_tests/test_exit_codes.py create mode 100644 tests/functional/exit_codes/fixtures.py create mode 100644 tests/functional/exit_codes/test_exit_codes.py 
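Before the file-by-file diff below, a minimal sketch of the shape these conversions target: legacy DBTIntegrationTest subclasses (with schema/models properties and run_dbt_and_check returning a (results, success) tuple) become plain pytest classes whose project files are declared as fixture dictionaries and whose exit status is asserted by run_dbt itself. The model contents here are illustrative, not part of the patch:

    import pytest
    from dbt.tests.util import run_dbt


    class TestExitCodesSketch:
        @pytest.fixture(scope="class")
        def models(self):
            # In-memory fixture strings replace the legacy suite's on-disk models/ dir.
            return {
                "good.sql": "select 1 as id",
                "bad.sql": "select bad sql here",
            }

        def test_run_succeeds(self, project):
            results = run_dbt(["run", "--model", "good"])
            assert len(results) == 1

        def test_run_fails(self, project):
            # expect_pass=False replaces the old manual check of the success flag.
            run_dbt(["run", "--model", "bad"], expect_pass=False)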
diff --git a/test/integration/023_exit_codes_tests/models/bad.sql b/test/integration/023_exit_codes_tests/models/bad.sql deleted file mode 100644 index dad7fe5fc10..00000000000 --- a/test/integration/023_exit_codes_tests/models/bad.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select bad sql here diff --git a/test/integration/023_exit_codes_tests/models/dupe.sql b/test/integration/023_exit_codes_tests/models/dupe.sql deleted file mode 100644 index f7bb37c8b71..00000000000 --- a/test/integration/023_exit_codes_tests/models/dupe.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select 1 as id, current_date as updated_at -union all -select 2 as id, current_date as updated_at -union all -select 3 as id, current_date as updated_at -union all -select 4 as id, current_date as updated_at diff --git a/test/integration/023_exit_codes_tests/models/good.sql b/test/integration/023_exit_codes_tests/models/good.sql deleted file mode 100644 index f7bb37c8b71..00000000000 --- a/test/integration/023_exit_codes_tests/models/good.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select 1 as id, current_date as updated_at -union all -select 2 as id, current_date as updated_at -union all -select 3 as id, current_date as updated_at -union all -select 4 as id, current_date as updated_at diff --git a/test/integration/023_exit_codes_tests/models/schema.yml b/test/integration/023_exit_codes_tests/models/schema.yml deleted file mode 100644 index f7243286b7b..00000000000 --- a/test/integration/023_exit_codes_tests/models/schema.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 -models: -- name: good - columns: - - name: updated_at - tests: - - not_null -- name: bad - columns: - - name: updated_at - tests: - - not_null -- name: dupe - columns: - - name: updated_at - tests: - - unique diff --git a/test/integration/023_exit_codes_tests/seeds-bad/data.csv b/test/integration/023_exit_codes_tests/seeds-bad/data.csv deleted file mode 100644 index fcc8e001bbd..00000000000 --- a/test/integration/023_exit_codes_tests/seeds-bad/data.csv +++ /dev/null @@ -1,2 +0,0 @@ -a,b,c -1,\2,3,a,a,a diff --git a/test/integration/023_exit_codes_tests/seeds-good/data.csv b/test/integration/023_exit_codes_tests/seeds-good/data.csv deleted file mode 100644 index bfde6bfa0b8..00000000000 --- a/test/integration/023_exit_codes_tests/seeds-good/data.csv +++ /dev/null @@ -1,2 +0,0 @@ -a,b,c -1,2,3 diff --git a/test/integration/023_exit_codes_tests/snapshots-bad/b.sql b/test/integration/023_exit_codes_tests/snapshots-bad/b.sql deleted file mode 100644 index 52425b7c9bc..00000000000 --- a/test/integration/023_exit_codes_tests/snapshots-bad/b.sql +++ /dev/null @@ -1,4 +0,0 @@ -{% snapshot good_snapshot %} - {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at_not_real')}} - select * from {{ schema }}.good -{% endsnapshot %} diff --git a/test/integration/023_exit_codes_tests/snapshots-good/g.sql b/test/integration/023_exit_codes_tests/snapshots-good/g.sql deleted file mode 100644 index 0c1205d9441..00000000000 --- a/test/integration/023_exit_codes_tests/snapshots-good/g.sql +++ /dev/null @@ -1,4 +0,0 @@ -{% snapshot good_snapshot %} - {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}} - select * from {{ schema }}.good -{% endsnapshot %} diff --git a/test/integration/023_exit_codes_tests/test_exit_codes.py b/test/integration/023_exit_codes_tests/test_exit_codes.py deleted file mode 100644 index 7da8d85e321..00000000000 --- 
a/test/integration/023_exit_codes_tests/test_exit_codes.py +++ /dev/null @@ -1,200 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - -import dbt.exceptions - - -class TestExitCodes(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "snapshot-paths": ['snapshots-good'], - } - - @use_profile('postgres') - def test_postgres_exit_code_run_succeed(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - self.assertTableDoesExist('good') - - @use_profile('postgres') - def test__postgres_exit_code_run_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'bad']) - self.assertEqual(len(results.results), 1) - self.assertFalse(success) - self.assertTableDoesNotExist('bad') - - @use_profile('postgres') - def test__postgres_schema_test_pass(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - results, success = self.run_dbt_and_check(['test', '--model', 'good']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - - @use_profile('postgres') - def test__postgres_schema_test_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'dupe']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - results, success = self.run_dbt_and_check(['test', '--model', 'dupe']) - self.assertEqual(len(results.results), 1) - self.assertFalse(success) - - @use_profile('postgres') - def test__postgres_compile(self): - results, success = self.run_dbt_and_check(['compile']) - self.assertEqual(len(results.results), 7) - self.assertTrue(success) - - @use_profile('postgres') - def test__postgres_snapshot_pass(self): - self.run_dbt_and_check(['run', '--model', 'good']) - results, success = self.run_dbt_and_check(['snapshot']) - self.assertEqual(len(results.results), 1) - self.assertTableDoesExist('good_snapshot') - self.assertTrue(success) - - -class TestExitCodesSnapshotFail(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "snapshot-paths": ['snapshots-bad'], - } - - @use_profile('postgres') - def test__postgres_snapshot_fail(self): - results, success = self.run_dbt_and_check(['run', '--model', 'good']) - self.assertTrue(success) - self.assertEqual(len(results.results), 1) - - results, success = self.run_dbt_and_check(['snapshot']) - self.assertEqual(len(results.results), 1) - self.assertTableDoesNotExist('good_snapshot') - self.assertFalse(success) - -class TestExitCodesDeps(DBTIntegrationTest): - - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def packages_config(self): - return { - "packages": [ - { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'dbt/1.0.0', - } - ] - } - - @use_profile('postgres') - def test_postgres_deps(self): - _, success = self.run_dbt_and_check(['deps']) - self.assertTrue(success) - - -class TestExitCodesDepsFail(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def 
packages_config(self): - return { - "packages": [ - { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'bad-branch', - }, - ] - } - - @use_profile('postgres') - def test_postgres_deps(self): - with self.assertRaises(dbt.exceptions.InternalException): - # this should fail - self.run_dbt_and_check(['deps']) - - -class TestExitCodesSeed(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds-good'], - 'seeds': { - 'quote_columns': False, - }, - } - - @use_profile('postgres') - def test_postgres_seed(self): - results, success = self.run_dbt_and_check(['seed']) - self.assertEqual(len(results.results), 1) - self.assertTrue(success) - - -class TestExitCodesSeedFail(DBTIntegrationTest): - @property - def schema(self): - return "exit_codes_test_023" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds-bad'], - 'seeds': { - 'quote_columns': False, - }, - } - - @use_profile('postgres') - def test_postgres_seed(self): - _, success = self.run_dbt_and_check(['seed']) - self.assertFalse(success) diff --git a/tests/CONVERTING.md b/tests/CONVERTING.md index 89801fc74b9..44057cad05b 100644 --- a/tests/CONVERTING.md +++ b/tests/CONVERTING.md @@ -30,7 +30,7 @@ * some of the legacy tests used a 'default_project' method to change (for example) the seeds directory to load a different seed. Don't do that. Copying a file is probably a better option. - +* If there are more than 50 lines of fixture strings, they should be defined in a fixtures.py and then imported. We definitely don't do this everywhere right now but should move to this model. 
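As a concrete illustration of the fixtures.py convention added above (file and variable names here are hypothetical, not from the patch):

    # tests/functional/my_feature/fixtures.py -- long fixture strings live here
    my_model_sql = """
    select 1 as id, current_date as updated_at
    """

    # tests/functional/my_feature/test_my_feature.py then imports what it needs:
    #     from tests.functional.my_feature.fixtures import my_model_sql
    # and returns it from a class-scoped `models` fixture, keeping the test
    # module focused on behavior rather than boilerplate.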
# Integration test directories that have been converted * 001\_simple\_copy\_tests => moved to 'basic' diff --git a/tests/functional/exit_codes/fixtures.py b/tests/functional/exit_codes/fixtures.py new file mode 100644 index 00000000000..23a0bef3897 --- /dev/null +++ b/tests/functional/exit_codes/fixtures.py @@ -0,0 +1,78 @@ +import pytest + +bad_sql = """ +select bad sql here +""" + +dupe_sql = """ +select 1 as id, current_date as updated_at +union all +select 2 as id, current_date as updated_at +union all +select 3 as id, current_date as updated_at +union all +select 4 as id, current_date as updated_at +""" + +good_sql = """ +select 1 as id, current_date as updated_at +union all +select 2 as id, current_date as updated_at +union all +select 3 as id, current_date as updated_at +union all +select 4 as id, current_date as updated_at +""" + +snapshots_good_sql = """ +{% snapshot good_snapshot %} + {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}} + select * from {{ schema }}.good +{% endsnapshot %} +""" + +snapshots_bad_sql = """ +{% snapshot good_snapshot %} + {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at_not_real')}} + select * from {{ schema }}.good +{% endsnapshot %} +""" + +schema_yml = """ +version: 2 +models: +- name: good + columns: + - name: updated_at + tests: + - not_null +- name: bad + columns: + - name: updated_at + tests: + - not_null +- name: dupe + columns: + - name: updated_at + tests: + - unique +""" + +data_seed_good_csv = """a,b,c +1,2,3 +""" + +data_seed_bad_csv = """a,b,c +1,\2,3,a,a,a +""" + + +class BaseConfigProject: + @pytest.fixture(scope="class") + def models(self): + return { + "bad.sql": bad_sql, + "dupe.sql": dupe_sql, + "good.sql": good_sql, + "schema.yml": schema_yml + } diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py new file mode 100644 index 00000000000..dbef6361713 --- /dev/null +++ b/tests/functional/exit_codes/test_exit_codes.py @@ -0,0 +1,123 @@ +import pytest + +import dbt.exceptions +from dbt.tests.util import ( + check_table_does_exist, + check_table_does_not_exist, + run_dbt +) +from tests.functional.exit_codes.fixtures import ( + BaseConfigProject, + snapshots_bad_sql, + snapshots_good_sql, + data_seed_bad_csv, + data_seed_good_csv +) + + +class TestExitCodes(BaseConfigProject): + @pytest.fixture(scope="class") + def snapshots(self): + return {"g.sql": snapshots_good_sql} + + def test_exit_code_run_succeed(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + check_table_does_exist(project.adapter, 'good') + + def test_exit_code_run_fail(self, project): + results = run_dbt(['run', '--model', 'bad'], expect_pass=False) + assert len(results) == 1 + check_table_does_not_exist(project.adapter, 'bad') + + def test_schema_test_pass(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + + results = run_dbt(['test', '--model', 'good']) + assert len(results) == 1 + + def test_schema_test_fail(self, project): + results = run_dbt(['run', '--model', 'dupe']) + assert len(results) == 1 + + results = run_dbt(['test', '--model', 'dupe'], expect_pass=False) + assert len(results) == 1 + + def test_compile(self, project): + results = run_dbt(['compile']) + assert len(results) == 7 + + def test_snapshot_pass(self, project): + results = run_dbt(['snapshot']) + assert len(results) == 1 + 
check_table_does_exist(project.adapter, 'good_snapshot') + + +class TestExitCodesSnapshotFail(BaseConfigProject): + @pytest.fixture(scope="class") + def snapshots(self): + return {"b.sql": snapshots_bad_sql} + + def test_snapshot_fail(self, project): + results = run_dbt(['run', '--model', 'good']) + assert len(results) == 1 + + results = run_dbt(['snapshot'], expect_pass=False) + assert len(results) == 1 + check_table_does_not_exist(project.adapter, 'good_snapshot') + + +class TestExitCodesDeps: + @pytest.fixture(scope="class") + def packages(self): + return { + "packages": [ + { + 'git': 'https://github.com/dbt-labs/dbt-integration-project', + 'revision': 'dbt/1.0.0', + } + ] + } + + def test_deps(self, project): + results = run_dbt(['deps']) + assert results is None + + +class TestExitCodesDepsFail: + @pytest.fixture(scope="class") + def packages(self): + return { + "packages": [ + { + 'git': 'https://github.com/dbt-labs/dbt-integration-project', + 'revision': 'bad-branch', + }, + ] + } + + def test_deps_fail(self, project): + with pytest.raises(dbt.exceptions.InternalException) as exc: + run_dbt(['deps']) + expected_msg = "Error checking out spec='bad-branch'" + assert expected_msg in str(exc.value) + + +class TestExitCodesSeed: + @pytest.fixture(scope="class") + def seeds(self): + return {"good.csv": data_seed_good_csv} + + def test_seed(self, project): + results = run_dbt(['seed']) + assert len(results) == 1 + + +class TestExitCodesSeedFail: + @pytest.fixture(scope="class") + def seeds(self): + return {"bad.csv": data_seed_bad_csv} + + def test_seed(self, project): + run_dbt(['seed'], expect_pass=False) From d23285b4bacbf1c75f88e09376a13d7ea74244a2 Mon Sep 17 00:00:00 2001 From: FishtownBuildBot <77737458+FishtownBuildBot@users.noreply.github.com> Date: Wed, 2 Nov 2022 09:36:56 -0600 Subject: [PATCH 016/156] Add new index.html and changelog yaml files from dbt-docs (#6112) --- .changes/unreleased/Docs-20221017-171411.yaml | 6 ++++++ core/dbt/include/index.html | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Docs-20221017-171411.yaml diff --git a/.changes/unreleased/Docs-20221017-171411.yaml b/.changes/unreleased/Docs-20221017-171411.yaml new file mode 100644 index 00000000000..6f480f3c4bd --- /dev/null +++ b/.changes/unreleased/Docs-20221017-171411.yaml @@ -0,0 +1,6 @@ +kind: Docs +time: 2022-10-17T17:14:11.715348-05:00 +custom: + Author: paulbenschmidt + Issue: "5880" + PR: "324" diff --git a/core/dbt/include/index.html b/core/dbt/include/index.html index 182b6b49f99..cd86f14ef7b 100644 --- a/core/dbt/include/index.html +++ b/core/dbt/include/index.html @@ -90,7 +90,7 @@ [hunk body omitted: a single changed line inside the minified, machine-generated dbt-docs JavaScript bundle, as the 2 +- diffstat above indicates; the blob carries no human-readable content]
formatting of style property and value definitions found in:"+d),f=!0;break}a=h[0];var g=h[1],m=h[2];this.properties[g]?this.parse(g,m)?(p.push({name:g,val:m}),l()):(r.error("Skipping property: Invalid property definition in: "+a),l()):(r.error("Skipping property: Invalid property name in: "+a),l())}if(f){s();break}this.selector(u);for(var v=0;v node").css({shape:"rectangle",padding:10,"background-color":"#eee","border-color":"#ccc","border-width":1}).selector("edge").css({width:3,"curve-style":"haystack"}).selector(":parent <-> node").css({"curve-style":"bezier","source-endpoint":"outside-to-line","target-endpoint":"outside-to-line"}).selector(":selected").css({"background-color":"#0169D9","line-color":"#0169D9","source-arrow-color":"#0169D9","target-arrow-color":"#0169D9","mid-source-arrow-color":"#0169D9","mid-target-arrow-color":"#0169D9"}).selector("node:parent:selected").css({"background-color":"#CCE1F9","border-color":"#aec8e5"}).selector(":active").css({"overlay-color":"black","overlay-padding":10,"overlay-opacity":.25}).selector("core").css({"selection-box-color":"#ddd","selection-box-opacity":.65,"selection-box-border-color":"#aaa","selection-box-border-width":1,"active-bg-color":"black","active-bg-opacity":.15,"active-bg-size":30,"outside-texture-bg-color":"#000","outside-texture-bg-opacity":.125}),this.defaultLength=this.length},e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(2),a={parse:function(e,t,n,o){if(i.fn(t))return this.parseImplWarn(e,t,n,o);var a=[e,t,n,"mapping"===o||!0===o||!1===o||null==o?"dontcare":o].join("$"),s=this.propCache=this.propCache||{},l=void 0;return(l=s[a])||(l=s[a]=this.parseImplWarn(e,t,n,o)),(n||"mapping"===o)&&(l=r.copy(l))&&(l.value=r.copy(l.value)),l},parseImplWarn:function(e,t,n,i){var o=this.parseImpl(e,t,n,i);return o||null==t||r.error("The style property `%s: %s` is invalid",e,t),o},parseImpl:function(e,t,n,a){e=r.camel2dash(e);var s=this.properties[e],l=t,c=this.types;if(!s)return null;if(void 0===t)return null;s.alias&&(s=s.pointsTo,e=s.name);var u=i.string(t);u&&(t=t.trim());var d=s.type;if(!d)return null;if(n&&(""===t||null===t))return{name:e,value:t,bypass:!0,deleteBypass:!0};if(i.fn(t))return{name:e,value:t,strValue:"fn",mapped:c.fn,bypass:n};var f=void 0,p=void 0;if(!u||a);else{if(f=new RegExp(c.data.regex).exec(t)){if(n)return!1;var h=c.data;return{name:e,value:f,strValue:""+t,mapped:h,field:f[1],bypass:n}}if(p=new RegExp(c.mapData.regex).exec(t)){if(n)return!1;if(d.multiple)return!1;var g=c.mapData;if(!d.color&&!d.number)return!1;var m=this.parse(e,p[4]);if(!m||m.mapped)return!1;var v=this.parse(e,p[5]);if(!v||v.mapped)return!1;if(m.value===v.value)return!1;if(d.color){var b=m.value,y=v.value;if(!(b[0]!==y[0]||b[1]!==y[1]||b[2]!==y[2]||b[3]!==y[3]&&(null!=b[3]&&1!==b[3]||null!=y[3]&&1!==y[3])))return!1}return{name:e,value:p,strValue:""+t,mapped:g,field:p[1],fieldMin:parseFloat(p[2]),fieldMax:parseFloat(p[3]),valueMin:m.value,valueMax:v.value,bypass:n}}}if(d.multiple&&"multiple"!==a){var x=void 0;if(x=u?t.split(/\s+/):i.array(t)?t:[t],d.evenMultiple&&x.length%2!=0)return null;for(var w=[],k=[],A=[],E=!1,S=0;Sd.max||d.strictMax&&t===d.max))return null;var P={name:e,value:t,strValue:""+t+(_||""),units:_,bypass:n};return d.unitless||"px"!==_&&"em"!==_?P.pfValue=t:P.pfValue="px"!==_&&_?this.getEmSizeInPixels()*t:t,"ms"!==_&&"s"!==_||(P.pfValue="ms"===_?t:1e3*t),"deg"!==_&&"rad"!==_||(P.pfValue="rad"===_?t:o.deg2rad(t)),"%"===_&&(P.pfValue=t/100),P}if(d.propList){var D=[],R=""+t;if("none"===R);else{for(var 
I=R.split(","),N=0;N0&&s>0&&!isNaN(n.w)&&!isNaN(n.h)&&n.w>0&&n.h>0)return{zoom:l=(l=(l=Math.min((a-2*t)/n.w,(s-2*t)/n.h))>this._private.maxZoom?this._private.maxZoom:l)t.maxZoom?t.maxZoom:s)t.maxZoom||!t.zoomingEnabled?a=!0:(t.zoom=l,o.push("zoom"))}if(i&&(!a||!e.cancelOnFailedZoom)&&t.panningEnabled){var c=e.pan;r.number(c.x)&&(t.pan.x=c.x,s=!1),r.number(c.y)&&(t.pan.y=c.y,s=!1),s||o.push("pan")}return o.length>0&&(o.push("viewport"),this.emit(o.join(" ")),this.notify({type:"viewport"})),this},center:function(e){var t=this.getCenterPan(e);return t&&(this._private.pan=t,this.emit("pan viewport"),this.notify({type:"viewport"})),this},getCenterPan:function(e,t){if(this._private.panningEnabled){if(r.string(e)){var n=e;e=this.mutableElements().filter(n)}else r.elementOrCollection(e)||(e=this.mutableElements());if(0!==e.length){var i=e.boundingBox(),o=this.width(),a=this.height();return{x:(o-(t=void 0===t?this._private.zoom:t)*(i.x1+i.x2))/2,y:(a-t*(i.y1+i.y2))/2}}}},reset:function(){return this._private.panningEnabled&&this._private.zoomingEnabled?(this.viewport({pan:{x:0,y:0},zoom:1}),this):this},invalidateSize:function(){this._private.sizeCache=null},size:function(){var e,t,n=this._private,r=n.container;return n.sizeCache=n.sizeCache||(r?(e=i.getComputedStyle(r),t=function(t){return parseFloat(e.getPropertyValue(t))},{width:r.clientWidth-t("padding-left")-t("padding-right"),height:r.clientHeight-t("padding-top")-t("padding-bottom")}):{width:1,height:1})},width:function(){return this.size().width},height:function(){return this.size().height},extent:function(){var e=this._private.pan,t=this._private.zoom,n=this.renderedExtent(),r={x1:(n.x1-e.x)/t,x2:(n.x2-e.x)/t,y1:(n.y1-e.y)/t,y2:(n.y2-e.y)/t};return r.w=r.x2-r.x1,r.h=r.y2-r.y1,r},renderedExtent:function(){var e=this.width(),t=this.height();return{x1:0,y1:0,x2:e,y2:t,w:e,h:t}}};a.centre=a.center,a.autolockNodes=a.autolock,a.autoungrabifyNodes=a.autoungrabify,e.exports=a},function(e,t,n){"use strict";var r=n(1),i=n(4),o=n(7),a=n(12),s=n(95),l=n(0),c=n(11),u={},d={};function f(e,t,n){var s=n,d=function(n){r.error("Can not register `"+t+"` for `"+e+"` since `"+n+"` already exists in the prototype and can not be overridden")};if("core"===e){if(a.prototype[t])return d(t);a.prototype[t]=n}else if("collection"===e){if(o.prototype[t])return d(t);o.prototype[t]=n}else if("layout"===e){for(var f=function(e){this.options=e,n.call(this,e),l.plainObject(this._private)||(this._private={}),this._private.cy=e.cy,this._private.listeners=[],this.createEmitter()},h=f.prototype=Object.create(n.prototype),g=[],m=0;m0;)m();c=n.collection();for(var v=function(e){var t=h[e],n=t.maxDegree(!1),r=t.filter((function(e){return e.degree(!1)===n}));c=c.add(r)},b=0;by.length-1;)y.push([]);y[J].push(X),Z.depth=J,Z.index=y[J].length-1}N()}var K=0;if(t.avoidOverlap)for(var ee=0;eec||0===t)&&(r+=l/u,i++)}return r/=i=Math.max(1,i),0===i&&(r=void 0),ie[e.id()]=r,r},ae=function(e,t){return oe(e)-oe(t)},se=0;se<3;se++){for(var le=0;le0&&y[0].length<=3?u/2:0),f=2*Math.PI/y[i].length*o;return 0===i&&1===y[0].length&&(d=1),{x:de+d*Math.cos(f),y:fe+d*Math.sin(f)}}return{x:de+(o+1-(a+1)/2)*s,y:(i+1)*c}}var p={x:de+(o+1-(a+1)/2)*s,y:(i+1)*c};return p},he={},ge=y.length-1;ge>=0;ge--)for(var me=y[ge],ve=0;ve1&&t.avoidOverlap){p*=1.75;var b=Math.cos(d)-Math.cos(0),y=Math.sin(d)-Math.sin(0),x=Math.sqrt(p*p/(b*b+y*y));f=Math.max(x,f)}return s.layoutPositions(this,t,(function(e,n){var 
r=t.startAngle+n*d*(a?1:-1),i=f*Math.cos(r),o=f*Math.sin(r);return{x:c+i,y:u+o}})),this},e.exports=s},function(e,t,n){"use strict";var r=n(1),i=n(2),o={fit:!0,padding:30,startAngle:1.5*Math.PI,sweep:void 0,clockwise:!0,equidistant:!1,minNodeSpacing:10,boundingBox:void 0,avoidOverlap:!0,nodeDimensionsIncludeLabels:!1,height:void 0,width:void 0,spacingFactor:void 0,concentric:function(e){return e.degree()},levelWidth:function(e){return e.maxDegree()/4},animate:!1,animationDuration:500,animationEasing:void 0,animateFilter:function(e,t){return!0},ready:void 0,stop:void 0,transform:function(e,t){return t}};function a(e){this.options=r.extend({},o,e)}a.prototype.run=function(){for(var e=this.options,t=e,n=void 0!==t.counterclockwise?!t.counterclockwise:t.clockwise,r=e.cy,o=t.eles.nodes().not(":parent"),a=i.makeBoundingBox(t.boundingBox?t.boundingBox:{x1:0,y1:0,w:r.width(),h:r.height()}),s=a.x1+a.w/2,l=a.y1+a.h/2,c=[],u=(t.startAngle,0),d=0;d0&&Math.abs(b[0].value-x.value)>=m&&(b=[],v.push(b)),b.push(x)}var w=u+t.minNodeSpacing;if(!t.avoidOverlap){var k=v.length>0&&v[0].length>1,A=(Math.min(a.w,a.h)/2-w)/(v.length+k?1:0);w=Math.min(w,A)}for(var E=0,S=0;S1&&t.avoidOverlap){var O=Math.cos(_)-Math.cos(0),j=Math.sin(_)-Math.sin(0),T=Math.sqrt(w*w/(O*O+j*j));E=Math.max(T,E)}$.r=E,E+=w}if(t.equidistant){for(var P=0,D=0,R=0;R0)var c=(f=r.nodeOverlap*s)*i/(b=Math.sqrt(i*i+o*o)),d=f*o/b;else{var f,p=u(e,i,o),h=u(t,-1*i,-1*o),g=h.x-p.x,m=h.y-p.y,v=g*g+m*m,b=Math.sqrt(v);c=(f=(e.nodeRepulsion+t.nodeRepulsion)/v)*g/b,d=f*m/b}e.isLocked||(e.offsetX-=c,e.offsetY-=d),t.isLocked||(t.offsetX+=c,t.offsetY+=d)}},l=function(e,t,n,r){if(n>0)var i=e.maxX-t.minX;else i=t.maxX-e.minX;if(r>0)var o=e.maxY-t.minY;else o=t.maxY-e.minY;return i>=0&&o>=0?Math.sqrt(i*i+o*o):0},u=function(e,t,n){var r=e.positionX,i=e.positionY,o=e.height||1,a=e.width||1,s=n/t,l=o/a,c={};return 0===t&&0n?(c.x=r,c.y=i+o/2,c):0t&&-1*l<=s&&s<=l?(c.x=r-a/2,c.y=i-a*n/2/t,c):0=l)?(c.x=r+o*t/2/n,c.y=i+o/2,c):0>n&&(s<=-1*l||s>=l)?(c.x=r-o*t/2/n,c.y=i-o/2,c):c},d=function(e,t){for(var n=0;n1){var h=t.gravity*d/p,g=t.gravity*f/p;u.offsetX+=h,u.offsetY+=g}}}}},p=function(e,t){var n=[],r=0,i=-1;for(n.push.apply(n,e.graphSet[0]),i+=e.graphSet[0].length;r<=i;){var o=n[r++],a=e.idToIndex[o],s=e.layoutNodes[a],l=s.children;if(0n)var i={x:n*e/r,y:n*t/r};else i={x:e,y:t};return i},m=function e(t,n){var r=t.parentId;if(null!=r){var i=n.layoutNodes[n.idToIndex[r]],o=!1;return(null==i.maxX||t.maxX+i.padRight>i.maxX)&&(i.maxX=t.maxX+i.padRight,o=!0),(null==i.minX||t.minX-i.padLefti.maxY)&&(i.maxY=t.maxY+i.padBottom,o=!0),(null==i.minY||t.minY-i.padTopg&&(f+=h+t.componentSpacing,d=0,p=0,h=0)}}}(0,i),r})).then((function(e){d.layoutNodes=e.layoutNodes,o.stop(),b()}));var b=function(){!0===e.animate||!1===e.animate?v({force:!0,next:function(){n.one("layoutstop",e.stop),n.emit({type:"layoutstop",layout:n})}}):e.eles.nodes().layoutPositions(n,e,(function(e){var t=d.layoutNodes[d.idToIndex[e.data("id")]];return{x:t.positionX,y:t.positionY}}))};return this},c.prototype.stop=function(){return this.stopped=!0,this.thread&&this.thread.stop(),this.emit("layoutstop"),this},c.prototype.destroy=function(){return this.thread&&this.thread.stop(),this};var u=function(e,t,n){for(var 
r=n.eles.edges(),i=n.eles.nodes(),s={isCompound:e.hasCompoundNodes(),layoutNodes:[],idToIndex:{},nodeSize:i.size(),graphSet:[],indexToGraph:[],layoutEdges:[],edgeSize:r.size(),temperature:n.initialTemp,clientWidth:e.width(),clientHeight:e.width(),boundingBox:o.makeBoundingBox(n.boundingBox?n.boundingBox:{x1:0,y1:0,w:e.width(),h:e.height()})},l=n.eles.components(),c={},u=0;u0)for(s.graphSet.push(A),u=0;ur.count?0:r.graph},f=function e(t,n,r,i){var o=i.graphSet[r];if(-1a){var h=u(),g=d();(h-1)*g>=a?u(h-1):(g-1)*h>=a&&d(g-1)}else for(;c*l=a?d(v+1):u(m+1)}var b=o.w/c,y=o.h/l;if(t.condense&&(b=0,y=0),t.avoidOverlap)for(var x=0;x=c&&(T=0,j++)},D={},R=0;R(r=i.sqdistToFiniteLine(e,t,w[k],w[k+1],w[k+2],w[k+3])))return b(n,r),!0}else if("bezier"===a.edgeType||"multibezier"===a.edgeType||"self"===a.edgeType||"compound"===a.edgeType)for(w=a.allpts,k=0;k+5(r=i.sqdistToQuadraticBezier(e,t,w[k],w[k+1],w[k+2],w[k+3],w[k+4],w[k+5])))return b(n,r),!0;v=v||o.source,x=x||o.target;var A=l.getArrowWidth(s,u),E=[{name:"source",x:a.arrowStartX,y:a.arrowStartY,angle:a.srcArrowAngle},{name:"target",x:a.arrowEndX,y:a.arrowEndY,angle:a.tgtArrowAngle},{name:"mid-source",x:a.midX,y:a.midY,angle:a.midsrcArrowAngle},{name:"mid-target",x:a.midX,y:a.midY,angle:a.midtgtArrowAngle}];for(k=0;k0&&(y(v),y(x))}function w(e,t,n){return o.getPrefixedProperty(e,t,n)}function k(n,r){var o,a=n._private,s=m;o=r?r+"-":"";var l=n.pstyle(o+"label").value;if("yes"===n.pstyle("text-events").strValue&&l){var c=a.rstyle,u=n.pstyle("text-border-width").pfValue,d=n.pstyle("text-background-padding").pfValue,f=w(c,"labelWidth",r)+u+2*s+2*d,p=w(c,"labelHeight",r)+u+2*s+2*d,h=w(c,"labelX",r),g=w(c,"labelY",r),v=w(a.rscratch,"labelAngle",r),y=h-f/2,x=h+f/2,k=g-p/2,A=g+p/2;if(v){var E=Math.cos(v),S=Math.sin(v),$=function(e,t){return{x:(e-=h)*E-(t-=g)*S+h,y:e*S+t*E+g}},C=$(y,k),_=$(y,A),O=$(x,k),j=$(x,A),T=[C.x,C.y,O.x,O.y,j.x,j.y,_.x,_.y];if(i.pointInsidePolygonPoints(e,t,T))return b(n),!0}else{var P={w:f,h:p,x1:y,x2:x,y1:k,y2:A};if(i.inBoundingBox(P,e,t))return b(n),!0}}}n&&(u=u.interactive);for(var A=u.length-1;A>=0;A--){var E=u[A];E.isNode()?y(E)||k(E):x(E)||k(E)||k(E,"source")||k(E,"target")}return d},getAllInBox:function(e,t,n,r){var o=this.getCachedZSortedEles().interactive,a=[],s=Math.min(e,n),l=Math.max(e,n),c=Math.min(t,r),u=Math.max(t,r);e=s,n=l,t=c,r=u;for(var d=i.makeBoundingBox({x1:e,y1:t,x2:n,y2:r}),f=0;fb?b+"$-$"+v:v+"$-$"+b,g&&(t="unbundled$-$"+h.id);var y=u[t];null==y&&(y=u[t]=[],d.push(t)),y.push(Bt),g&&(y.hasUnbundled=!0),m&&(y.hasBezier=!0)}else f.push(Bt)}for(var x=0;xGt.id()){var k=Ht;Ht=Gt,Gt=k}Wt=Ht.position(),Yt=Gt.position(),Xt=Ht.outerWidth(),Qt=Ht.outerHeight(),Zt=Gt.outerWidth(),Jt=Gt.outerHeight(),n=l.nodeShapes[this.getNodeShape(Ht)],o=l.nodeShapes[this.getNodeShape(Gt)],s=!1;var A={north:0,west:0,south:0,east:0,northwest:0,southwest:0,northeast:0,southeast:0},E=Wt.x,S=Wt.y,$=Xt,C=Qt,_=Yt.x,O=Yt.y,j=Zt,T=Jt,P=w.length;for(p=0;p=d||w){p={cp:b,segment:x};break}}if(p)break}b=p.cp;var k=(d-g)/(x=p.segment).length,A=x.t1-x.t0,E=u?x.t0+A*k:x.t1-A*k;E=r.bound(0,E,1),t=r.qbezierPtAt(b.p0,b.p1,b.p2,E),c=function(e,t,n,i){var o=r.bound(0,i-.001,1),a=r.bound(0,i+.001,1),s=r.qbezierPtAt(e,t,n,o),l=r.qbezierPtAt(e,t,n,a);return f(s,l)}(b.p0,b.p1,b.p2,E);break;case"straight":case"segments":case"haystack":var 
S,$,C,_,O=0,j=i.allpts.length;for(v=0;v+3=d));v+=2);E=(d-$)/S,E=r.bound(0,E,1),t=r.lineAt(C,_,E),c=f(C,_)}l("labelX",o,t.x),l("labelY",o,t.y),l("labelAutoAngle",o,c)}};c("source"),c("target"),this.applyLabelDimensions(e)}},applyLabelDimensions:function(e){this.applyPrefixedLabelDimensions(e),e.isEdge()&&(this.applyPrefixedLabelDimensions(e,"source"),this.applyPrefixedLabelDimensions(e,"target"))},applyPrefixedLabelDimensions:function(e,t){var n=e._private,r=this.getLabelText(e,t),i=this.calculateLabelDimensions(e,r);o.setPrefixedProperty(n.rstyle,"labelWidth",t,i.width),o.setPrefixedProperty(n.rscratch,"labelWidth",t,i.width),o.setPrefixedProperty(n.rstyle,"labelHeight",t,i.height),o.setPrefixedProperty(n.rscratch,"labelHeight",t,i.height)},getLabelText:function(e,t){var n=e._private,r=t?t+"-":"",i=e.pstyle(r+"label").strValue,a=e.pstyle("text-transform").value,s=function(e,r){return r?(o.setPrefixedProperty(n.rscratch,e,t,r),r):o.getPrefixedProperty(n.rscratch,e,t)};"none"==a||("uppercase"==a?i=i.toUpperCase():"lowercase"==a&&(i=i.toLowerCase()));var l=e.pstyle("text-wrap").value;if("wrap"===l){var c=s("labelKey");if(c&&s("labelWrapKey")===c)return s("labelWrapCachedText");for(var u=i.split("\n"),d=e.pstyle("text-max-width").pfValue,f=[],p=0;pd){for(var g=h.split(/\s+/),m="",v=0;vd);k++)x+=i[k],k===i.length-1&&(w=!0);return w||(x+="…"),x}return i},calculateLabelDimensions:function(e,t,n){var r=e._private.labelStyleKey+"$@$"+t;n&&(r+="$@$"+n);var i=this.labelDimCache||(this.labelDimCache={});if(i[r])return i[r];var o=e.pstyle("font-style").strValue,a=1*e.pstyle("font-size").pfValue+"px",s=e.pstyle("font-family").strValue,l=e.pstyle("font-weight").strValue,c=this.labelCalcDiv;c||(c=this.labelCalcDiv=document.createElement("div"),document.body.appendChild(c));var u=c.style;return u.fontFamily=s,u.fontStyle=o,u.fontSize=a,u.fontWeight=l,u.position="absolute",u.left="-9999px",u.top="-9999px",u.zIndex="-1",u.visibility="hidden",u.pointerEvents="none",u.padding="0",u.lineHeight="1","wrap"===e.pstyle("text-wrap").value?u.whiteSpace="pre":u.whiteSpace="normal",c.textContent=t,i[r]={width:Math.ceil(c.clientWidth/1),height:Math.ceil(c.clientHeight/1)},i[r]},calculateLabelAngles:function(e){var t=e._private.rscratch,n=e.isEdge(),r=e.pstyle("text-rotation"),i=r.strValue;"none"===i?t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle=0:n&&"autorotate"===i?(t.labelAngle=Math.atan(t.midDispY/t.midDispX),t.sourceLabelAngle=t.sourceLabelAutoAngle,t.targetLabelAngle=t.targetLabelAutoAngle):t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle="autorotate"===i?0:r.pfValue}};e.exports=a},function(e,t,n){"use strict";var r={getNodeShape:function(e){var t=e.pstyle("shape").value;if(e.isParent())return"rectangle"===t||"roundrectangle"===t||"cutrectangle"===t||"barrel"===t?t:"rectangle";if("polygon"===t){var n=e.pstyle("shape-polygon-points").value;return this.nodeShapes.makePolygon(n).name}return t}};e.exports=r},function(e,t,n){"use strict";var r={registerCalculationListeners:function(){var e=this.cy,t=e.collection(),n=this,r=function(e,n){var r=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];t.merge(e);for(var i=0;i=e.desktopTapThreshold2}var C=n(i);b&&(e.hoverData.tapholdCancelled=!0),s=!0,t(v,["mousemove","vmousemove","tapdrag"],i,{position:{x:p[0],y:p[1]}});var _=function(){e.data.bgActivePosistion=void 0,e.hoverData.selecting||l.emit("boxstart"),m[4]=1,e.hoverData.selecting=!0,e.redrawHint("select",!0),e.redraw()};if(3===e.hoverData.which){if(b){var 
O={originalEvent:i,type:"cxtdrag",position:{x:p[0],y:p[1]}};x?x.emit(O):l.emit(O),e.hoverData.cxtDragged=!0,e.hoverData.cxtOver&&v===e.hoverData.cxtOver||(e.hoverData.cxtOver&&e.hoverData.cxtOver.emit({originalEvent:i,type:"cxtdragout",position:{x:p[0],y:p[1]}}),e.hoverData.cxtOver=v,v&&v.emit({originalEvent:i,type:"cxtdragover",position:{x:p[0],y:p[1]}}))}}else if(e.hoverData.dragging){if(s=!0,l.panningEnabled()&&l.userPanningEnabled()){var T;if(e.hoverData.justStartedPan){var P=e.hoverData.mdownPos;T={x:(p[0]-P[0])*c,y:(p[1]-P[1])*c},e.hoverData.justStartedPan=!1}else T={x:w[0]*c,y:w[1]*c};l.panBy(T),e.hoverData.dragged=!0}p=e.projectIntoViewport(i.clientX,i.clientY)}else if(1!=m[4]||null!=x&&!x.isEdge()){if(x&&x.isEdge()&&x.active()&&x.unactivate(),x&&x.grabbed()||v==y||(y&&t(y,["mouseout","tapdragout"],i,{position:{x:p[0],y:p[1]}}),v&&t(v,["mouseover","tapdragover"],i,{position:{x:p[0],y:p[1]}}),e.hoverData.last=v),x)if(b){if(l.boxSelectionEnabled()&&C)x&&x.grabbed()&&(f(k),x.emit("free")),_();else if(x&&x.grabbed()&&e.nodeIsDraggable(x)){var D=!e.dragData.didDrag;D&&e.redrawHint("eles",!0),e.dragData.didDrag=!0;var R=[];e.hoverData.draggingEles||u(l.collection(k),{inDragLayer:!0});for(var I=0;I0&&e.redrawHint("eles",!0),e.dragData.possibleDragElements=l=[]),t(s,["mouseup","tapend","vmouseup"],r,{position:{x:o[0],y:o[1]}}),e.dragData.didDrag||e.hoverData.dragged||e.hoverData.selecting||e.hoverData.isOverThresholdDrag||t(c,["click","tap","vclick"],r,{position:{x:o[0],y:o[1]}}),s!=c||e.dragData.didDrag||e.hoverData.selecting||null!=s&&s._private.selectable&&(e.hoverData.dragging||("additive"===i.selectionType()||u?s.selected()?s.unselect():s.select():u||(i.$(":selected").unmerge(s).unselect(),s.select())),e.redrawHint("eles",!0)),e.hoverData.selecting){var h=i.collection(e.getAllInBox(a[0],a[1],a[2],a[3]));e.redrawHint("select",!0),h.length>0&&e.redrawHint("eles",!0),i.emit("boxend");var g=function(e){return e.selectable()&&!e.selected()};"additive"===i.selectionType()||u||i.$(":selected").unmerge(h).unselect(),h.emit("box").stdFilter(g).select().emit("boxselect"),e.redraw()}if(e.hoverData.dragging&&(e.hoverData.dragging=!1,e.redrawHint("select",!0),e.redrawHint("eles",!0),e.redraw()),!a[4]){e.redrawHint("drag",!0),e.redrawHint("eles",!0);var m=c&&c.grabbed();f(l),m&&c.emit("free")}}a[4]=0,e.hoverData.down=null,e.hoverData.cxtStarted=!1,e.hoverData.draggingEles=!1,e.hoverData.selecting=!1,e.hoverData.isOverThresholdDrag=!1,e.dragData.didDrag=!1,e.hoverData.dragged=!1,e.hoverData.dragDelta=[],e.hoverData.mdownPos=null,e.hoverData.mdownGPos=null}}),!1),e.registerBinding(e.container,"wheel",(function(t){if(!e.scrollingPage){var 
n,r=e.cy,i=e.projectIntoViewport(t.clientX,t.clientY),o=[i[0]*r.zoom()+r.pan().x,i[1]*r.zoom()+r.pan().y];e.hoverData.draggingEles||e.hoverData.dragging||e.hoverData.cxtStarted||0!==e.selection[4]?t.preventDefault():r.panningEnabled()&&r.userPanningEnabled()&&r.zoomingEnabled()&&r.userZoomingEnabled()&&(t.preventDefault(),e.data.wheelZooming=!0,clearTimeout(e.data.wheelTimeout),e.data.wheelTimeout=setTimeout((function(){e.data.wheelZooming=!1,e.redrawHint("eles",!0),e.redraw()}),150),n=null!=t.deltaY?t.deltaY/-250:null!=t.wheelDeltaY?t.wheelDeltaY/1e3:t.wheelDelta/1e3,n*=e.wheelSensitivity,1===t.deltaMode&&(n*=33),r.zoom({level:r.zoom()*Math.pow(10,n),renderedPosition:{x:o[0],y:o[1]}}))}}),!0),e.registerBinding(window,"scroll",(function(t){e.scrollingPage=!0,clearTimeout(e.scrollingPageTimeout),e.scrollingPageTimeout=setTimeout((function(){e.scrollingPage=!1}),250)}),!0),e.registerBinding(e.container,"mouseout",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseout",position:{x:n[0],y:n[1]}})}),!1),e.registerBinding(e.container,"mouseover",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseover",position:{x:n[0],y:n[1]}})}),!1);var T,P,D,R,I=function(e,t,n,r){return Math.sqrt((n-e)*(n-e)+(r-t)*(r-t))},N=function(e,t,n,r){return(n-e)*(n-e)+(r-t)*(r-t)};if(e.registerBinding(e.container,"touchstart",T=function(n){if(j(n)){e.touchData.capture=!0,e.data.bgActivePosistion=void 0;var r=e.cy,i=e.touchData.now,o=e.touchData.earlier;if(n.touches[0]){var a=e.projectIntoViewport(n.touches[0].clientX,n.touches[0].clientY);i[0]=a[0],i[1]=a[1]}if(n.touches[1]&&(a=e.projectIntoViewport(n.touches[1].clientX,n.touches[1].clientY),i[2]=a[0],i[3]=a[1]),n.touches[2]&&(a=e.projectIntoViewport(n.touches[2].clientX,n.touches[2].clientY),i[4]=a[0],i[5]=a[1]),n.touches[1]){f(e.dragData.touchDragEles);var s=e.findContainerClientCoords();S=s[0],$=s[1],C=s[2],_=s[3],v=n.touches[0].clientX-S,b=n.touches[0].clientY-$,y=n.touches[1].clientX-S,x=n.touches[1].clientY-$,O=0<=v&&v<=C&&0<=y&&y<=C&&0<=b&&b<=_&&0<=x&&x<=_;var c=r.pan(),p=r.zoom();if(w=I(v,b,y,x),k=N(v,b,y,x),E=[((A=[(v+y)/2,(b+x)/2])[0]-c.x)/p,(A[1]-c.y)/p],k<4e4&&!n.touches[2]){var h=e.findNearestElement(i[0],i[1],!0,!0),g=e.findNearestElement(i[2],i[3],!0,!0);return h&&h.isNode()?(h.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=h):g&&g.isNode()?(g.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=g):r.emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxt=!0,e.touchData.cxtDragged=!1,e.data.bgActivePosistion=void 0,void e.redraw()}}if(n.touches[2]);else if(n.touches[1]);else if(n.touches[0]){var m=e.findNearestElements(i[0],i[1],!0,!0),T=m[0];if(null!=T&&(T.activate(),e.touchData.start=T,e.touchData.starts=m,e.nodeIsGrabbable(T))){var P=e.dragData.touchDragEles=[],D=null;e.redrawHint("eles",!0),e.redrawHint("drag",!0),T.selected()?(D=r.$((function(t){return t.selected()&&e.nodeIsGrabbable(t)})),u(D,{addToList:P})):d(T,{addToList:P}),l(T);var 
R=function(e){return{originalEvent:n,type:e,position:{x:i[0],y:i[1]}}};T.emit(R("grabon")),D?D.forEach((function(e){e.emit(R("grab"))})):T.emit(R("grab"))}t(T,["touchstart","tapstart","vmousedown"],n,{position:{x:i[0],y:i[1]}}),null==T&&(e.data.bgActivePosistion={x:a[0],y:a[1]},e.redrawHint("select",!0),e.redraw()),e.touchData.singleTouchMoved=!1,e.touchData.singleTouchStartTime=+new Date,clearTimeout(e.touchData.tapholdTimeout),e.touchData.tapholdTimeout=setTimeout((function(){!1!==e.touchData.singleTouchMoved||e.pinching||e.touchData.selecting||(t(e.touchData.start,["taphold"],n,{position:{x:i[0],y:i[1]}}),e.touchData.start||r.$(":selected").unselect())}),e.tapholdDuration)}if(n.touches.length>=1){for(var M=e.touchData.startPosition=[],z=0;z=e.touchTapThreshold2}if(i&&e.touchData.cxt){n.preventDefault();var D=n.touches[0].clientX-S,R=n.touches[0].clientY-$,M=n.touches[1].clientX-S,z=n.touches[1].clientY-$,L=N(D,R,M,z);if(L/k>=2.25||L>=22500){e.touchData.cxt=!1,e.data.bgActivePosistion=void 0,e.redrawHint("select",!0);var B={originalEvent:n,type:"cxttapend",position:{x:c[0],y:c[1]}};e.touchData.start?(e.touchData.start.unactivate().emit(B),e.touchData.start=null):l.emit(B)}}if(i&&e.touchData.cxt){B={originalEvent:n,type:"cxtdrag",position:{x:c[0],y:c[1]}},e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),e.touchData.start?e.touchData.start.emit(B):l.emit(B),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxtDragged=!0;var F=e.findNearestElement(c[0],c[1],!0,!0);e.touchData.cxtOver&&F===e.touchData.cxtOver||(e.touchData.cxtOver&&e.touchData.cxtOver.emit({originalEvent:n,type:"cxtdragout",position:{x:c[0],y:c[1]}}),e.touchData.cxtOver=F,F&&F.emit({originalEvent:n,type:"cxtdragover",position:{x:c[0],y:c[1]}}))}else if(i&&n.touches[2]&&l.boxSelectionEnabled())n.preventDefault(),e.data.bgActivePosistion=void 0,this.lastThreeTouch=+new Date,e.touchData.selecting||l.emit("boxstart"),e.touchData.selecting=!0,e.redrawHint("select",!0),s&&0!==s.length&&void 0!==s[0]?(s[2]=(c[0]+c[2]+c[4])/3,s[3]=(c[1]+c[3]+c[5])/3):(s[0]=(c[0]+c[2]+c[4])/3,s[1]=(c[1]+c[3]+c[5])/3,s[2]=(c[0]+c[2]+c[4])/3+1,s[3]=(c[1]+c[3]+c[5])/3+1),s[4]=1,e.touchData.selecting=!0,e.redraw();else if(i&&n.touches[1]&&l.zoomingEnabled()&&l.panningEnabled()&&l.userZoomingEnabled()&&l.userPanningEnabled()){if(n.preventDefault(),e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),ee=e.dragData.touchDragEles){e.redrawHint("drag",!0);for(var q=0;q0)return h[0]}return null},p=Object.keys(d),h=0;h0?f:r.roundRectangleIntersectLine(o,a,e,t,n,i,s)},checkPoint:function(e,t,n,i,o,a,s){var l=r.getRoundRectangleRadius(i,o),c=2*l;if(r.pointInsidePolygon(e,t,this.points,a,s,i,o-c,[0,-1],n))return!0;if(r.pointInsidePolygon(e,t,this.points,a,s,i-c,o,[0,-1],n))return!0;var u=i/2+2*n,d=o/2+2*n,f=[a-u,s-d,a-u,s,a+u,s,a+u,s-d];return!!r.pointInsidePolygonPoints(e,t,f)||!!r.checkInEllipse(e,t,c,c,a+i/2-l,s+o/2-l,n)||!!r.checkInEllipse(e,t,c,c,a-i/2+l,s+o/2-l,n)}}},registerNodeShapes:function(){var 
e=this.nodeShapes={},t=this;this.generateEllipse(),this.generatePolygon("triangle",r.generateUnitNgonPointsFitToSquare(3,0)),this.generatePolygon("rectangle",r.generateUnitNgonPointsFitToSquare(4,0)),e.square=e.rectangle,this.generateRoundRectangle(),this.generateCutRectangle(),this.generateBarrel(),this.generateBottomRoundrectangle(),this.generatePolygon("diamond",[0,1,1,0,0,-1,-1,0]),this.generatePolygon("pentagon",r.generateUnitNgonPointsFitToSquare(5,0)),this.generatePolygon("hexagon",r.generateUnitNgonPointsFitToSquare(6,0)),this.generatePolygon("heptagon",r.generateUnitNgonPointsFitToSquare(7,0)),this.generatePolygon("octagon",r.generateUnitNgonPointsFitToSquare(8,0));var n=new Array(20),i=r.generateUnitNgonPoints(5,0),o=r.generateUnitNgonPoints(5,Math.PI/5),a=.5*(3-Math.sqrt(5));a*=1.57;for(var s=0;s0&&t.data.lyrTxrCache.invalidateElements(n)}))}l.CANVAS_LAYERS=3,l.SELECT_BOX=0,l.DRAG=1,l.NODE=2,l.BUFFER_COUNT=3,l.TEXTURE_BUFFER=0,l.MOTIONBLUR_BUFFER_NODE=1,l.MOTIONBLUR_BUFFER_DRAG=2,l.redrawHint=function(e,t){var n=this;switch(e){case"eles":n.data.canvasNeedsRedraw[l.NODE]=t;break;case"drag":n.data.canvasNeedsRedraw[l.DRAG]=t;break;case"select":n.data.canvasNeedsRedraw[l.SELECT_BOX]=t}};var u="undefined"!=typeof Path2D;l.path2dEnabled=function(e){if(void 0===e)return this.pathsEnabled;this.pathsEnabled=!!e},l.usePaths=function(){return u&&this.pathsEnabled},[n(126),n(127),n(128),n(129),n(130),n(131),n(132),n(133),n(134),n(135)].forEach((function(e){r.extend(l,e)})),e.exports=s},function(e,t,n){"use strict";var r=n(2),i=n(1),o=n(9),a=n(19),s={dequeue:"dequeue",downscale:"downscale",highQuality:"highQuality"},l=function(e){this.renderer=e,this.onDequeues=[],this.setupDequeueing()},c=l.prototype;c.reasons=s,c.getTextureQueue=function(e){return this.eleImgCaches=this.eleImgCaches||{},this.eleImgCaches[e]=this.eleImgCaches[e]||[]},c.getRetiredTextureQueue=function(e){var t=this.eleImgCaches.retired=this.eleImgCaches.retired||{};return t[e]=t[e]||[]},c.getElementQueue=function(){return this.eleCacheQueue=this.eleCacheQueue||new o((function(e,t){return t.reqs-e.reqs}))},c.getElementIdToQueue=function(){return this.eleIdToCacheQueue=this.eleIdToCacheQueue||{}},c.getElement=function(e,t,n,i,o){var a=this,l=this.renderer,c=e._private.rscratch,u=l.cy.zoom();if(0===t.w||0===t.h||!e.visible())return null;if(null==i&&(i=Math.ceil(r.log2(u*n))),i<-4)i=-4;else if(u>=3.99||i>2)return null;var d,f=Math.pow(2,i),p=t.h*f,h=t.w*f,g=c.imgCaches=c.imgCaches||{},m=g[i];if(m)return m;if(d=p<=25?25:p<=50?50:50*Math.ceil(p/50),p>1024||h>1024||e.isEdge()||e.isParent())return null;var v=a.getTextureQueue(d),b=v[v.length-2],y=function(){return a.recycleTexture(d,h)||a.addTexture(d,h)};b||(b=v[v.length-1]),b||(b=y()),b.width-b.usedWidthi;$--)C=a.getElement(e,t,n,$,s.downscale);_()}else{var O;if(!A&&!E&&!S)for($=i-1;$>=-4;$--){var j;if(j=g[$]){O=j;break}}if(k(O))return a.queueElement(e,i),O;b.context.translate(b.usedWidth,0),b.context.scale(f,f),l.drawElement(b.context,e,t,w),b.context.scale(1/f,1/f),b.context.translate(-b.usedWidth,0)}return m=g[i]={ele:e,x:b.usedWidth,texture:b,level:i,scale:f,width:h,height:p,scaledLabelShown:w},b.usedWidth+=Math.ceil(h+8),b.eleCaches.push(m),a.checkTextureFullness(b),m},c.invalidateElement=function(e){var t=e._private.rscratch.imgCaches;if(t)for(var n=-4;n<=2;n++){var r=t[n];if(r){var 
o=r.texture;o.invalidatedWidth+=r.width,t[n]=null,i.removeFromArray(o.eleCaches,r),this.removeFromQueue(e),this.checkTextureUtility(o)}}},c.checkTextureUtility=function(e){e.invalidatedWidth>=.5*e.width&&this.retireTexture(e)},c.checkTextureFullness=function(e){var t=this.getTextureQueue(e.height);e.usedWidth/e.width>.8&&e.fullnessChecks>=10?i.removeFromArray(t,e):e.fullnessChecks++},c.retireTexture=function(e){var t=e.height,n=this.getTextureQueue(t);i.removeFromArray(n,e),e.retired=!0;for(var r=e.eleCaches,o=0;o=t)return a.retired=!1,a.usedWidth=0,a.invalidatedWidth=0,a.fullnessChecks=0,i.clearArray(a.eleCaches),a.context.setTransform(1,0,0,1,0,0),a.context.clearRect(0,0,a.width,a.height),i.removeFromArray(r,a),n.push(a),a}},c.queueElement=function(e,t){var n=this.getElementQueue(),r=this.getElementIdToQueue(),i=e.id(),o=r[i];if(o)o.level=Math.max(o.level,t),o.reqs++,n.updateItem(o);else{var a={ele:e,level:t,reqs:1};n.push(a),r[i]=a}},c.dequeue=function(e){for(var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=[],i=0;i<1&&t.size()>0;i++){var o=t.pop(),a=o.ele;if(null==a._private.rscratch.imgCaches[o.level]){n[a.id()]=null,r.push(o);var l=a.boundingBox();this.getElement(a,l,e,o.level,s.dequeue)}}return r},c.removeFromQueue=function(e){var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=n[e.id()];null!=r&&(r.reqs=i.MAX_INT,t.updateItem(r),t.pop(),n[e.id()]=null)},c.onDequeue=function(e){this.onDequeues.push(e)},c.offDequeue=function(e){i.removeFromArray(this.onDequeues,e)},c.setupDequeueing=a.setupDequeueing({deqRedrawThreshold:100,deqCost:.15,deqAvgCost:.1,deqNoDrawCost:.9,deqFastCost:.9,deq:function(e,t,n){return e.dequeue(t,n)},onDeqd:function(e,t){for(var n=0;n=3.99||n>2)return null;o.validateLayersElesOrdering(n,e);var l,c,u=o.layersByLevel,d=Math.pow(2,n),f=u[n]=u[n]||[];if(o.levelIsComplete(n,e))return f;!function(){var t=function(t){if(o.validateLayersElesOrdering(t,e),o.levelIsComplete(t,e))return c=u[t],!0},i=function(e){if(!c)for(var r=n+e;-4<=r&&r<=2&&!t(r);r+=e);};i(1),i(-1);for(var a=f.length-1;a>=0;a--){var s=f[a];s.invalid&&r.removeFromArray(f,s)}}();var p=function(t){var r=(t=t||{}).after;if(function(){if(!l){l=i.makeBoundingBox();for(var t=0;t16e6)return null;var a=o.makeLayer(l,n);if(null!=r){var s=f.indexOf(r)+1;f.splice(s,0,a)}else(void 0===t.insert||t.insert)&&f.unshift(a);return a};if(o.skipping&&!s)return null;for(var h=null,g=e.length/1,m=!s,v=0;v=g||!i.boundingBoxInBoundingBox(h.bb,b.boundingBox()))&&!(h=p({insert:!0,after:h})))return null;c||m?o.queueLayer(h,b):o.drawEleInLayer(h,b,n,t),h.eles.push(b),x[n]=h}}return c||(m?null:f)},c.getEleLevelForLayerLevel=function(e,t){return e},c.drawEleInLayer=function(e,t,n,r){var i=this.renderer,o=e.context,a=t.boundingBox();if(0!==a.w&&0!==a.h&&t.visible()){var s=this.eleTxrCache,l=s.reasons.highQuality;n=this.getEleLevelForLayerLevel(n,r);var c=s.getElement(t,a,null,n,l);c?(f(o,!1),o.drawImage(c.texture.canvas,c.x,0,c.width,c.height,a.x1,a.y1,a.w,a.h),f(o,!0)):i.drawElement(o,t)}},c.levelIsComplete=function(e,t){var n=this.layersByLevel[e];if(!n||0===n.length)return!1;for(var r=0,i=0;i0)return!1;if(o.invalid)return!1;r+=o.eles.length}return r===t.length},c.validateLayersElesOrdering=function(e,t){var n=this.layersByLevel[e];if(n)for(var r=0;r0){e=!0;break}}return e},c.invalidateElements=function(e){var 
t=this;t.lastInvalidationTime=r.performanceNow(),0!==e.length&&t.haveLayers()&&t.updateElementsInLayers(e,(function(e,n,r){t.invalidateLayer(e)}))},c.invalidateLayer=function(e){if(this.lastInvalidationTime=r.performanceNow(),!e.invalid){var t=e.level,n=e.eles,i=this.layersByLevel[t];r.removeFromArray(i,e),e.elesQueue=[],e.invalid=!0,e.replacement&&(e.replacement.invalid=!0);for(var o=0;o0&&void 0!==arguments[0]?arguments[0]:f;e.lineWidth=h,e.lineCap="butt",i.strokeStyle(e,d[0],d[1],d[2],n),i.drawEdgePath(t,e,o.allpts,p)},m=function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:f;i.drawArrowheads(e,t,n)};if(e.lineJoin="round","yes"===t.pstyle("ghost").value){var v=t.pstyle("ghost-offset-x").pfValue,b=t.pstyle("ghost-offset-y").pfValue,y=t.pstyle("ghost-opacity").value,x=f*y;e.translate(v,b),g(x),m(x),e.translate(-v,-b)}g(),m(),function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c;e.lineWidth=l,"self"!==o.edgeType||a?e.lineCap="round":e.lineCap="butt",i.strokeStyle(e,u[0],u[1],u[2],n),i.drawEdgePath(t,e,o.allpts,"solid")}(),i.drawElementText(e,t,r),n&&e.translate(s.x1,s.y1)}},drawEdgePath:function(e,t,n,r){var i=e._private.rscratch,o=t,a=void 0,s=!1,l=this.usePaths();if(l){var c=n.join("$");i.pathCacheKey&&i.pathCacheKey===c?(a=t=i.pathCache,s=!0):(a=t=new Path2D,i.pathCacheKey=c,i.pathCache=a)}if(o.setLineDash)switch(r){case"dotted":o.setLineDash([1,1]);break;case"dashed":o.setLineDash([6,3]);break;case"solid":o.setLineDash([])}if(!s&&!i.badLine)switch(t.beginPath&&t.beginPath(),t.moveTo(n[0],n[1]),i.edgeType){case"bezier":case"self":case"compound":case"multibezier":if(e.hasClass("horizontal")){var u=n[4],d=n[5],f=(n[0]+n[4])/2;t.lineTo(n[0]+10,n[1]),t.bezierCurveTo(f,n[1],f,n[5],n[4]-10,n[5]),t.lineTo(u,d)}else if(e.hasClass("vertical")){var p=n[4],h=n[5],g=(n[1]+n[5])/2;t.bezierCurveTo(n[0],g,n[4],g,n[4],n[5]-10),t.lineTo(p,h)}else for(var m=2;m+30||j>0&&O>0){var P=f-T;switch(k){case"left":P-=m;break;case"center":P-=m/2}var D=p-v-T,R=m+2*T,I=v+2*T;if(_>0){var N=e.fillStyle,M=t.pstyle("text-background-color").value;e.fillStyle="rgba("+M[0]+","+M[1]+","+M[2]+","+_*o+")","roundrectangle"==t.pstyle("text-background-shape").strValue?(s=P,l=D,c=R,u=I,d=(d=2)||5,(a=e).beginPath(),a.moveTo(s+d,l),a.lineTo(s+c-d,l),a.quadraticCurveTo(s+c,l,s+c,l+d),a.lineTo(s+c,l+u-d),a.quadraticCurveTo(s+c,l+u,s+c-d,l+u),a.lineTo(s+d,l+u),a.quadraticCurveTo(s,l+u,s,l+u-d),a.lineTo(s,l+d),a.quadraticCurveTo(s,l,s+d,l),a.closePath(),a.fill()):e.fillRect(P,D,R,I),e.fillStyle=N}if(j>0&&O>0){var z=e.strokeStyle,L=e.lineWidth,B=t.pstyle("text-border-color").value,F=t.pstyle("text-border-style").value;if(e.strokeStyle="rgba("+B[0]+","+B[1]+","+B[2]+","+O*o+")",e.lineWidth=j,e.setLineDash)switch(F){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"double":e.lineWidth=j/4,e.setLineDash([]);break;case"solid":e.setLineDash([])}if(e.strokeRect(P,D,R,I),"double"===F){var q=j/2;e.strokeRect(P+q,D+q,R-2*q,I-2*q)}e.setLineDash&&e.setLineDash([]),e.lineWidth=L,e.strokeStyle=z}}var V=2*t.pstyle("text-outline-width").pfValue;if(V>0&&(e.lineWidth=V),"wrap"===t.pstyle("text-wrap").value){var U=r.getPrefixedProperty(i,"labelWrapCachedLines",n),H=v/U.length;switch(A){case"top":p-=(U.length-1)*H;break;case"center":case"bottom":p-=(U.length-1)*H}for(var G=0;G0&&e.strokeText(U[G],f,p),e.fillText(U[G],f,p),p+=H}else V>0&&e.strokeText(h,f,p),e.fillText(h,f,p);0!==E&&(e.rotate(-E),e.translate(-$,-C))}}},e.exports=o},function(e,t,n){"use strict";var 
r=n(0),i={drawNode:function(e,t,n,i){var o,a,s=this,l=t._private,c=l.rscratch,u=t.position();if(r.number(u.x)&&r.number(u.y)&&t.visible()){var d=t.effectiveOpacity(),f=s.usePaths(),p=void 0,h=!1,g=t.padding();o=t.width()+2*g,a=t.height()+2*g;var m=void 0;n&&(m=n,e.translate(-m.x1,-m.y1));for(var v=t.pstyle("background-image").value,b=new Array(v.length),y=new Array(v.length),x=0,w=0;w0&&void 0!==arguments[0]?arguments[0]:C;s.fillStyle(e,$[0],$[1],$[2],t)},P=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:j;s.strokeStyle(e,_[0],_[1],_[2],t)},D=t.pstyle("shape").strValue,R=t.pstyle("shape-polygon-points").pfValue;if(f){var I=D+"$"+o+"$"+a+("polygon"===D?"$"+R.join("$"):"");e.translate(u.x,u.y),c.pathCacheKey===I?(p=c.pathCache,h=!0):(p=new Path2D,c.pathCacheKey=I,c.pathCache=p)}var N,M,z,L=function(){if(!h){var n=u;f&&(n={x:0,y:0}),s.nodeShapes[s.getNodeShape(t)].draw(p||e,n.x,n.y,o,a)}f?e.fill(p):e.fill()},B=function(){for(var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,r=l.backgrounding,i=0,o=0;o0&&void 0!==arguments[0]&&arguments[0],r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:d;s.hasPie(t)&&(s.drawPie(e,t,r),n&&(f||s.nodeShapes[s.getNodeShape(t)].draw(e,u.x,u.y,o,a)))},q=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,n=(E>0?E:-E)*t,r=E>0?0:255;0!==E&&(s.fillStyle(e,r,r,r,n),f?e.fill(p):e.fill())},V=function(){if(S>0){if(e.lineWidth=S,e.lineCap="butt",e.setLineDash)switch(O){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"solid":case"double":e.setLineDash([])}if(f?e.stroke(p):e.stroke(),"double"===O){e.lineWidth=S/3;var t=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",f?e.stroke(p):e.stroke(),e.globalCompositeOperation=t}e.setLineDash&&e.setLineDash([])}};if("yes"===t.pstyle("ghost").value){var U=t.pstyle("ghost-offset-x").pfValue,H=t.pstyle("ghost-offset-y").pfValue,G=t.pstyle("ghost-opacity").value,W=G*d;e.translate(U,H),T(G*C),L(),B(W),F(0!==E||0!==S),q(W),P(G*j),V(),e.translate(-U,-H)}T(),L(),B(),F(0!==E||0!==S),q(),P(),V(),f&&e.translate(-u.x,-u.y),s.drawElementText(e,t,i),N=t.pstyle("overlay-padding").pfValue,M=t.pstyle("overlay-opacity").value,z=t.pstyle("overlay-color").value,M>0&&(s.fillStyle(e,z[0],z[1],z[2],M),s.nodeShapes.roundrectangle.draw(e,u.x,u.y,o+2*N,a+2*N),e.fill()),n&&e.translate(m.x1,m.y1)}},hasPie:function(e){return(e=e[0])._private.hasPie},drawPie:function(e,t,n,r){t=t[0],r=r||t.position();var i=t.cy().style(),o=t.pstyle("pie-size"),a=r.x,s=r.y,l=t.width(),c=t.height(),u=Math.min(l,c)/2,d=0;this.usePaths()&&(a=0,s=0),"%"===o.units?u*=o.pfValue:void 0!==o.pfValue&&(u=o.pfValue/2);for(var f=1;f<=i.pieBackgroundN;f++){var p=t.pstyle("pie-"+f+"-background-size").value,h=t.pstyle("pie-"+f+"-background-color").value,g=t.pstyle("pie-"+f+"-background-opacity").value*n,m=p/100;m+d>1&&(m=1-d);var v=1.5*Math.PI+2*Math.PI*d,b=v+2*Math.PI*m;0===p||d>=1||d+m>1||(e.beginPath(),e.moveTo(a,s),e.arc(a,s,u,v,b),e.closePath(),this.fillStyle(e,h[0],h[1],h[2],g),e.fill(),d+=m)}}};e.exports=i},function(e,t,n){"use strict";var r={},i=n(1);r.getPixelRatio=function(){var e=this.data.contexts[0];if(null!=this.forcedPixelRatio)return this.forcedPixelRatio;var t=e.backingStorePixelRatio||e.webkitBackingStorePixelRatio||e.mozBackingStorePixelRatio||e.msBackingStorePixelRatio||e.oBackingStorePixelRatio||e.backingStorePixelRatio||1;return(window.devicePixelRatio||1)/t},r.paintCache=function(e){for(var 
t,n=this.paintCaches=this.paintCaches||[],r=!0,i=0;is.minMbLowQualFrames&&(s.motionBlurPxRatio=s.mbPxRBlurry)),s.clearingMotionBlur&&(s.motionBlurPxRatio=1),s.textureDrawLastFrame&&!f&&(d[s.NODE]=!0,d[s.SELECT_BOX]=!0);var y=c.style()._private.coreStyle,x=c.zoom(),w=void 0!==o?o:x,k=c.pan(),A={x:k.x,y:k.y},E={zoom:x,pan:{x:k.x,y:k.y}},S=s.prevViewport;void 0===S||E.zoom!==S.zoom||E.pan.x!==S.pan.x||E.pan.y!==S.pan.y||m&&!g||(s.motionBlurPxRatio=1),a&&(A=a),w*=l,A.x*=l,A.y*=l;var $=s.getCachedZSortedEles();function C(e,t,n,r,i){var o=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",s.fillStyle(e,255,255,255,s.motionBlurTransparency),e.fillRect(t,n,r,i),e.globalCompositeOperation=o}function _(e,r){var i,l,c,d;s.clearingMotionBlur||e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]&&e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]?(i=A,l=w,c=s.canvasWidth,d=s.canvasHeight):(i={x:k.x*h,y:k.y*h},l=x*h,c=s.canvasWidth*h,d=s.canvasHeight*h),e.setTransform(1,0,0,1,0,0),"motionBlur"===r?C(e,0,0,c,d):t||void 0!==r&&!r||e.clearRect(0,0,c,d),n||(e.translate(i.x,i.y),e.scale(l,l)),a&&e.translate(a.x,a.y),o&&e.scale(o,o)}if(f||(s.textureDrawLastFrame=!1),f){if(s.textureDrawLastFrame=!0,!s.textureCache){s.textureCache={},s.textureCache.bb=c.mutableElements().boundingBox(),s.textureCache.texture=s.data.bufferCanvases[s.TEXTURE_BUFFER];var O=s.data.bufferContexts[s.TEXTURE_BUFFER];O.setTransform(1,0,0,1,0,0),O.clearRect(0,0,s.canvasWidth*s.textureMult,s.canvasHeight*s.textureMult),s.render({forcedContext:O,drawOnlyNodeLayer:!0,forcedPxRatio:l*s.textureMult}),(E=s.textureCache.viewport={zoom:c.zoom(),pan:c.pan(),width:s.canvasWidth,height:s.canvasHeight}).mpan={x:(0-E.pan.x)/E.zoom,y:(0-E.pan.y)/E.zoom}}d[s.DRAG]=!1,d[s.NODE]=!1;var j=u.contexts[s.NODE],T=s.textureCache.texture;E=s.textureCache.viewport,s.textureCache.bb,j.setTransform(1,0,0,1,0,0),p?C(j,0,0,E.width,E.height):j.clearRect(0,0,E.width,E.height);var P=y["outside-texture-bg-color"].value,D=y["outside-texture-bg-opacity"].value;s.fillStyle(j,P[0],P[1],P[2],D),j.fillRect(0,0,E.width,E.height),x=c.zoom(),_(j,!1),j.clearRect(E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l),j.drawImage(T,E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l)}else s.textureOnViewport&&!t&&(s.textureCache=null);var R=c.extent(),I=s.pinching||s.hoverData.dragging||s.swipePanning||s.data.wheelZooming||s.hoverData.draggingEles,N=s.hideEdgesOnViewport&&I,M=[];if(M[s.NODE]=!d[s.NODE]&&p&&!s.clearedForMotionBlur[s.NODE]||s.clearingMotionBlur,M[s.NODE]&&(s.clearedForMotionBlur[s.NODE]=!0),M[s.DRAG]=!d[s.DRAG]&&p&&!s.clearedForMotionBlur[s.DRAG]||s.clearingMotionBlur,M[s.DRAG]&&(s.clearedForMotionBlur[s.DRAG]=!0),d[s.NODE]||n||r||M[s.NODE]){var z=p&&!M[s.NODE]&&1!==h;_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]:u.contexts[s.NODE]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.nondrag,l,R):s.drawLayeredElements(j,$.nondrag,l,R),s.debug&&s.drawDebugPoints(j,$.nondrag),n||p||(d[s.NODE]=!1)}if(!r&&(d[s.DRAG]||n||M[s.DRAG])&&(z=p&&!M[s.DRAG]&&1!==h,_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]:u.contexts[s.DRAG]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.drag,l,R):s.drawCachedElements(j,$.drag,l,R),s.debug&&s.drawDebugPoints(j,$.drag),n||p||(d[s.DRAG]=!1)),s.showFps||!r&&d[s.SELECT_BOX]&&!n){if(_(j=t||u.contexts[s.SELECT_BOX]),1==s.selection[4]&&(s.hoverData.selecting||s.touchData.selecting)){x=s.cy.zoom();var 
L=y["selection-box-border-width"].value/x;j.lineWidth=L,j.fillStyle="rgba("+y["selection-box-color"].value[0]+","+y["selection-box-color"].value[1]+","+y["selection-box-color"].value[2]+","+y["selection-box-opacity"].value+")",j.fillRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]),L>0&&(j.strokeStyle="rgba("+y["selection-box-border-color"].value[0]+","+y["selection-box-border-color"].value[1]+","+y["selection-box-border-color"].value[2]+","+y["selection-box-opacity"].value+")",j.strokeRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]))}if(u.bgActivePosistion&&!s.hoverData.selecting){x=s.cy.zoom();var B=u.bgActivePosistion;j.fillStyle="rgba("+y["active-bg-color"].value[0]+","+y["active-bg-color"].value[1]+","+y["active-bg-color"].value[2]+","+y["active-bg-opacity"].value+")",j.beginPath(),j.arc(B.x,B.y,y["active-bg-size"].pfValue/x,0,2*Math.PI),j.fill()}var F=s.lastRedrawTime;if(s.showFps&&F){F=Math.round(F);var q=Math.round(1e3/F);j.setTransform(1,0,0,1,0,0),j.fillStyle="rgba(255, 0, 0, 0.75)",j.strokeStyle="rgba(255, 0, 0, 0.75)",j.lineWidth=1,j.fillText("1 frame = "+F+" ms = "+q+" fps",0,20),j.strokeRect(0,30,250,20),j.fillRect(0,30,250*Math.min(q/60,1),20)}n||(d[s.SELECT_BOX]=!1)}if(p&&1!==h){var V=u.contexts[s.NODE],U=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_NODE],H=u.contexts[s.DRAG],G=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_DRAG],W=function(e,t,n){e.setTransform(1,0,0,1,0,0),n||!b?e.clearRect(0,0,s.canvasWidth,s.canvasHeight):C(e,0,0,s.canvasWidth,s.canvasHeight);var r=h;e.drawImage(t,0,0,s.canvasWidth*r,s.canvasHeight*r,0,0,s.canvasWidth,s.canvasHeight)};(d[s.NODE]||M[s.NODE])&&(W(V,U,M[s.NODE]),d[s.NODE]=!1),(d[s.DRAG]||M[s.DRAG])&&(W(H,G,M[s.DRAG]),d[s.DRAG]=!1)}s.prevViewport=E,s.clearingMotionBlur&&(s.clearingMotionBlur=!1,s.motionBlurCleared=!0,s.motionBlur=!0),p&&(s.motionBlurTimeout=setTimeout((function(){s.motionBlurTimeout=null,s.clearedForMotionBlur[s.NODE]=!1,s.clearedForMotionBlur[s.DRAG]=!1,s.motionBlur=!1,s.clearingMotionBlur=!f,s.mbFrames=0,d[s.NODE]=!0,d[s.DRAG]=!0,s.redraw()}),100)),t||c.emit("render")},e.exports=r},function(e,t,n){"use strict";for(var r=n(2),i={drawPolygonPath:function(e,t,n,r,i,o){var a=r/2,s=i/2;e.beginPath&&e.beginPath(),e.moveTo(t+a*o[0],n+s*o[1]);for(var l=1;l0&&a>0){p.clearRect(0,0,o,a),p.globalCompositeOperation="source-over";var h=this.getCachedZSortedEles();if(e.full)p.translate(-n.x1*c,-n.y1*c),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(n.x1*c,n.y1*c);else{var g=t.pan(),m={x:g.x*c,y:g.y*c};c*=t.zoom(),p.translate(m.x,m.y),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(-m.x,-m.y)}e.bg&&(p.globalCompositeOperation="destination-over",p.fillStyle=e.bg,p.rect(0,0,o,a),p.fill())}return f},i.png=function(e){return a(e,this.bufferCanvasImage(e),"image/png")},i.jpg=function(e){return a(e,this.bufferCanvasImage(e),"image/jpeg")},e.exports=i},function(e,t,n){"use strict";var r={nodeShapeImpl:function(e,t,n,r,i,o,a){switch(e){case"ellipse":return this.drawEllipsePath(t,n,r,i,o);case"polygon":return this.drawPolygonPath(t,n,r,i,o,a);case"roundrectangle":return this.drawRoundRectanglePath(t,n,r,i,o);case"cutrectangle":return this.drawCutRectanglePath(t,n,r,i,o);case"bottomroundrectangle":return this.drawBottomRoundRectanglePath(t,n,r,i,o);case"barrel":return this.drawBarrelPath(t,n,r,i,o)}}};e.exports=r},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(18),a=function e(){if(!(this instanceof e))return new 
e;this.length=0},s=a.prototype;s.instanceString=function(){return"stylesheet"},s.selector=function(e){return this[this.length++]={selector:e,properties:[]},this},s.css=function(e,t){var n=this.length-1;if(r.string(e))this[n].properties.push({name:e,value:t});else if(r.plainObject(e))for(var a=e,s=0;s=0&&(e._idleTimeoutId=setTimeout((function(){e._onTimeout&&e._onTimeout()}),t))},n(239),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,n(35))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,i,o,a,s,l=1,c={},u=!1,d=e.document,f=Object.getPrototypeOf&&Object.getPrototypeOf(e);f=f&&f.setTimeout?f:e,"[object process]"==={}.toString.call(e.process)?r=function(e){t.nextTick((function(){h(e)}))}:!function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?e.MessageChannel?((o=new MessageChannel).port1.onmessage=function(e){h(e.data)},r=function(e){o.port2.postMessage(e)}):d&&"onreadystatechange"in d.createElement("script")?(i=d.documentElement,r=function(e){var t=d.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,i.removeChild(t),t=null},i.appendChild(t)}):r=function(e){setTimeout(h,0,e)}:(a="setImmediate$"+Math.random()+"$",s=function(t){t.source===e&&"string"==typeof t.data&&0===t.data.indexOf(a)&&h(+t.data.slice(a.length))},e.addEventListener?e.addEventListener("message",s,!1):e.attachEvent("onmessage",s),r=function(t){e.postMessage(a+t,"*")}),f.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n1)for(var n=1;n=t||n<0||m&&e-c>=o}function w(){var e=p();if(x(e))return k(e);s=setTimeout(w,function(e){var n=t-(e-l);return m?f(n,o-(e-c)):n}(e))}function k(e){return s=void 0,v&&r?b(e):(r=i=void 0,a)}function A(){var e=p(),n=x(e);if(r=arguments,i=this,l=e,n){if(void 0===s)return y(l);if(m)return s=setTimeout(w,t),b(l)}return void 0===s&&(s=setTimeout(w,t)),a}return t=g(t)||0,h(n)&&(u=!!n.leading,o=(m="maxWait"in n)?d(g(n.maxWait)||0,t):o,v="trailing"in n?!!n.trailing:v),A.cancel=function(){void 0!==s&&clearTimeout(s),c=0,r=l=i=s=void 0},A.flush=function(){return void 0===s?a:k(p())},A}}).call(this,n(35))},function(e,t,n){e.exports=n(243)},function(e,t,n){var r,i,o;(function(){var n,a,s,l,c,u,d,f,p,h,g,m,v,b,y;s=Math.floor,h=Math.min,a=function(e,t){return et?1:0},p=function(e,t,n,r,i){var o;if(null==n&&(n=0),null==i&&(i=a),n<0)throw new Error("lo must be non-negative");for(null==r&&(r=e.length);nn;0<=n?t++:t--)c.push(t);return c}.apply(this).reverse()).length;rg;0<=g?++u:--u)m.push(c(e,n));return m},b=function(e,t,n,r){var i,o,s;for(null==r&&(r=a),i=e[n];n>t&&r(i,o=e[s=n-1>>1])<0;)e[n]=o,n=s;return e[n]=i},y=function(e,t,n){var r,i,o,s,l;for(null==n&&(n=a),i=e.length,l=t,o=e[t],r=2*t+1;r'+e.content+"":s+=">"+e.content+"";var l=t(s);return l.data("selector",e.selector),l.data("on-click-function",e.onClickFunction),l.data("show",void 0===e.show||e.show),l}function y(){var e;l("active")&&(e=s.children(),t(e).each((function(){x(t(this))})),i.off("tapstart",n),s.remove(),c(s=void 0,void 0),c("active",!1),c("anyVisibleChild",!1))}function x(e){var n="string"==typeof 
e?t("#"+e):e,r=n.data("cy-context-menus-cxtfcn"),o=n.data("selector"),a=n.data("call-on-click-function"),s=n.data("cy-context-menus-cxtcorefcn");r&&i.off("cxttap",o,r),s&&i.off("cxttap",s),a&&n.off("click",a),n.remove()}"get"!==e&&(c("options",a=function(e,t){var n={};for(var r in e)n[r]=e[r];for(var r in t)n[r]=t[r];return n}(r,e)),l("active")&&y(),c("active",!0),o=u(a.contextMenuClasses),(s=t("
")).addClass("cy-context-menus-cxt-menu"),c("cxtMenu",s),t("body").append(s),s=s,g(a.menuItems),i.on("tapstart",n=function(){f(s),c("cxtMenuPosition",void 0),c("currentCyEvent",void 0)}),t(".cy-context-menus-cxt-menu").contextmenu((function(){return!1})));return function(e){return{isActive:function(){return l("active")},appendMenuItem:function(t){return m(t),e},appendMenuItems:function(t){return g(t),e},removeMenuItem:function(t){return x(t),e},setTrailingDivider:function(n,r){return function(e,n){var r=t("#"+e);n?r.addClass("cy-context-menus-divider"):r.removeClass("cy-context-menus-divider")}(n,r),e},insertBeforeMenuItem:function(t,n){return v(t,n),e},moveBeforeOtherMenuItem:function(n,r){return function(e,n){if(e!==n){var r=t("#"+e).detach(),i=t("#"+n);r.insertBefore(i)}}(n,r),e},disableMenuItem:function(n){return t("#"+n).attr("disabled",!0),e},enableMenuItem:function(n){return t("#"+n).attr("disabled",!1),e},hideMenuItem:function(n){return t("#"+n).data("show",!1),f(t("#"+n)),e},showMenuItem:function(n){return t("#"+n).data("show",!0),d(t("#"+n)),e},destroy:function(){return y(),e}}}(this)}))}};e.exports&&(e.exports=o),void 0===(r=function(){return o}.call(t,n,t,e))||(e.exports=r),"undefined"!=typeof cytoscape&&i&&o(cytoscape,i)}()},function(e,t,n){var r;r=function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,function(t){return e[t]}.bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=0)}([function(e,t,n){var r=n(1),i=function(e){e&&e("layout","dagre",r)};"undefined"!=typeof cytoscape&&i(cytoscape),e.exports=i},function(e,t,n){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var i=n(2),o=n(3),a=n(4);function s(e){this.options=o({},i,e)}s.prototype.run=function(){var e=this.options,t=e.cy,n=e.eles,i=function(e,t){return"function"==typeof t?t.apply(e,[e]):t},o=e.boundingBox||{x1:0,y1:0,w:t.width(),h:t.height()};void 0===o.x2&&(o.x2=o.x1+o.w),void 0===o.w&&(o.w=o.x2-o.x1),void 0===o.y2&&(o.y2=o.y1+o.h),void 0===o.h&&(o.h=o.y2-o.y1);var s=new a.graphlib.Graph({multigraph:!0,compound:!0}),l={},c=function(e,t){null!=t&&(l[e]=t)};c("nodesep",e.nodeSep),c("edgesep",e.edgeSep),c("ranksep",e.rankSep),c("rankdir",e.rankDir),c("ranker",e.ranker),s.setGraph(l),s.setDefaultEdgeLabel((function(){return{}})),s.setDefaultNodeLabel((function(){return{}}));for(var u=n.nodes(),d=0;d1?t-1:0),r=1;r-1}},function(e,t,n){var r=n(75);e.exports=function(e,t){var n=this.__data__,i=r(n,e);return i<0?(++this.size,n.push([e,t])):n[i][1]=t,this}},function(e,t,n){var r=n(74);e.exports=function(){this.__data__=new 
r,this.size=0}},function(e,t){e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},function(e,t){e.exports=function(e){return this.__data__.get(e)}},function(e,t){e.exports=function(e){return this.__data__.has(e)}},function(e,t,n){var r=n(74),i=n(117),o=n(118);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!i||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new o(a)}return n.set(e,t),this.size=n.size,this}},function(e,t,n){var r=n(64),i=n(262),o=n(23),a=n(151),s=/^\[object .+?Constructor\]$/,l=Function.prototype,c=Object.prototype,u=l.toString,d=c.hasOwnProperty,f=RegExp("^"+u.call(d).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");e.exports=function(e){return!(!o(e)||i(e))&&(r(e)?f:s).test(a(e))}},function(e,t,n){var r=n(58),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,s=r?r.toStringTag:void 0;e.exports=function(e){var t=o.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(e){}var i=a.call(e);return r&&(t?e[s]=n:delete e[s]),i}},function(e,t){var n=Object.prototype.toString;e.exports=function(e){return n.call(e)}},function(e,t,n){var r,i=n(263),o=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";e.exports=function(e){return!!o&&o in e}},function(e,t,n){var r=n(29)["__core-js_shared__"];e.exports=r},function(e,t){e.exports=function(e,t){return null==e?void 0:e[t]}},function(e,t,n){var r=n(266),i=n(74),o=n(117);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(o||i),string:new r}}},function(e,t,n){var r=n(267),i=n(268),o=n(269),a=n(270),s=n(271);function l(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t0){if(++t>=800)return arguments[0]}else t=0;return e.apply(void 0,arguments)}}},function(e,t,n){var r=n(173),i=n(340),o=n(344),a=n(174),s=n(345),l=n(129);e.exports=function(e,t,n){var c=-1,u=i,d=e.length,f=!0,p=[],h=p;if(n)f=!1,u=o;else if(d>=200){var g=t?null:s(e);if(g)return l(g);f=!1,u=a,h=new r}else h=t?[]:p;e:for(;++c-1}},function(e,t,n){var r=n(188),i=n(342),o=n(343);e.exports=function(e,t,n){return t==t?o(e,t,n):r(e,i,n)}},function(e,t){e.exports=function(e){return e!=e}},function(e,t){e.exports=function(e,t,n){for(var r=n-1,i=e.length;++r1||1===t.length&&e.hasEdge(t[0],t[0])}))}},function(e,t,n){var r=n(22);e.exports=function(e,t,n){return function(e,t,n){var r={},i=e.nodes();return i.forEach((function(e){r[e]={},r[e][e]={distance:0},i.forEach((function(t){e!==t&&(r[e][t]={distance:Number.POSITIVE_INFINITY})})),n(e).forEach((function(n){var i=n.v===e?n.w:n.v,o=t(n);r[e][i]={distance:o,predecessor:e}}))})),i.forEach((function(e){var t=r[e];i.forEach((function(n){var o=r[n];i.forEach((function(n){var r=o[e],i=t[n],a=o[n],s=r.distance+i.distance;s0;){if(n=l.removeMin(),r.has(s,n))a.setEdge(n,s[n]);else{if(u)throw new Error("Input graph is not connected: "+e);u=!0}e.nodeEdges(n).forEach(c)}return a}},function(e,t,n){"use strict";var r=n(11),i=n(399),o=n(402),a=n(403),s=n(20).normalizeRanks,l=n(405),c=n(20).removeEmptyRanks,u=n(406),d=n(407),f=n(408),p=n(409),h=n(418),g=n(20),m=n(28).Graph;e.exports=function(e,t){var n=t&&t.debugTiming?g.time:g.notime;n("layout",(function(){var t=n(" buildLayoutGraph",(function(){return function(e){var t=new m({multigraph:!0,compound:!0}),n=$(e.graph());return t.setGraph(r.merge({},b,S(n,v),r.pick(n,y))),r.forEach(e.nodes(),(function(n){var 
i=$(e.node(n));t.setNode(n,r.defaults(S(i,x),w)),t.setParent(n,e.parent(n))})),r.forEach(e.edges(),(function(n){var i=$(e.edge(n));t.setEdge(n,r.merge({},A,S(i,k),r.pick(i,E)))})),t}(e)}));n(" runLayout",(function(){!function(e,t){t(" makeSpaceForEdgeLabels",(function(){!function(e){var t=e.graph();t.ranksep/=2,r.forEach(e.edges(),(function(n){var r=e.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===t.rankdir||"BT"===t.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(e)})),t(" removeSelfEdges",(function(){!function(e){r.forEach(e.edges(),(function(t){if(t.v===t.w){var n=e.node(t.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:t,label:e.edge(t)}),e.removeEdge(t)}}))}(e)})),t(" acyclic",(function(){i.run(e)})),t(" nestingGraph.run",(function(){u.run(e)})),t(" rank",(function(){a(g.asNonCompoundGraph(e))})),t(" injectEdgeLabelProxies",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(n.width&&n.height){var r=e.node(t.v),i={rank:(e.node(t.w).rank-r.rank)/2+r.rank,e:t};g.addDummyNode(e,"edge-proxy",i,"_ep")}}))}(e)})),t(" removeEmptyRanks",(function(){c(e)})),t(" nestingGraph.cleanup",(function(){u.cleanup(e)})),t(" normalizeRanks",(function(){s(e)})),t(" assignRankMinMax",(function(){!function(e){var t=0;r.forEach(e.nodes(),(function(n){var i=e.node(n);i.borderTop&&(i.minRank=e.node(i.borderTop).rank,i.maxRank=e.node(i.borderBottom).rank,t=r.max(t,i.maxRank))})),e.graph().maxRank=t}(e)})),t(" removeEdgeLabelProxies",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);"edge-proxy"===n.dummy&&(e.edge(n.e).labelRank=n.rank,e.removeNode(t))}))}(e)})),t(" normalize.run",(function(){o.run(e)})),t(" parentDummyChains",(function(){l(e)})),t(" addBorderSegments",(function(){d(e)})),t(" order",(function(){p(e)})),t(" insertSelfEdges",(function(){!function(e){var t=g.buildLayerMatrix(e);r.forEach(t,(function(t){var n=0;r.forEach(t,(function(t,i){var o=e.node(t);o.order=i+n,r.forEach(o.selfEdges,(function(t){g.addDummyNode(e,"selfedge",{width:t.label.width,height:t.label.height,rank:o.rank,order:i+ ++n,e:t.e,label:t.label},"_se")})),delete o.selfEdges}))}))}(e)})),t(" adjustCoordinateSystem",(function(){f.adjust(e)})),t(" position",(function(){h(e)})),t(" positionSelfEdges",(function(){!function(e){r.forEach(e.nodes(),(function(t){var n=e.node(t);if("selfedge"===n.dummy){var r=e.node(n.e.v),i=r.x+r.width/2,o=r.y,a=n.x-i,s=r.height/2;e.setEdge(n.e,n.label),e.removeNode(t),n.label.points=[{x:i+2*a/3,y:o-s},{x:i+5*a/6,y:o-s},{x:i+a,y:o},{x:i+5*a/6,y:o+s},{x:i+2*a/3,y:o+s}],n.label.x=n.x,n.label.y=n.y}}))}(e)})),t(" removeBorderNodes",(function(){!function(e){r.forEach(e.nodes(),(function(t){if(e.children(t).length){var n=e.node(t),i=e.node(n.borderTop),o=e.node(n.borderBottom),a=e.node(r.last(n.borderLeft)),s=e.node(r.last(n.borderRight));n.width=Math.abs(s.x-a.x),n.height=Math.abs(o.y-i.y),n.x=a.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(e.nodes(),(function(t){"border"===e.node(t).dummy&&e.removeNode(t)}))}(e)})),t(" normalize.undo",(function(){o.undo(e)})),t(" fixupEdgeLabelCoords",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(e)})),t(" undoCoordinateSystem",(function(){f.undo(e)})),t(" translateGraph",(function(){!function(e){var 
t=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,o=0,a=e.graph(),s=a.marginx||0,l=a.marginy||0;function c(e){var r=e.x,a=e.y,s=e.width,l=e.height;t=Math.min(t,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,a-l/2),o=Math.max(o,a+l/2)}r.forEach(e.nodes(),(function(t){c(e.node(t))})),r.forEach(e.edges(),(function(t){var n=e.edge(t);r.has(n,"x")&&c(n)})),t-=s,i-=l,r.forEach(e.nodes(),(function(n){var r=e.node(n);r.x-=t,r.y-=i})),r.forEach(e.edges(),(function(n){var o=e.edge(n);r.forEach(o.points,(function(e){e.x-=t,e.y-=i})),r.has(o,"x")&&(o.x-=t),r.has(o,"y")&&(o.y-=i)})),a.width=n-t+s,a.height=o-i+l}(e)})),t(" assignNodeIntersects",(function(){!function(e){r.forEach(e.edges(),(function(t){var n,r,i=e.edge(t),o=e.node(t.v),a=e.node(t.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=a,r=o),i.points.unshift(g.intersectRect(o,n)),i.points.push(g.intersectRect(a,r))}))}(e)})),t(" reversePoints",(function(){!function(e){r.forEach(e.edges(),(function(t){var n=e.edge(t);n.reversed&&n.points.reverse()}))}(e)})),t(" acyclic.undo",(function(){i.undo(e)}))}(t,n)})),n(" updateInputGraph",(function(){!function(e,t){r.forEach(e.nodes(),(function(n){var r=e.node(n),i=t.node(n);r&&(r.x=i.x,r.y=i.y,t.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(e.edges(),(function(n){var i=e.edge(n),o=t.edge(n);i.points=o.points,r.has(o,"x")&&(i.x=o.x,i.y=o.y)})),e.graph().width=t.graph().width,e.graph().height=t.graph().height}(e,t)}))}))};var v=["nodesep","edgesep","ranksep","marginx","marginy"],b={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},y=["acyclicer","ranker","rankdir","align"],x=["width","height"],w={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],A={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function S(e,t){return r.mapValues(r.pick(e,t),Number)}function $(e){var t={};return r.forEach(e,(function(e,n){t[n.toLowerCase()]=e})),t}},function(e,t,n){var r=n(149);e.exports=function(e){return r(e,5)}},function(e,t,n){var r=n(89),i=n(57),o=n(90),a=n(48),s=Object.prototype,l=s.hasOwnProperty,c=r((function(e,t){e=Object(e);var n=-1,r=t.length,c=r>2?t[2]:void 0;for(c&&o(t[0],t[1],c)&&(r=1);++n-1?s[l?t[c]:c]:void 0}}},function(e,t,n){var r=n(188),i=n(37),o=n(365),a=Math.max;e.exports=function(e,t,n){var s=null==e?0:e.length;if(!s)return-1;var l=null==n?0:o(n);return l<0&&(l=a(s+l,0)),r(e,i(t,3),l)}},function(e,t,n){var r=n(196);e.exports=function(e){var t=r(e),n=t%1;return t==t?n?t-n:t:0}},function(e,t,n){var r=n(367),i=n(23),o=n(61),a=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,l=/^0o[0-7]+$/i,c=parseInt;e.exports=function(e){if("number"==typeof e)return e;if(o(e))return NaN;if(i(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=i(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=r(e);var n=s.test(e);return n||l.test(e)?c(e.slice(2),n?2:8):a.test(e)?NaN:+e}},function(e,t,n){var r=n(368),i=/^\s+/;e.exports=function(e){return e?e.slice(0,r(e)+1).replace(i,""):e}},function(e,t){var n=/\s/;e.exports=function(e){for(var t=e.length;t--&&n.test(e.charAt(t)););return t}},function(e,t,n){var r=n(128),i=n(169),o=n(48);e.exports=function(e,t){return null==e?e:r(e,i(t),o)}},function(e,t){e.exports=function(e){var t=null==e?0:e.length;return t?e[t-1]:void 0}},function(e,t,n){var r=n(79),i=n(127),o=n(37);e.exports=function(e,t){var n={};return t=o(t,3),i(e,(function(e,i,o){r(n,i,t(e,i,o))})),n}},function(e,t,n){var r=n(132),i=n(373),o=n(49);e.exports=function(e){return e&&e.length?r(e,o,i):void 
0}},function(e,t){e.exports=function(e,t){return e>t}},function(e,t,n){var r=n(375),i=n(379)((function(e,t,n){r(e,t,n)}));e.exports=i},function(e,t,n){var r=n(73),i=n(198),o=n(128),a=n(376),s=n(23),l=n(48),c=n(199);e.exports=function e(t,n,u,d,f){t!==n&&o(n,(function(o,l){if(f||(f=new r),s(o))a(t,n,l,u,e,d,f);else{var p=d?d(c(t,l),o,l+"",t,n,f):void 0;void 0===p&&(p=o),i(t,l,p)}}),l)}},function(e,t,n){var r=n(198),i=n(155),o=n(164),a=n(156),s=n(165),l=n(66),c=n(13),u=n(189),d=n(59),f=n(64),p=n(23),h=n(377),g=n(67),m=n(199),v=n(378);e.exports=function(e,t,n,b,y,x,w){var k=m(e,n),A=m(t,n),E=w.get(A);if(E)r(e,n,E);else{var S=x?x(k,A,n+"",e,t,w):void 0,$=void 0===S;if($){var C=c(A),_=!C&&d(A),O=!C&&!_&&g(A);S=A,C||_||O?c(k)?S=k:u(k)?S=a(k):_?($=!1,S=i(A,!0)):O?($=!1,S=o(A,!0)):S=[]:h(A)||l(A)?(S=k,l(k)?S=v(k):p(k)&&!f(k)||(S=s(A))):$=!1}$&&(w.set(A,S),y(S,A,b,x,w),w.delete(A)),r(e,n,S)}}},function(e,t,n){var r=n(47),i=n(84),o=n(32),a=Function.prototype,s=Object.prototype,l=a.toString,c=s.hasOwnProperty,u=l.call(Object);e.exports=function(e){if(!o(e)||"[object Object]"!=r(e))return!1;var t=i(e);if(null===t)return!0;var n=c.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&l.call(n)==u}},function(e,t,n){var r=n(65),i=n(48);e.exports=function(e){return r(e,i(e))}},function(e,t,n){var r=n(89),i=n(90);e.exports=function(e){return r((function(t,n){var r=-1,o=n.length,a=o>1?n[o-1]:void 0,s=o>2?n[2]:void 0;for(a=e.length>3&&"function"==typeof a?(o--,a):void 0,s&&i(n[0],n[1],s)&&(a=o<3?void 0:a,o=1),t=Object(t);++r1&&a(e,t[0],t[1])?t=[]:n>2&&a(t[0],t[1],t[2])&&(t=[t[0]]),i(e,r(t,1),[])}));e.exports=s},function(e,t,n){var r=n(88),i=n(86),o=n(37),a=n(184),s=n(393),l=n(82),c=n(394),u=n(49),d=n(13);e.exports=function(e,t,n){t=t.length?r(t,(function(e){return d(e)?function(t){return i(t,1===e.length?e[0]:e)}:e})):[u];var f=-1;t=r(t,l(o));var p=a(e,(function(e,n,i){return{criteria:r(t,(function(t){return t(e)})),index:++f,value:e}}));return s(p,(function(e,t){return c(e,t,n)}))}},function(e,t){e.exports=function(e,t){var n=e.length;for(e.sort(t);n--;)e[n]=e[n].value;return e}},function(e,t,n){var r=n(395);e.exports=function(e,t,n){for(var i=-1,o=e.criteria,a=t.criteria,s=o.length,l=n.length;++i=l?c:c*("desc"==n[i]?-1:1)}return e.index-t.index}},function(e,t,n){var r=n(61);e.exports=function(e,t){if(e!==t){var n=void 0!==e,i=null===e,o=e==e,a=r(e),s=void 0!==t,l=null===t,c=t==t,u=r(t);if(!l&&!u&&!a&&e>t||a&&s&&c&&!l&&!u||i&&s&&c||!n&&c||!o)return 1;if(!i&&!a&&!u&&e0;--l)if(r=t[l].dequeue()){i=i.concat(s(e,t,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(c,(function(t){return e.outEdges(t.v,t.w)})),!0)};var a=r.constant(1);function s(e,t,n,i,o){var a=o?[]:void 0;return r.forEach(e.inEdges(i.v),(function(r){var i=e.edge(r),s=e.node(r.v);o&&a.push({v:r.v,w:r.w}),s.out-=i,l(t,n,s)})),r.forEach(e.outEdges(i.v),(function(r){var i=e.edge(r),o=r.w,a=e.node(o);a.in-=i,l(t,n,a)})),e.removeNode(i.v),a}function l(e,t,n){n.out?n.in?e[n.out-n.in+t].enqueue(n):e[e.length-1].enqueue(n):e[0].enqueue(n)}},function(e,t){function n(){var e={};e._next=e._prev=e,this._sentinel=e}function r(e){e._prev._next=e._next,e._next._prev=e._prev,delete e._next,delete e._prev}function i(e,t){if("_next"!==e&&"_prev"!==e)return t}e.exports=n,n.prototype.dequeue=function(){var e=this._sentinel,t=e._prev;if(t!==e)return r(t),t},n.prototype.enqueue=function(e){var 
t=this._sentinel;e._prev&&e._next&&r(e),e._next=t._next,t._next._prev=e,t._next=e,e._prev=t},n.prototype.toString=function(){for(var e=[],t=this._sentinel,n=t._prev;n!==t;)e.push(JSON.stringify(n,i)),n=n._prev;return"["+e.join(", ")+"]"}},function(e,t,n){"use strict";var r=n(11),i=n(20);e.exports={run:function(e){e.graph().dummyChains=[],r.forEach(e.edges(),(function(t){!function(e,t){var n,r,o,a=t.v,s=e.node(a).rank,l=t.w,c=e.node(l).rank,u=t.name,d=e.edge(t),f=d.labelRank;if(c===s+1)return;for(e.removeEdge(t),o=0,++s;sl.lim&&(c=l,u=!0);var d=r.filter(t.edges(),(function(t){return u===b(e,e.node(t.v),c)&&u!==b(e,e.node(t.w),c)}));return r.minBy(d,(function(e){return o(t,e)}))}function v(e,t,n,i){var o=n.v,a=n.w;e.removeEdge(o,a),e.setEdge(i.v,i.w,{}),p(e),d(e,t),function(e,t){var n=r.find(e.nodes(),(function(e){return!t.node(e).parent})),i=s(e,n);i=i.slice(1),r.forEach(i,(function(n){var r=e.node(n).parent,i=t.edge(n,r),o=!1;i||(i=t.edge(r,n),o=!0),t.node(n).rank=t.node(r).rank+(o?i.minlen:-i.minlen)}))}(e,t)}function b(e,t,n){return n.low<=t.lim&&t.lim<=n.lim}e.exports=u,u.initLowLimValues=p,u.initCutValues=d,u.calcCutValue=f,u.leaveEdge=g,u.enterEdge=m,u.exchangeEdges=v},function(e,t,n){var r=n(11);e.exports=function(e){var t=function(e){var t={},n=0;function i(o){var a=n;r.forEach(e.children(o),i),t[o]={low:a,lim:n++}}return r.forEach(e.children(),i),t}(e);r.forEach(e.graph().dummyChains,(function(n){for(var r=e.node(n),i=r.edgeObj,o=function(e,t,n,r){var i,o,a=[],s=[],l=Math.min(t[n].low,t[r].low),c=Math.max(t[n].lim,t[r].lim);i=n;do{i=e.parent(i),a.push(i)}while(i&&(t[i].low>l||c>t[i].lim));o=i,i=r;for(;(i=e.parent(i))!==o;)s.push(i);return{path:a.concat(s.reverse()),lca:o}}(e,t,i.v,i.w),a=o.path,s=o.lca,l=0,c=a[l],u=!0;n!==i.w;){if(r=e.node(n),u){for(;(c=a[l])!==s&&e.node(c).maxRank=2),s=u.buildLayerMatrix(e);var m=o(e,s);m0;)t%2&&(n+=l[t+1]),l[t=t-1>>1]+=e.weight;c+=e.weight*n}))),c}e.exports=function(e,t){for(var n=0,r=1;r=e.barycenter)&&function(e,t){var n=0,r=0;e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.vs=t.vs.concat(e.vs),e.barycenter=n/r,e.weight=r,e.i=Math.min(t.i,e.i),t.merged=!0}(e,t)}}function i(t){return function(n){n.in.push(t),0==--n.indegree&&e.push(n)}}for(;e.length;){var o=e.pop();t.push(o),r.forEach(o.in.reverse(),n(o)),r.forEach(o.out,i(o))}return r.map(r.filter(t,(function(e){return!e.merged})),(function(e){return r.pick(e,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(e){return!e.indegree})))}},function(e,t,n){var r=n(11),i=n(20);function o(e,t,n){for(var i;t.length&&(i=r.last(t)).i<=n;)t.pop(),e.push(i.vs),n++;return n}e.exports=function(e,t){var n=i.partition(e,(function(e){return r.has(e,"barycenter")})),a=n.lhs,s=r.sortBy(n.rhs,(function(e){return-e.i})),l=[],c=0,u=0,d=0;a.sort((f=!!t,function(e,t){return e.barycentert.barycenter?1:f?t.i-e.i:e.i-t.i})),d=o(l,s,d),r.forEach(a,(function(e){d+=e.vs.length,l.push(e.vs),c+=e.barycenter*e.weight,u+=e.weight,d=o(l,s,d)}));var f;var p={vs:r.flatten(l,!0)};u&&(p.barycenter=c/u,p.weight=u);return p}},function(e,t,n){var r=n(11),i=n(28).Graph;e.exports=function(e,t,n){var o=function(e){var t;for(;e.hasNode(t=r.uniqueId("_root")););return t}(e),a=new i({compound:!0}).setGraph({root:o}).setDefaultNodeLabel((function(t){return e.node(t)}));return r.forEach(e.nodes(),(function(i){var s=e.node(i),l=e.parent(i);(s.rank===t||s.minRank<=t&&t<=s.maxRank)&&(a.setNode(i),a.setParent(i,l||o),r.forEach(e[n](i),(function(t){var 
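For orientation, the adapter elided above registers a "dagre" layout with cytoscape, and the graph factory near the end of this bundle drives it with the options shown here. A standalone sketch against the published cytoscape / cytoscape-dagre APIs; the element ids are hypothetical:

    // Minimal sketch: register the dagre layout and run it with the same
    // options the bundle's graph service uses for its left-to-right view.
    // Node/edge ids below are hypothetical.
    const cytoscape = require('cytoscape');
    const dagre = require('cytoscape-dagre');

    cytoscape.use(dagre); // the bundle registers it as cytoscape('layout', 'dagre', ...)

    const cy = cytoscape({
      container: document.getElementById('graph'),
      elements: [
        { data: { id: 'model.a' } },
        { data: { id: 'model.b' } },
        { data: { id: 'edge.ab', source: 'model.a', target: 'model.b' } },
      ],
    });

    cy.layout({ name: 'dagre', rankDir: 'LR', rankSep: 200, edgeSep: 30, nodeSep: 50 }).run();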
[minified bundle, elided: the remaining dagre ordering/positioning helpers, then the start of the dbt docs Angular application: the modelTreeLine directive (sidebar tree with per-type icons and ui-router navigation), the docsSearch directive (caps results at 20, builds a case-insensitive alternation regex from the query to highlight and excerpt matches, filters matching columns), the tableDetails directive (humanizes byte sizes and percentages, assembles Owner/Type/Package/Language/Relation rows from node metadata), and their templateCache HTML, which was stripped to whitespace and entity runs during extraction]
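The docsSearch highlight logic survives legibly in the span above; restated, it regex-escapes each query term, joins the terms into one alternation, and wraps every case-insensitive match. The wrapping markup was lost in extraction, so the span class below is a guess:

    // Restatement of the bundle's highlight(): escape each term, build one
    // case-insensitive alternation, wrap matches. The <span> class is
    // hypothetical; the original tag was stripped from the template string.
    function highlight(text, query) {
      if (!query || !text) return text;
      const terms = query.trim().split(/\s+/);
      const escaped = terms.map((t) => t.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'));
      const pattern = new RegExp('(' + escaped.join(')|(') + ')', 'gi');
      return text.replace(pattern, '<span class="search-match">$&</span>');
    }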
[minified bundle, elided: the table_details.html template fragments (Details and Tags sections, stat label/value rows), the table-details CSS (fixed table layout, horizontally scrolling detail body), and the columnDetails directive (sorts columns by index, expands rows that have tests, descriptions, or meta, and renders U/N/F/A shorthand badges for generic tests)]
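The byte-size formatter in the tableDetails directive is fully legible; here it is restated readably, behavior unchanged:

    // Restatement of the bundle's byte formatter: zero short-circuits,
    // values below 1 are scaled by 1e6 before formatting, then reduced
    // into 1024-based units.
    function formatBytes(value, decimals) {
      if (value === 0) return '0 bytes';
      if (value < 1) value *= 1e6;
      if (isNaN(parseFloat(value)) || !isFinite(value)) return '-';
      if (decimals === undefined) decimals = 0;
      const units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB'];
      const exp = Math.floor(Math.log(value) / Math.log(1024));
      return (value / Math.pow(1024, exp)).toFixed(decimals) + ' ' + units[exp];
    }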
[minified bundle, elided: the column_details.html template fragments (Column/Type/Description/Tests/More table with expandable detail rows) and the codeBlock directive, which tracks a selected Source/Compiled version, maps the node language to a Prism grammar class, re-highlights on selection change, and copies the raw source to the clipboard]
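The codeBlock directive picks a Prism grammar from the node language before rendering; a minimal sketch of that path against Prism's public API (the require lines stand in for the grammars the bundle inlines):

    // Sketch of the bundle's highlight path: sql unless the node language
    // is python, highlight the trimmed source; Prism.highlightAll() runs
    // afterwards so the line-numbers plugin can decorate the output.
    const Prism = require('prismjs');
    require('prismjs/components/prism-sql');
    require('prismjs/components/prism-python');

    function highlightSource(source, language) {
      const isPython = language === 'python';
      const grammar = isPython ? Prism.languages.python : Prism.languages.sql;
      return Prism.highlight(source.trim(), grammar, isPython ? 'python' : 'sql');
    }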
[minified bundle, elided: the code_block.html template, Prism CSS overrides (Monaco font, scrollable pre blocks), the macroArguments directive and its Argument/Type/Description table template, the referenceList directive (groups referencing nodes by resource type for tabbed display), and the first per-resource controllers (ModelCtrl, SourceCtrl, SeedCtrl, SnapshotCtrl), each of which loads its node from the manifest, resolves references and parents via the graph helpers, and prepares Source/Compiled code versions]
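The referenceList directive's resource-type labels are a long conditional chain in the bundle; the same mapping, table-driven:

    // Same mapping as the bundle's mapResourceType, as a lookup table.
    const RESOURCE_TYPE_LABELS = {
      model: 'Models', seed: 'Seeds', test: 'Tests', snapshot: 'Snapshots',
      analysis: 'Analyses', macro: 'Macros', exposure: 'Exposures',
      metric: 'Metrics', operation: 'Operations',
    };

    function mapResourceType(type) {
      return RESOURCE_TYPE_LABELS[type] || 'Nodes';
    }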
[minified bundle, elided: the remaining controllers (TestCtrl, MacroCtrl with per-adapter macro implementations, AnalysisCtrl, ExposureCtrl, MetricCtrl, OperationCtrl, GraphCtrl, and MainController, which wires up the sidebar tree, a "t" keyboard shortcut that focuses search, pageview tracking, and weighted search scoring), the dbt logo as an inline SVG data URI, and the code service (Prism highlighting, hidden-textarea copy-to-clipboard, and SQL generators for source tables and metrics)]
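The code service's generateSourceSQL is legible in the span above: one select-list entry per documented column, then a fully qualified relation. Restated:

    // Restatement of the bundle's generateSourceSQL: a select list built
    // from the node's documented columns, then
    // "from database.schema.identifier".
    function generateSourceSQL(node) {
      const lines = ['select'];
      const names = Object.keys(node.columns);
      names.forEach(function (name, i) {
        lines.push('    ' + name + (i + 1 !== names.length ? ',' : ''));
      });
      const relation =
        (node.database ? node.database + '.' : '') + node.schema + '.' + node.identifier;
      lines.push('from ' + relation);
      return lines.join('\n');
    }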
t}])},function(e,t){Prism.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},identifier:{pattern:/(^|[^@\\])`(?:\\[\s\S]|[^`\\]|``)*`/,greedy:!0,lookbehind:!0,inside:{punctuation:/^`|`$/}},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:COL|_INSERT)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:ING|S)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS 
LIKE|XOR)\b/i,punctuation:/[;[\]()`,.]/}},function(e,t){Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t ]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python},function(e,t){!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document){var e=/\n(?!$)/g,t=Prism.plugins.lineNumbers={getLine:function(e,t){if("PRE"===e.tagName&&e.classList.contains("line-numbers")){var n=e.querySelector(".line-numbers-rows");if(n){var r=parseInt(e.getAttribute("data-start"),10)||1,i=r+(n.children.length-1);ti&&(t=i);var o=t-r;return n.children[o]}}},resize:function(e){r([e])},assumeViewportIndependence:!0},n=void 0;window.addEventListener("resize",(function(){t.assumeViewportIndependence&&n===window.innerWidth||(n=window.innerWidth,r(Array.prototype.slice.call(document.querySelectorAll("pre.line-numbers"))))})),Prism.hooks.add("complete",(function(t){if(t.code){var n=t.element,i=n.parentNode;if(i&&/pre/i.test(i.nodeName)&&!n.querySelector(".line-numbers-rows")&&Prism.util.isActive(n,"line-numbers")){n.classList.remove("line-numbers"),i.classList.add("line-numbers");var o,a=t.code.match(e),s=a?a.length+1:1,l=new Array(s+1).join("");(o=document.createElement("span")).setAttribute("aria-hidden","true"),o.className="line-numbers-rows",o.innerHTML=l,i.hasAttribute("data-start")&&(i.style.counterReset="linenumber "+(parseInt(i.getAttribute("data-start"),10)-1)),t.element.appendChild(o),r([i]),Prism.hooks.run("line-numbers",t)}}})),Prism.hooks.add("line-numbers",(function(e){e.plugins=e.plugins||{},e.plugins.lineNumbers=!0}))}function 
[Second half of the line-numbers plugin: the resize routine applies only to pre-wrap/pre-line blocks, measures each wrapped line's height with a hidden .line-numbers-sizer span, and copies those heights onto the rows column; its loop bodies were partially destroyed in extraction. Followed by two CSS modules injected through the css-loader/style-loader shim: the line-numbers stylesheet (relative-positioned code, a .line-numbers-rows column absolutely positioned at left -3.8em and width 3em with a #999 right border, user-select disabled, counter-based ::before numbers) and the "GHColors" Prism theme by Avi Aryan (GitHub-inspired: #393A34 base text in a Consolas stack, #b3d4fc selection, bordered white code blocks, #999988 italic comments, #e3116c strings and attribute values, continuing into the next region).]
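Each stylesheet above follows the webpack css-loader/style-loader convention: the CSS module exports a list of [moduleId, cssText, media] tuples and a runtime injects them as <style> tags. A hedged sketch of that shape (the require path is hypothetical; the real runtime also handles HMR, media queries, and locals):

    // css-loader output: entries of [moduleId, cssText, mediaQuery].
    var css = require('./ghcolors.css'); // hypothetical path
    if (typeof css === 'string') css = [[module.id, css, '']];

    // style-loader runtime, reduced to its essence: one <style> per entry.
    css.forEach(function (entry) {
      var style = document.createElement('style');
      style.textContent = entry[1];
      document.head.appendChild(style);
    });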
[Remainder of the GHColors token colors: #36acaa for entities/numbers/booleans/variables/constants/regex, #00a4db for keywords and at-rules, #9a050f for functions and deletions, #00009f for tags and selectors, bold for important/function/bold tokens. Then the dbt docs Angular "graph" factory: cytoscape options for the vertical (sidebar) and horizontal (fullscreen) orientations; three layouts (none, a dagre left-to-right layout with rankSep 200, and a preset top-down layout that places the focused node at the origin and spreads its 1-hop ancestors and descendants above and below it using graphlib topological sorts); a stylesheet giving edges unbundled-bezier curves and triangle-backcurve arrows (#027599 vertical, #006f8a horizontal), #bd6bb6 selection (plus #88447d for selected horizontal nodes), #0094b3 default nodes, #5fb825 sources, #ff694b exposures, #ff5688 metrics, a data(node_color) override, and #919599 for dirty or hidden nodes. Helper p() reduces the pristine node/edge sets to the visible element list, applying selected/hidden flags and docs-meta node colors (validated by isValidColor); h() runs selectorService.selectNodes over the DAG. On project load the factory builds cytoscape node records for models, seeds, sources, snapshots, analyses, exposures, metrics, operations and non-generic tests, derives edges from manifest.parent_map (skipping edges into generic tests), and assembles a directed graphlib DAG from both.]
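The manifest-to-DAG step at the end of the factory survives intact; condensed and de-minified (names simplified; the graph library is graphlib, inferred from the Graph constructor and alg.topsort calls in this bundle):

    var graphlib = require('graphlib');

    // pristineNodes: unique_id -> cytoscape node record
    // pristineEdges: child unique_id -> list of edge records
    function buildDag(pristineNodes, pristineEdges) {
      var dag = new graphlib.Graph({ directed: true });
      Object.values(pristineNodes).forEach(function (n) {
        dag.setNode(n.data.unique_id, n.data.name);
      });
      Object.values(pristineEdges).forEach(function (edgeList) {
        edgeList.forEach(function (e) {
          dag.setEdge(e.data.source, e.data.target);
        });
      });
      return dag;
    }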
[Graph API methods: showVerticalGraph() selects "+name+" with one hop under the top-down preset layout; showFullGraph() and updateGraph() use the left-to-right dagre layout, push the selection into locationService, and mark the view fullscreen; selectNode() highlights every edge lying on an ancestor or descendant path of the clicked node; deselectNodes(), markDirty() and markAllClean() manage the selected/dirty flags. Then a small module exporting isValidColor (a Set of the CSS named colors plus a 3- or 6-digit hex test), and the head of the "selectorService" factory: default criteria (empty include/exclude, all packages, tags including the untagged null, resource types model/seed/snapshot/source/test/analysis/exposure/metric, depth 1) kept as clean and dirty copies, with init() seeding both from project options.]
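isValidColor is short and intact; de-minified it reads as follows (the named-color Set is abridged here; the bundle lists the full CSS keyword list, about 140 entries):

    var NAMED_COLORS = new Set([
      'aliceblue', 'antiquewhite', 'aqua', /* ... full CSS keyword list ... */
      'yellow', 'yellowgreen',
    ]);

    function isValidColor(color) {
      if (!color) return false;
      var normalized = color.trim().toLowerCase();
      if (normalized === '') return false;
      var isHex = /^#([A-Fa-f0-9]{3}){1,2}$/.test(normalized); // #abc or #aabbcc
      return isHex || NAMED_COLORS.has(normalized);
    }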
[selectorService continued: resetSelection() derives the include spec from the viewed node ("+name+" for models/seeds/snapshots, "+source:source_name.name+" for sources, "+exposure:name" and "+metric:name", bare "+name" for analyses and tests); excludeNode() appends a spec to the exclude string honoring parents/children markers and the source:/exposure:/metric: prefixes; selectSource(), clearViewNode(), isDirty() (deep compare of clean vs dirty) and updateSelection() round it out. Then the selector-spec parsing module: splitSpecs() tokenizes on a delimiter, parseSpec() decodes one token with a named-group regex (reconstructed below) into select_at (@), parents/children flags with optional depths, a method (with an optional dotted modifier such as config.materialized, defaulting to "implicit") and a value; parseSpecs() turns comma-joined groups into intersections and space-separated groups into unions; applySpec() folds matched/selected sets across groups; selectNodes() applies include then subtracts exclude, then drops nodes whose package, tags (null matching untagged nodes) or resource type fall outside the current criteria.]
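The parseSpec regex above lost its named groups in extraction ("(?<name>" collapsed to "(?("), but the group names survive in the .groups accesses that follow it, so the original almost certainly read:

    var SPEC_RE = new RegExp(
      /^/.source +
      /(?<childs_parents>(\@))?/.source +
      /(?<parents>((?<parents_depth>(\d*))\+))?/.source +
      /((?<method>([\w.]+)):)?/.source +
      /(?<value>(.*?))/.source +
      /(?<children>(\+(?<children_depth>(\d*))))?/.source +
      /$/.source
    );

    // e.g. dbt selector "2+tag:nightly+3":
    var m = SPEC_RE.exec('2+tag:nightly+3');
    // m.groups.parents_depth === '2', m.groups.method === 'tag',
    // m.groups.value === 'nightly', m.groups.children_depth === '3'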
[The node-matcher module: matchers keyed by selector type (implicit, fqn, tag, source, exposure, metric, path, file, package, config, test_name, test_type). Most matcher bodies were destroyed in extraction; the survivors show getNodesBySource accepting "source.table", "source" and "*" forms, exposure/metric matching by name or "*", and the implicit matcher unioning FQN, path and (for values ending in ".sql") file-name matches. getNodesFromSpec() looks up the matcher (logging "Node matcher for selector … is invalid" otherwise) and expands each hit with its @-neighborhood, ancestors and descendants per the spec flags and depths (see the sketch below). Then the head of the "trackingService" factory: init() records the project_id and enables tracking only when the project opted in, isHosted() tests for a .getdbt.com hostname, and turn_on_tracking() installs the Snowplow bootstrap, loading //d1fc8wv8zag5ca.cloudfront.net/2.9.0/sp.js.]
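The expansion step at the end of getNodesFromSpec is intact; a de-minified sketch with assumed names (graphOps is the bundled ancestor/descendant helper module, _ is the bundled lodash):

    function expandSelection(dag, matchedNodes, spec, hops, graphOps, _) {
      var selected = [];
      var matched = [];
      matchedNodes.forEach(function (node) {
        var id = node.unique_id;
        matched.push(id);
        var parents = [], children = [], atNbhd = [];
        if (spec.select_at) atNbhd = _.union(graphOps.selectAt(dag, id));
        if (spec.select_parents)
          parents = graphOps.ancestorNodes(dag, id, hops || spec.parents_depth);
        if (spec.select_children)
          children = graphOps.descendentNodes(dag, id, hops || spec.children_depth);
        // the node itself, plus everything the spec pulls in
        selected = _.union([id], selected, children, parents, atNbhd);
      });
      return { selected: selected, matched: matched };
    }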
[trackingService continued: the tracker is created via snowplow("newTracker", "sp", "fishtownanalytics.sinter-collect.com") with appId "dbt-docs", forceSecureTracker, respectDoNotTrack, no user fingerprinting, a webPage context, activity tracking at 30/30, and a .getdbt.com cookie domain when hosted; fuzzUrls() rewrites the page and referrer URLs to https://fuzzed.getdbt.com/ for self-hosted deployments; getContext() attaches an iglu:com.dbt/dbt_docs/jsonschema/1-0-0 context carrying is_cloud_hosted and core_project_id; track_pageview(), track_event() and track_graph_interaction() wrap the corresponding Snowplow calls. Then an MD5 module (input coercion for strings, buffers and arrays; the classic _ff/_gg/_hh/_ii round functions over 16-word blocks, blocksize 16, digestsize 16; hex, byte-array or binary-string output) with its crypt/charenc helpers (rotl/rotr, endian swaps, bytes/words/hex/base64 conversions) and an isBuffer duck-type check; the bit-shift arithmetic in this region was heavily mangled in extraction.]
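How a caller reports a graph interaction through this factory, per the signatures above (the label and value are hypothetical):

    trackingService.track_graph_interaction('show_full_graph', 1);
    // which the factory forwards as:
    // snowplow('trackStructEvent', 'dbt-docs', 'graph', 'interact',
    //          'show_full_graph', 1, trackingService.getContext());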
[A Runge-Kutta spring easing generator adapted from Framer.js (copyright Koen Bok, MIT License): it integrates a damped spring with acceleration -tension*x - friction*v (tension defaulting to 500, friction to 20) in roughly 16 ms RK4 steps, sampling positions until |x| and |v| settle below 1e-4, and returns either a lookup function over the sampled curve or the settle duration. Then cytoscape.js core modules: style-value interpolation (linear blends with rounding for colors, min/max clamping, percent-unit handling), update batching (startBatch/endBatch/batch/batchData coalesce style recalculation and notifications), renderer teardown with onRender/offRender, the collection/nodes/edges/$ accessors, and style()/setStyle() accepting a stylesheet object, a JSON array, or a stylesheet string.]
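The spring math is classic RK4 over a damped oscillator; a readable sketch of one integration step matching the minified t()/n() pair above:

    // Acceleration of a damped spring displaced by x with velocity v.
    function accel(s) { return -s.tension * s.x - s.friction * s.v; }

    // Advance the state by dt using a fourth-order Runge-Kutta step.
    function rk4Step(s, dt) {
      function deriv(st) { return { dx: st.v, dv: accel(st) }; }
      function shift(st, h, d) {
        return { x: st.x + d.dx * h, v: st.v + d.dv * h,
                 tension: st.tension, friction: st.friction };
      }
      var a = deriv(s);
      var b = deriv(shift(s, dt / 2, a));
      var c = deriv(shift(s, dt / 2, b));
      var d = deriv(shift(s, dt, c));
      s.x += (dt / 6) * (a.dx + 2 * (b.dx + c.dx) + d.dx);
      s.v += (dt / 6) * (a.dv + 2 * (b.dv + c.dv) + d.dv);
      return s;
    }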
[The cytoscape style engine: per-element label/font/style cache keys combining text-transform, label text, font style/size/family/weight, alignment, outline width, and wrap settings; applyParsedProperty() handling bypasses and deletions, downgrading haystack curves to bezier for loop and compound edges, and implementing the data()/mapData() mappings (field lookup in element data; per-channel color or numeric interpolation; an error advising a "[field]" selector when the element lacks the mapped field); transition handling that snapshots starting values, plays an animation with the configured duration, delay and transition-timing-function, then removes the bypasses; checkZOrderTrigger; and applyBypass for "*" targets. Then the string-stylesheet parser (selector { property: value; } blocks, with "Halting stylesheet parsing…" and "Skipping parsing of block…" diagnostics for malformed input), the default stylesheet (parent nodes #eee with #ccc borders, width-3 haystack edges, #0169D9 for all selected elements, black 0.25-opacity active overlays, a #ddd selection box), and the property parser: a propCache keyed on name$value$bypass$mapping, regex recognition of data(...) and mapData(...) with validity checks (the mapped endpoints must differ and be colors or numbers), multiple-value splitting, unit handling for px/em/ms/s/deg/rad/%, and propList parsing.]
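The mapData color path interpolates each channel linearly and emits an rgb() string; the surviving arithmetic, extracted into a standalone helper:

    // t in [0, 1]: where the element's data value sits between
    // the mapping's fieldMin and fieldMax.
    function mapColor(minRGB, maxRGB, t) {
      function ch(i) { return Math.round(minRGB[i] + (maxRGB[i] - minRGB[i]) * t); }
      var rgb = [ch(0), ch(1), ch(2)];
      return { value: rgb,
               strValue: 'rgb(' + rgb[0] + ', ' + rgb[1] + ', ' + rgb[2] + ')' };
    }

(The bundle also interpolates a fourth alpha channel but, as written, omits it from the rgb() string.)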
[Viewport and layout modules (the comparison-heavy arithmetic here lost text to extraction): fit()/zoom()/pan() clamp against minZoom/maxZoom and padding and emit "viewport" notifications; center()/getCenterPan() pan to a bounding-box midpoint; size() reads container client dimensions minus computed padding through a sizeCache; extent() maps the rendered rectangle back through pan and zoom; centre, autolockNodes and autoungrabifyNodes are aliases. An extension registrar wires new core/collection/layout capabilities onto the prototypes and refuses to overwrite existing names. Layout implementations follow: a breadth-first/tree layout that picks maximal-degree roots, builds depth rows, and spaces siblings with optional overlap avoidance; a circle layout (fit, padding 30, startAngle 1.5*PI, optional sweep, clockwise flag, avoidOverlap enforcing a minimum radius from node spacing via chord geometry); a concentric layout (concentric() defaulting to node degree, levelWidth() defaulting to maxDegree/4, minNodeSpacing 10, an equidistant option); the CoSE force simulation (node repulsion and overlap forces, ideal-edge-length springs, gravity toward component centers, a cooling temperature limiting per-iteration displacement, compound-node boundary propagation, and a final component-packing pass); and a grid layout choosing rows/columns from the viewport aspect, with condense and avoidOverlap options. The region ends with renderer hit-testing: getAllInBox() for box selection, nearest-element lookup via squared distances to line segments and quadratic beziers, arrowhead boxes, rotated label boxes, and the start of edge geometry (bundling parallel edges by endpoint pair before projecting bezier control points).]
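The circle layout's surviving position callback steps an angle per node index from startAngle; as a standalone helper (dTheta is the sweep divided across the node count, computed in a damaged portion above):

    function circlePosition(i, opts) {
      var theta = opts.startAngle + i * opts.dTheta * (opts.clockwise ? 1 : -1);
      return { x: opts.cx + opts.r * Math.cos(theta),
               y: opts.cy + opts.r * Math.sin(theta) };
    }

    // e.g. 12 nodes on a full turn:
    // circlePosition(i, { startAngle: 1.5 * Math.PI, dTheta: 2 * Math.PI / 12,
    //                     clockwise: true, cx: 0, cy: 0, r: 200 })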
[Edge and label geometry continued: positioning edge labels along straight, segment or bezier paths (walking segments to the target arc length, stepping qbezierPtAt along curves, and sampling two nearby points to derive the auto-rotation angle); applyLabelDimensions for main, source and target labels; getLabelText applying text-transform and text-wrap ("wrap" splits words against text-max-width, "ellipsis" truncates and appends "…"); calculateLabelDimensions measuring text in a hidden off-screen <div> styled with the element's font properties and cached per style key; calculateLabelAngles (autorotate uses atan(midDispY/midDispX)); getNodeShape (compound parents coerce to rectangle variants, "polygon" resolves shape-polygon-points); and registerCalculationListeners, which batches style-dirtied elements and recomputes their rendered style before each draw.]
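Label measurement works by writing the text into a hidden off-screen element carrying the label's font styles and reading back the client box; a sketch of the intact logic (per-key caching omitted for brevity):

    var measureDiv = null;

    function measureLabel(text, font, wrap) {
      if (!measureDiv) {
        measureDiv = document.createElement('div');
        var s = measureDiv.style;
        s.position = 'absolute'; s.left = '-9999px'; s.top = '-9999px';
        s.zIndex = '-1'; s.visibility = 'hidden'; s.pointerEvents = 'none';
        s.padding = '0'; s.lineHeight = '1';
        document.body.appendChild(measureDiv);
      }
      measureDiv.style.fontFamily = font.family;
      measureDiv.style.fontStyle = font.style;
      measureDiv.style.fontSize = font.size;     // e.g. '16px'
      measureDiv.style.fontWeight = font.weight;
      measureDiv.style.whiteSpace = wrap ? 'pre' : 'normal';
      measureDiv.textContent = text;
      return { width: Math.ceil(measureDiv.clientWidth),
               height: Math.ceil(measureDiv.clientHeight) };
    }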
[Pointer and touch bindings, with extraction damage throughout: mousemove drives right-button cxtdrag events (with cxtdragover/cxtdragout targets), user panning (an initial mousedown delta, then incremental deltas), box-selection start, hover enter/leave pairs (mouseover/mouseout, tapdragover/tapdragout), and threshold-gated node dragging with redraw hints; mouseup emits mouseup/tapend/vmouseup plus click/tap/vclick when no drag occurred, resolves additive versus replacement selection, completes box selection (boxstart/boxend, box and boxselect events through a filter of selectable, unselected elements), and frees grabbed nodes; the wheel handler implements exponential zoom about the cursor (see the sketch below), suppressed briefly after page scrolls; container mouseout/mouseover re-emit events with projected positions; touchstart recognizes two-finger context-tap starts (recording inter-touch distance and midpoint for pinch detection), sets up single-touch grab/drag (taphold timeout, touch-drag element lists), and clears selection on background taps; touchmove promotes context taps to cxtdrag once the pinch ratio passes about 2.25, runs three-finger box selection, performs pinch zoom from the inter-touch distance ratio about the projected midpoint, and continues touch drags.]
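The wheel handler's zoom arithmetic survives: the delta becomes an exponent so equal wheel ticks multiply the zoom by equal factors, centered on the cursor. A sketch (the bundle first projects client coordinates into the viewport; using clientX/Y directly here is a simplification):

    function onWheel(cy, evt, wheelSensitivity) {
      var delta = evt.deltaY != null ? evt.deltaY / -250
                : evt.wheelDeltaY != null ? evt.wheelDeltaY / 1000
                : evt.wheelDelta / 1000;
      delta *= wheelSensitivity;
      if (evt.deltaMode === 1) delta *= 33; // line-mode wheels report lines, not pixels
      cy.zoom({
        level: cy.zoom() * Math.pow(10, delta), // exponential zoom
        renderedPosition: { x: evt.clientX, y: evt.clientY },
      });
    }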
e=this.nodeShapes={},t=this;this.generateEllipse(),this.generatePolygon("triangle",r.generateUnitNgonPointsFitToSquare(3,0)),this.generatePolygon("rectangle",r.generateUnitNgonPointsFitToSquare(4,0)),e.square=e.rectangle,this.generateRoundRectangle(),this.generateCutRectangle(),this.generateBarrel(),this.generateBottomRoundrectangle(),this.generatePolygon("diamond",[0,1,1,0,0,-1,-1,0]),this.generatePolygon("pentagon",r.generateUnitNgonPointsFitToSquare(5,0)),this.generatePolygon("hexagon",r.generateUnitNgonPointsFitToSquare(6,0)),this.generatePolygon("heptagon",r.generateUnitNgonPointsFitToSquare(7,0)),this.generatePolygon("octagon",r.generateUnitNgonPointsFitToSquare(8,0));var n=new Array(20),i=r.generateUnitNgonPoints(5,0),o=r.generateUnitNgonPoints(5,Math.PI/5),a=.5*(3-Math.sqrt(5));a*=1.57;for(var s=0;s0&&t.data.lyrTxrCache.invalidateElements(n)}))}l.CANVAS_LAYERS=3,l.SELECT_BOX=0,l.DRAG=1,l.NODE=2,l.BUFFER_COUNT=3,l.TEXTURE_BUFFER=0,l.MOTIONBLUR_BUFFER_NODE=1,l.MOTIONBLUR_BUFFER_DRAG=2,l.redrawHint=function(e,t){var n=this;switch(e){case"eles":n.data.canvasNeedsRedraw[l.NODE]=t;break;case"drag":n.data.canvasNeedsRedraw[l.DRAG]=t;break;case"select":n.data.canvasNeedsRedraw[l.SELECT_BOX]=t}};var u="undefined"!=typeof Path2D;l.path2dEnabled=function(e){if(void 0===e)return this.pathsEnabled;this.pathsEnabled=!!e},l.usePaths=function(){return u&&this.pathsEnabled},[n(126),n(127),n(128),n(129),n(130),n(131),n(132),n(133),n(134),n(135)].forEach((function(e){r.extend(l,e)})),e.exports=s},function(e,t,n){"use strict";var r=n(2),i=n(1),o=n(9),a=n(19),s={dequeue:"dequeue",downscale:"downscale",highQuality:"highQuality"},l=function(e){this.renderer=e,this.onDequeues=[],this.setupDequeueing()},c=l.prototype;c.reasons=s,c.getTextureQueue=function(e){return this.eleImgCaches=this.eleImgCaches||{},this.eleImgCaches[e]=this.eleImgCaches[e]||[]},c.getRetiredTextureQueue=function(e){var t=this.eleImgCaches.retired=this.eleImgCaches.retired||{};return t[e]=t[e]||[]},c.getElementQueue=function(){return this.eleCacheQueue=this.eleCacheQueue||new o((function(e,t){return t.reqs-e.reqs}))},c.getElementIdToQueue=function(){return this.eleIdToCacheQueue=this.eleIdToCacheQueue||{}},c.getElement=function(e,t,n,i,o){var a=this,l=this.renderer,c=e._private.rscratch,u=l.cy.zoom();if(0===t.w||0===t.h||!e.visible())return null;if(null==i&&(i=Math.ceil(r.log2(u*n))),i<-4)i=-4;else if(u>=3.99||i>2)return null;var d,f=Math.pow(2,i),p=t.h*f,h=t.w*f,g=c.imgCaches=c.imgCaches||{},m=g[i];if(m)return m;if(d=p<=25?25:p<=50?50:50*Math.ceil(p/50),p>1024||h>1024||e.isEdge()||e.isParent())return null;var v=a.getTextureQueue(d),b=v[v.length-2],y=function(){return a.recycleTexture(d,h)||a.addTexture(d,h)};b||(b=v[v.length-1]),b||(b=y()),b.width-b.usedWidthi;$--)C=a.getElement(e,t,n,$,s.downscale);_()}else{var O;if(!A&&!E&&!S)for($=i-1;$>=-4;$--){var j;if(j=g[$]){O=j;break}}if(k(O))return a.queueElement(e,i),O;b.context.translate(b.usedWidth,0),b.context.scale(f,f),l.drawElement(b.context,e,t,w),b.context.scale(1/f,1/f),b.context.translate(-b.usedWidth,0)}return m=g[i]={ele:e,x:b.usedWidth,texture:b,level:i,scale:f,width:h,height:p,scaledLabelShown:w},b.usedWidth+=Math.ceil(h+8),b.eleCaches.push(m),a.checkTextureFullness(b),m},c.invalidateElement=function(e){var t=e._private.rscratch.imgCaches;if(t)for(var n=-4;n<=2;n++){var r=t[n];if(r){var 
[minified bundle content, unrecoverable: texture-cache maintenance — invalidateElement, fullness/utility checks, retireTexture and recycleTexture, the request queue (queueElement, dequeue, removeFromQueue, dequeueing setup) — and the layered texture cache that groups whole z-ordered runs of elements into per-zoom-level layers (makeLayer, drawEleInLayer, levelIsComplete, validateLayersElesOrdering, invalidateElements)]
[minified bundle content, unrecoverable: invalidateLayer, edge rendering — drawEdgePath with a Path2D cache keyed on the point list, a ghost-offset pre-pass, and per-style line dashes — and drawElementText: text background/border boxes (including a hand-drawn round rectangle), outline width, rotation, and wrapped-label line layout]
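The dash patterns survive in the fragment (dotted [1,1], dashed [6,3], solid []). A small sketch of that dispatch, assuming a standard CanvasRenderingContext2D:

    // Map an edge's line style onto canvas dash patterns, as in the fragment.
    function applyLineStyle(ctx, style) {
      if (!ctx.setLineDash) { return; } // very old canvases lack setLineDash
      switch (style) {
        case 'dotted': ctx.setLineDash([1, 1]); break;
        case 'dashed': ctx.setLineDash([6, 3]); break;
        case 'solid':  ctx.setLineDash([]);     break;
      }
    }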
[minified bundle content, unrecoverable: drawNode — background images, shape path caching, border and double-border strokes, ghost pass, pie-chart wedges (hasPie/drawPie), darkness and overlay passes — plus getPixelRatio and paintCache]
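getPixelRatio survives nearly intact in the fragment; restated readably, with the same vendor-prefixed fallbacks:

    // Effective canvas pixel ratio = devicePixelRatio / backing-store ratio.
    function getPixelRatio(ctx) {
      var backing =
        ctx.backingStorePixelRatio ||
        ctx.webkitBackingStorePixelRatio ||
        ctx.mozBackingStorePixelRatio ||
        ctx.msBackingStorePixelRatio ||
        ctx.oBackingStorePixelRatio || 1;
      return (window.devicePixelRatio || 1) / backing;
    }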
[minified bundle content, unrecoverable: the main render() pass — per-layer canvasNeedsRedraw hints, a texture-on-viewport fast path that redraws a cached full-scene texture while panning/zooming, motion-blur buffers composited at a reduced pixel ratio, and hide-edges-on-viewport culling]
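The motion-blur pass fades its buffer instead of clearing it, so previous frames decay gradually. A minimal sketch of that compositing trick, assuming the white 'destination-out' fill with configurable transparency seen in the fragment:

    // Partially erase a buffer: pixels lose alpha proportional to
    // `transparency`, leaving a ghost of earlier frames behind.
    function fadeCanvas(ctx, w, h, transparency) {
      var prev = ctx.globalCompositeOperation;
      ctx.globalCompositeOperation = 'destination-out';
      ctx.fillStyle = 'rgba(255, 255, 255, ' + transparency + ')';
      ctx.fillRect(0, 0, w, h);
      ctx.globalCompositeOperation = prev;
    }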
L=y["selection-box-border-width"].value/x;j.lineWidth=L,j.fillStyle="rgba("+y["selection-box-color"].value[0]+","+y["selection-box-color"].value[1]+","+y["selection-box-color"].value[2]+","+y["selection-box-opacity"].value+")",j.fillRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]),L>0&&(j.strokeStyle="rgba("+y["selection-box-border-color"].value[0]+","+y["selection-box-border-color"].value[1]+","+y["selection-box-border-color"].value[2]+","+y["selection-box-opacity"].value+")",j.strokeRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]))}if(u.bgActivePosistion&&!s.hoverData.selecting){x=s.cy.zoom();var B=u.bgActivePosistion;j.fillStyle="rgba("+y["active-bg-color"].value[0]+","+y["active-bg-color"].value[1]+","+y["active-bg-color"].value[2]+","+y["active-bg-opacity"].value+")",j.beginPath(),j.arc(B.x,B.y,y["active-bg-size"].pfValue/x,0,2*Math.PI),j.fill()}var F=s.lastRedrawTime;if(s.showFps&&F){F=Math.round(F);var q=Math.round(1e3/F);j.setTransform(1,0,0,1,0,0),j.fillStyle="rgba(255, 0, 0, 0.75)",j.strokeStyle="rgba(255, 0, 0, 0.75)",j.lineWidth=1,j.fillText("1 frame = "+F+" ms = "+q+" fps",0,20),j.strokeRect(0,30,250,20),j.fillRect(0,30,250*Math.min(q/60,1),20)}n||(d[s.SELECT_BOX]=!1)}if(p&&1!==h){var V=u.contexts[s.NODE],U=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_NODE],H=u.contexts[s.DRAG],G=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_DRAG],W=function(e,t,n){e.setTransform(1,0,0,1,0,0),n||!b?e.clearRect(0,0,s.canvasWidth,s.canvasHeight):C(e,0,0,s.canvasWidth,s.canvasHeight);var r=h;e.drawImage(t,0,0,s.canvasWidth*r,s.canvasHeight*r,0,0,s.canvasWidth,s.canvasHeight)};(d[s.NODE]||M[s.NODE])&&(W(V,U,M[s.NODE]),d[s.NODE]=!1),(d[s.DRAG]||M[s.DRAG])&&(W(H,G,M[s.DRAG]),d[s.DRAG]=!1)}s.prevViewport=E,s.clearingMotionBlur&&(s.clearingMotionBlur=!1,s.motionBlurCleared=!0,s.motionBlur=!0),p&&(s.motionBlurTimeout=setTimeout((function(){s.motionBlurTimeout=null,s.clearedForMotionBlur[s.NODE]=!1,s.clearedForMotionBlur[s.DRAG]=!1,s.motionBlur=!1,s.clearingMotionBlur=!f,s.mbFrames=0,d[s.NODE]=!0,d[s.DRAG]=!0,s.redraw()}),100)),t||c.emit("render")},e.exports=r},function(e,t,n){"use strict";for(var r=n(2),i={drawPolygonPath:function(e,t,n,r,i,o){var a=r/2,s=i/2;e.beginPath&&e.beginPath(),e.moveTo(t+a*o[0],n+s*o[1]);for(var l=1;l0&&a>0){p.clearRect(0,0,o,a),p.globalCompositeOperation="source-over";var h=this.getCachedZSortedEles();if(e.full)p.translate(-n.x1*c,-n.y1*c),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(n.x1*c,n.y1*c);else{var g=t.pan(),m={x:g.x*c,y:g.y*c};c*=t.zoom(),p.translate(m.x,m.y),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(-m.x,-m.y)}e.bg&&(p.globalCompositeOperation="destination-over",p.fillStyle=e.bg,p.rect(0,0,o,a),p.fill())}return f},i.png=function(e){return a(e,this.bufferCanvasImage(e),"image/png")},i.jpg=function(e){return a(e,this.bufferCanvasImage(e),"image/jpeg")},e.exports=i},function(e,t,n){"use strict";var r={nodeShapeImpl:function(e,t,n,r,i,o,a){switch(e){case"ellipse":return this.drawEllipsePath(t,n,r,i,o);case"polygon":return this.drawPolygonPath(t,n,r,i,o,a);case"roundrectangle":return this.drawRoundRectanglePath(t,n,r,i,o);case"cutrectangle":return this.drawCutRectanglePath(t,n,r,i,o);case"bottomroundrectangle":return this.drawBottomRoundRectanglePath(t,n,r,i,o);case"barrel":return this.drawBarrelPath(t,n,r,i,o)}}};e.exports=r},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(18),a=function e(){if(!(this instanceof e))return new 
[minified bundle content, unrecoverable: the stylesheet selector/css builder, Node-style timer shims and a setImmediate polyfill (process.nextTick, postMessage, MessageChannel, or script onreadystatechange, falling back to setTimeout), a lodash-style debounce with leading/trailing/maxWait options, a binary-heap module, and the start of the cytoscape-context-menus plugin (menu-item DOM construction and teardown)]
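The debounce in this chunk has the familiar lodash shape. A trailing-edge-only sketch of the core idea (the original additionally supports leading, trailing, and maxWait options):

    // Collapse bursts of calls into one call `wait` ms after the last one.
    function debounce(fn, wait) {
      var timer = null;
      return function () {
        var args = arguments, self = this;
        clearTimeout(timer);
        timer = setTimeout(function () { fn.apply(self, args); }, wait);
      };
    }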
e?t("#"+e):e,r=n.data("cy-context-menus-cxtfcn"),o=n.data("selector"),a=n.data("call-on-click-function"),s=n.data("cy-context-menus-cxtcorefcn");r&&i.off("cxttap",o,r),s&&i.off("cxttap",s),a&&n.off("click",a),n.remove()}"get"!==e&&(c("options",a=function(e,t){var n={};for(var r in e)n[r]=e[r];for(var r in t)n[r]=t[r];return n}(r,e)),l("active")&&y(),c("active",!0),o=u(a.contextMenuClasses),(s=t("
    ")).addClass("cy-context-menus-cxt-menu"),c("cxtMenu",s),t("body").append(s),s=s,g(a.menuItems),i.on("tapstart",n=function(){f(s),c("cxtMenuPosition",void 0),c("currentCyEvent",void 0)}),t(".cy-context-menus-cxt-menu").contextmenu((function(){return!1})));return function(e){return{isActive:function(){return l("active")},appendMenuItem:function(t){return m(t),e},appendMenuItems:function(t){return g(t),e},removeMenuItem:function(t){return x(t),e},setTrailingDivider:function(n,r){return function(e,n){var r=t("#"+e);n?r.addClass("cy-context-menus-divider"):r.removeClass("cy-context-menus-divider")}(n,r),e},insertBeforeMenuItem:function(t,n){return v(t,n),e},moveBeforeOtherMenuItem:function(n,r){return function(e,n){if(e!==n){var r=t("#"+e).detach(),i=t("#"+n);r.insertBefore(i)}}(n,r),e},disableMenuItem:function(n){return t("#"+n).attr("disabled",!0),e},enableMenuItem:function(n){return t("#"+n).attr("disabled",!1),e},hideMenuItem:function(n){return t("#"+n).data("show",!1),f(t("#"+n)),e},showMenuItem:function(n){return t("#"+n).data("show",!0),d(t("#"+n)),e},destroy:function(){return y(),e}}}(this)}))}};e.exports&&(e.exports=o),void 0===(r=function(){return o}.call(t,n,t,e))||(e.exports=r),"undefined"!=typeof cytoscape&&i&&o(cytoscape,i)}()},function(e,t,n){var r;r=function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,function(t){return e[t]}.bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=0)}([function(e,t,n){var r=n(1),i=function(e){e&&e("layout","dagre",r)};"undefined"!=typeof cytoscape&&i(cytoscape),e.exports=i},function(e,t,n){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var i=n(2),o=n(3),a=n(4);function s(e){this.options=o({},i,e)}s.prototype.run=function(){var e=this.options,t=e.cy,n=e.eles,i=function(e,t){return"function"==typeof t?t.apply(e,[e]):t},o=e.boundingBox||{x1:0,y1:0,w:t.width(),h:t.height()};void 0===o.x2&&(o.x2=o.x1+o.w),void 0===o.w&&(o.w=o.x2-o.x1),void 0===o.y2&&(o.y2=o.y1+o.h),void 0===o.h&&(o.h=o.y2-o.y1);var s=new a.graphlib.Graph({multigraph:!0,compound:!0}),l={},c=function(e,t){null!=t&&(l[e]=t)};c("nodesep",e.nodeSep),c("edgesep",e.edgeSep),c("ranksep",e.rankSep),c("rankdir",e.rankDir),c("ranker",e.ranker),s.setGraph(l),s.setDefaultEdgeLabel((function(){return{}})),s.setDefaultNodeLabel((function(){return{}}));for(var u=n.nodes(),d=0;d1?t-1:0),r=1;r-1}},function(e,t,n){var r=n(75);e.exports=function(e,t){var n=this.__data__,i=r(n,e);return i<0?(++this.size,n.push([e,t])):n[i][1]=t,this}},function(e,t,n){var r=n(74);e.exports=function(){this.__data__=new 
[minified bundle content, unrecoverable: lodash internals (list/stack/hash caches, baseIsNative, getRawTag, objectToString, shortOut, baseUniq), graphlib algorithms (all-pairs shortest paths, Prim's minimum spanning tree), and the dagre layout entry point, which builds a normalized layout graph from defaulted graph, node, and edge attributes]
[minified bundle content, unrecoverable: the dagre pipeline — makeSpaceForEdgeLabels, removeSelfEdges, acyclic.run, nestingGraph.run, rank, injectEdgeLabelProxies, removeEmptyRanks, normalizeRanks, assignRankMinMax, normalize.run, parentDummyChains, addBorderSegments, order, insertSelfEdges, adjustCoordinateSystem, position, positionSelfEdges, removeBorderNodes, normalize.undo, fixupEdgeLabelCoords, undoCoordinateSystem, translateGraph (shifts every node and edge point so the drawing starts at the configured margins), assignNodeIntersects, reversePoints, acyclic.undo, updateInputGraph — plus the option defaults (ranksep:50, edgesep:20, nodesep:50, rankdir:"tb") and assorted lodash helpers (cloneDeep, defaults, findIndex, toNumber, forIn, last, mapValues, max)]
[minified bundle content, unrecoverable: more lodash internals (merge/baseMergeDeep, isPlainObject, sortBy/baseOrderBy with multi-key comparators), dagre's greedy feedback-arc-set heuristic that buckets nodes by in/out degree in doubly linked lists, the sentinel linked-list module itself, and the normalize (dummy-chain), network-simplex (leaveEdge/enterEdge/exchangeEdges, low/lim values), parentDummyChains, cross-counting, barycenter, resolveConflicts, and sortSubgraph modules of the ordering phase]
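The sentinel list is still readable in the fragment; the same structure, unminified (helper name is mine):

    // Doubly linked list with a sentinel node marking both head and tail;
    // dagre's greedy feedback-arc-set uses it to bucket nodes by degree.
    function List() {
      var s = {};
      s._next = s._prev = s;
      this._sentinel = s;
    }
    List.prototype.enqueue = function (entry) {
      var s = this._sentinel;
      if (entry._prev && entry._next) { unlink(entry); } // re-enqueue moves it
      entry._next = s._next;
      s._next._prev = entry;
      s._next = entry;
      entry._prev = s;
    };
    List.prototype.dequeue = function () {
      var s = this._sentinel, tail = s._prev;
      if (tail !== s) { unlink(tail); return tail; }
    };
    function unlink(entry) {
      entry._prev._next = entry._next;
      entry._next._prev = entry._prev;
      delete entry._next;
      delete entry._prev;
    }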
[minified bundle content, unrecoverable: buildLayerGraph and addSubgraphConstraints for the ordering phase, the coordinate-assignment phase (per-rank y positions, Brandes-Köpf-style positionX with conflict detection), and then the start of the dbt docs Angular app: the modelTreeLine directive (tree navigation with per-resource icons and state routing), its model_tree_line.html template, an .unselectable CSS rule, and the docsSearch directive — query highlighting with regex-escaped terms, a ±75-character context snippet, and per-column filtering]
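The docsSearch snippet logic is visible in the fragment: find the first match and return about 75 characters of context on either side. A simplified sketch using indexOf rather than the original's escaped-regex search:

    // Return a "...context..." excerpt centered on the first query match.
    function snippet(text, query) {
      var clean = text.replace(/\s+/g, ' ');
      var at = clean.toLowerCase().indexOf(query.toLowerCase());
      if (at < 0) { return clean; }
      var start = Math.max(0, at - 75);
      var end = Math.min(clean.length, at + 75);
      return '...' + clean.substring(start, end) + '...';
    }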
[minified bundle content, unrecoverable: the search.html template (query echo, result count, show-name/description/column/code/tag checkboxes, per-result column and tag chips, "Show N more" expanders) and the tableDetails directive — human-readable byte and percentage formatting, owner/type/package/language/relation detail rows, and extended stats sorted by label]
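The byte formatter's unit table and log-1024 bucketing survive in the fragment. A simplified sketch (omitting the original's rescaling of sub-1 values; the NaN/Infinity guard is kept):

    // Format a byte count with a 1024-based unit, as tableDetails does.
    function formatBytes(bytes, decimals) {
      if (bytes === 0) { return '0 bytes'; }
      if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) { return '-'; }
      var units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB'];
      var i = Math.floor(Math.log(bytes) / Math.log(1024));
      return (bytes / Math.pow(1024, i)).toFixed(decimals || 0) + ' ' + units[i];
    }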
[minified bundle content, unrecoverable: the table_details.html template (Details and Tags panels, key/value rows for both standard and extended stats) and the columnDetails directive — test-badge lookup, expandable per-column rows, and index-sorted column lists]
[minified bundle content, unrecoverable: the column_details.html template (Column/Type/Description/Tests/More table with U/N/F/A test badges and per-column Details, Description, and Generic Tests panels) and the codeBlock directive — version selector, Prism syntax highlighting keyed to the model language (language-python or language-sql), and copy-to-clipboard with a transient "copied" flag]
[minified bundle content, unrecoverable: the code_block.html template and its Prism CSS overrides, the macroArguments directive and its Argument/Type/Description table template, and the referenceList directive — grouping referencing nodes by resource type and mapping types to plural headings (Models, Seeds, Tests, Snapshots, Analyses, Macros, Exposures, Metrics, Operations)]
[minified bundle content, unrecoverable: the references template ("No resources reference this ..." plus grouped node links) and the page controllers — ModelCtrl, SourceCtrl, SeedCtrl, and SnapshotCtrl each resolve their node by unique_id once the project manifest is ready, collect parents and references, and expose Source/Compiled (or generated sample SQL) code versions]
n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";const r=n(9),i=n(21),o=n(33);n(34),r.module("dbt").controller("MacroCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,a,s,l){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.macro={},n.ready((function(t){let n=t.macros[e.model_uid];if(e.macro=n,e.references=o.getMacroReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=o.getMacroParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.macro.is_adapter_macro){var r=t.metadata.adapter_type;e.versions=n.impls,n.impls[r]?e.default_version=r:n.impls.default?e.default_version="default":e.default_version=i.keys(n.impls)[0]}else e.default_version="Source",e.versions={Source:e.macro.macro_sql}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("AnalysisCtrl",["$scope","$state","project","code","$transitions","$anchorScroll","$location",function(e,t,n,r,o,a,s){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.default_version="Source",e.versions={Source:"",Compiled:""},e.model={},n.ready((function(t){let n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language,e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code}}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("ExposureCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.exposure={},n.ready((function(t){let n=t.nodes[e.model_uid];e.exposure=n,e.parents=i.getParents(t,n),e.parentsLength=e.parents.length,e.language=n.language,e.extra_table_fields=[{name:"Maturity",value:e.exposure.maturity},{name:"Owner",value:e.exposure.owner.name},{name:"Owner email",value:e.exposure.owner.email},{name:"Exposure name",value:e.exposure.name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("MetricCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.project=n,e.codeService=r,e.extra_table_fields=[],e.versions={},e.metric={},n.ready((function(t){let n=t.nodes[e.model_uid];e.metric=n,e.parents=i.getParents(t,n),e.parentsLength=e.parents.length,e.versions={Definition:r.generateMetricSQL(e.metric)};const o="expression"===e.metric.type?"Expression metric":"Aggregate metric";e.extra_table_fields=[{name:"Metric Type",value:o},{name:"Metric name",value:e.metric.name}]}))}])},function(e,t,n){"use strict";const r=n(9),i=n(33);n(34),r.module("dbt").controller("OperationCtrl",["$scope","$state","project","code","$anchorScroll","$location",function(e,t,n,r,o,a){e.model_uid=t.params.unique_id,e.tab=t.params.tab,e.project=n,e.codeService=r,e.versions={},e.model={},n.ready((function(t){let 
n=t.nodes[e.model_uid];e.model=n,e.references=i.getReferences(t,n),e.referencesLength=Object.keys(e.references).length,e.parents=i.getParents(t,n),e.parentsLength=Object.keys(e.parents).length,e.language=n.language;e.versions={Source:e.model.raw_code,Compiled:e.model.compiled_code||"\n-- compiled code not found for this model\n"},setTimeout((function(){o()}),0)}))}])},function(e,t,n){"use strict";n(9).module("dbt").controller("GraphCtrl",["$scope","$state","$window","graph","project","selectorService",function(e,t,n,r,i,o){function a(e){return e&&"source"==e.resource_type?"source:"+e.source_name+"."+e.name:e&&"exposure"==e.resource_type?"exposure:"+e.name:e&&"metric"==e.resource_type?"metric:"+e.name:e.name?e.name:"*"}e.graph=r.graph,e.graphService=r,e.graphRendered=function(e){r.setGraphReady(e)},e.$watch((function(){return t.params.unique_id}),(function(e,t){e&&e!=t&&i.find_by_id(e,(function(e){e&&("sidebar"==r.orientation?r.showVerticalGraph(a(e),!1):r.showFullGraph(a(e)))})),e||o.clearViewNode()}))}])},function(e,t,n){"use strict";const r=n(9),i=n(21),o=n(31),a=n(458);n(459),n(206),n(467),n(469),n(472),n(476),r.module("dbt").controller("MainController",["$scope","$route","$state","project","graph","selectorService","trackingService","locationService","$transitions",function(e,t,n,r,s,l,c,u,d){function f(t){e.model_uid=t;var n=r.node(t);n&&l.resetSelection(n)}function p(e){e&&setTimeout((function(){var t=o("*[data-nav-unique-id='"+e+"']");t.length&&t[0].scrollIntoView&&t[0].scrollIntoView({behavior:"smooth",block:"center",inline:"center"})}),1)}e.tree={database:{},project:{},sources:{}},e.search={query:"",results:[],is_focused:!1},e.logo=a,e.model_uid=null,e.project={},o("body").bind("keydown",(function(e){"t"==event.key&&"INPUT"!=event.target.tagName&&(console.log("Opening search"),o("#search").focus(),event.preventDefault())})),e.onSearchFocus=function(t,n){e.search.is_focused=n},e.clearSearch=function(){e.search.is_focused=!1,e.search.query="",e.search.results=[],o("#search").blur()},e.$on("clearSearch",(function(){e.clearSearch()})),e.$on("query",(function(t,n){e.search.is_focused=!0,e.search.query=n})),e.onSearchKeypress=function(t){"Escape"==t.key&&(e.clearSearch(),t.preventDefault())},r.getModelTree(n.params.unique_id,(function(t){e.tree.database=t.database,e.tree.project=t.project,e.tree.sources=t.sources,e.tree.exposures=t.exposures,e.tree.metrics=t.metrics,setTimeout((function(){p(e.model_uid)}))})),d.onSuccess({},(function(t,n){var i=t.router.globals.params,o=l.getViewNode(),a=o?o.unique_id:null,s=i.unique_id,u=!0;if(t.from().name==t.to().name&&a==s&&(u=!1),u&&i.unique_id){var d=r.updateSelected(i.unique_id);e.tree.database=d.database,e.tree.project=d.project,e.tree.sources=d.sources,e.search.query="",console.log("updating selected model to: ",i),f(i.unique_id),setTimeout((function(){p(i.unique_id)}))}u&&c.track_pageview()})),e.$watch("search.query",(function(t){e.search.results=function(t){if(""===e.search.query)return t;let n={name:10,tags:5,description:3,raw_code:2,columns:1};return i.each(t,(function(t){t.overallWeight=0,i.each(Object.keys(n),(function(r){if(null!=t.model[r]){let o=0,a=t.model[r],s=e.search.query.toLowerCase();if("columns"===r)i.each(a,(function(e){if(e.name){let t=e.name.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}}));else if("tags"===r)i.each(a,(function(e){let t=e.toLowerCase(),n=0;for(;-1!=n;)n=t.indexOf(s,n),-1!=n&&(o++,n++)}));else{a=a.toLowerCase();let 
e=0;for(;-1!=e;)e=a.indexOf(s,e),-1!=e&&(o++,e++)}t.overallWeight+=o*n[r]}}))})),t}(r.search(t))})),r.init(),r.ready((function(t){e.project=t,e.search.results=r.search("");var o=i.unique(i.pluck(i.values(t.nodes),"package_name")).sort(),a=[null];i.each(t.nodes,(function(e){var t=e.tags;a=i.union(a,t).sort()})),l.init({packages:o,tags:a}),f(n.params.unique_id);var d=u.parseState(n.params);d.show_graph&&s.ready((function(){i.assign(l.selection.dirty,d.selected);var e=l.updateSelection();s.updateGraph(e)}));var p=t.metadata||{};c.init({track:p.send_anonymous_usage_stats,project_id:p.project_id})}))}])},function(e,t){e.exports="data:image/svg+xml,%3Csvg width='242' height='90' viewBox='0 0 242 90' fill='none' xmlns='http://www.w3.org/2000/svg'%3E %3Cpath d='M240.384 74.5122L239.905 75.8589H239.728L239.249 74.5156V75.8589H238.941V74.0234H239.324L239.816 75.3872L240.309 74.0234H240.691V75.8589H240.384V74.5122ZM238.671 74.3003H238.169V75.8589H237.858V74.3003H237.352V74.0234H238.671V74.3003Z' fill='%23262A38'/%3E %3Cpath d='M154.123 13.915V75.3527H141.672V69.0868C140.37 71.2839 138.499 73.0742 136.22 74.2134C133.779 75.434 131.012 76.085 128.246 76.085C124.828 76.1664 121.41 75.1899 118.562 73.2369C115.633 71.2839 113.354 68.5986 111.889 65.425C110.262 61.7631 109.448 57.8572 109.529 53.8698C109.448 49.8825 110.262 45.9765 111.889 42.3961C113.354 39.3038 115.633 36.6185 118.481 34.7469C121.41 32.8753 124.828 31.9801 128.246 32.0615C130.931 32.0615 133.616 32.6311 135.976 33.8517C138.255 34.991 140.126 36.6999 141.428 38.8156V18.0651L154.123 13.915ZM139.15 63.2279C140.777 61.1121 141.672 58.0199 141.672 54.0326C141.672 50.0452 140.859 47.0344 139.15 44.9187C137.441 42.8029 134.755 41.5823 131.989 41.6637C129.222 41.5009 126.537 42.7215 124.746 44.8373C123.038 46.953 122.142 49.9639 122.142 53.8698C122.142 57.8572 123.038 60.9494 124.746 63.1465C126.455 65.3436 129.222 66.5642 131.989 66.4828C135.081 66.4828 137.522 65.3436 139.15 63.2279Z' fill='%23262A38'/%3E %3Cpath d='M198.635 34.6655C201.564 36.5371 203.843 39.2225 205.226 42.3147C206.853 45.8952 207.667 49.8011 207.586 53.7885C207.667 57.7758 206.853 61.7632 205.226 65.3436C203.761 68.5172 201.483 71.2026 198.553 73.1556C195.705 75.0272 192.287 76.0037 188.87 75.9223C186.103 76.0037 183.336 75.3527 180.895 74.0507C178.617 72.9114 176.745 71.1212 175.524 68.9241V75.2713H162.993V18.0651L175.606 13.915V38.9783C176.826 36.7812 178.698 34.991 180.976 33.8517C183.418 32.5498 186.103 31.8988 188.87 31.9801C192.287 31.8988 195.705 32.8753 198.635 34.6655ZM192.45 63.1465C194.159 60.9494 194.973 57.8572 194.973 53.7885C194.973 49.8825 194.159 46.8716 192.45 44.7559C190.741 42.6402 188.381 41.5823 185.289 41.5823C182.523 41.4196 179.837 42.6402 178.047 44.8373C176.338 47.0344 175.524 50.0452 175.524 53.9512C175.524 57.9386 176.338 61.0308 178.047 63.1465C179.756 65.3436 182.441 66.5642 185.289 66.4015C188.056 66.5642 190.741 65.3436 192.45 63.1465Z' fill='%23262A38'/%3E %3Cpath d='M225 42.4774V58.915C225 61.2749 225.651 62.9838 226.791 64.0416C228.093 65.1809 229.801 65.7505 231.592 65.6691C232.975 65.6691 234.44 65.425 235.742 65.0995V74.8644C233.382 75.6782 230.941 76.085 228.499 76.0037C223.292 76.0037 219.304 74.5389 216.537 71.6094C213.771 68.68 212.387 64.5299 212.387 59.1592V23.1103L225 19.0416V33.038H235.742V42.4774H225Z' fill='%23262A38'/%3E %3Cpath d='M86.1754 3.74322C88.2911 5.77758 89.6745 8.46293 90 11.3924C90 12.613 89.6745 13.4268 88.9421 14.9729C88.2098 16.519 79.1772 32.1429 76.4919 36.4557C74.9458 38.9783 74.132 41.9892 74.132 
44.9186C74.132 47.9295 74.9458 50.859 76.4919 53.3816C79.1772 57.6944 88.2098 73.3996 88.9421 74.9457C89.6745 76.4919 90 77.2242 90 78.4448C89.6745 81.3743 88.3725 84.0597 86.2568 86.0127C84.2224 88.1284 81.5371 89.5118 78.689 89.7559C77.4684 89.7559 76.6546 89.4304 75.1899 88.698C73.7251 87.9656 57.7758 79.1772 53.4629 76.4919C53.1374 76.3291 52.8119 76.085 52.4051 75.9222L31.085 63.3092C31.5732 67.3779 33.3635 71.2839 36.2929 74.132C36.8626 74.7016 37.4322 75.1899 38.0832 75.6781C37.5949 75.9222 37.0253 76.1664 36.5371 76.4919C32.2242 79.1772 16.519 88.2098 14.9729 88.9421C13.4268 89.6745 12.6944 90 11.3924 90C8.46293 89.6745 5.77758 88.3725 3.82459 86.2568C1.70886 84.2224 0.325497 81.5371 0 78.6076C0.0813743 77.387 0.406872 76.1664 1.05787 75.1085C1.79024 73.5624 10.8228 57.8571 13.5081 53.5443C15.0542 51.0217 15.868 48.0922 15.868 45.0814C15.868 42.0705 15.0542 39.141 13.5081 36.6184C10.8228 32.1429 1.70886 16.4376 1.05787 14.8915C0.406872 13.8336 0.0813743 12.613 0 11.3924C0.325497 8.46293 1.62749 5.77758 3.74322 3.74322C5.77758 1.62749 8.46293 0.325497 11.3924 0C12.613 0.0813743 13.8336 0.406872 14.9729 1.05787C16.2749 1.62749 27.7486 8.30018 33.8517 11.8807L35.2351 12.6944C35.7233 13.0199 36.1302 13.264 36.4557 13.4268L37.1067 13.8336L58.8336 26.6908C58.3454 21.8083 55.8228 17.3327 51.9168 14.3219C52.4051 14.0778 52.9747 13.8336 53.4629 13.5081C57.7758 10.8228 73.481 1.70886 75.0271 1.05787C76.085 0.406872 77.3056 0.0813743 78.6076 0C81.4557 0.325497 84.1411 1.62749 86.1754 3.74322ZM46.1392 50.7776L50.7776 46.1392C51.4286 45.4882 51.4286 44.5118 50.7776 43.8608L46.1392 39.2224C45.4882 38.5714 44.5118 38.5714 43.8608 39.2224L39.2224 43.8608C38.5714 44.5118 38.5714 45.4882 39.2224 46.1392L43.8608 50.7776C44.4304 51.3472 45.4882 51.3472 46.1392 50.7776Z' fill='%23FF694A'/%3E %3C/svg%3E"},function(e,t,n){"use strict";n.r(t);var r=n(63),i=n.n(r);n(460),n(461),n(462),n(463),n(465);const o=n(9),a=(n(31),n(21));window.Prism=i.a,o.module("dbt").factory("code",["$sce",function(e){var t={copied:!1,highlight:function(t,n="sql"){if("sql"==n)var r=i.a.highlight(t,i.a.languages.sql,"sql");else if("python"==n)r=i.a.highlight(t,i.a.languages.python,"python");return e.trustAsHtml(r)},copy_to_clipboard:function(e){var t=document.createElement("textarea");t.value=e,t.setAttribute("readonly",""),t.style.position="absolute",t.style.left="-9999px",document.body.appendChild(t),t.select(),document.execCommand("copy"),document.body.removeChild(t)},generateSourceSQL:function(e){var t=["select"],n=a.size(e.columns),r=a.keys(e.columns);a.each(r,(function(e,r){var i=" "+e;r+1!=n&&(i+=","),t.push(i)}));const i=(e.database?e.database+".":"")+e.schema+"."+e.identifier;return t.push("from "+i),t.join("\n")},generateMetricSQL:function(e){if("expression"==e.type)return e.sql;const t=[`select ${e.type}(${e.sql})`,`from {{ ${e.model} }}`];if(e.filters.length>0){const n=e.filters.map(e=>`${e.field} ${e.operator} ${e.value}`).join(" AND ");t.push("where "+n)}return t.join("\n")}};return 
t}])},function(e,t){Prism.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},identifier:{pattern:/(^|[^@\\])`(?:\\[\s\S]|[^`\\]|``)*`/,greedy:!0,lookbehind:!0,inside:{punctuation:/^`|`$/}},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:COL|_INSERT)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:ING|S)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS 
LIKE|XOR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS LIKE|XOR)\b/i,punctuation:/[;[\]()`,.]/}},function(e,t){Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t ]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python},function(e,t){!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document){var e=/\n(?!$)/g,t=Prism.plugins.lineNumbers={getLine:function(e,t){if("PRE"===e.tagName&&e.classList.contains("line-numbers")){var n=e.querySelector(".line-numbers-rows");if(n){var r=parseInt(e.getAttribute("data-start"),10)||1,i=r+(n.children.length-1);t<r&&(t=r),t>i&&(t=i);var o=t-r;return n.children[o]}}},resize:function(e){r([e])},assumeViewportIndependence:!0},n=void 0;window.addEventListener("resize",(function(){t.assumeViewportIndependence&&n===window.innerWidth||(n=window.innerWidth,r(Array.prototype.slice.call(document.querySelectorAll("pre.line-numbers"))))})),Prism.hooks.add("complete",(function(t){if(t.code){var n=t.element,i=n.parentNode;if(i&&/pre/i.test(i.nodeName)&&!n.querySelector(".line-numbers-rows")&&Prism.util.isActive(n,"line-numbers")){n.classList.remove("line-numbers"),i.classList.add("line-numbers");var o,a=t.code.match(e),s=a?a.length+1:1,l=new Array(s+1).join("<span></span>");(o=document.createElement("span")).setAttribute("aria-hidden","true"),o.className="line-numbers-rows",o.innerHTML=l,i.hasAttribute("data-start")&&(i.style.counterReset="linenumber "+(parseInt(i.getAttribute("data-start"),10)-1)),t.element.appendChild(o),r([i]),Prism.hooks.run("line-numbers",t)}}})),Prism.hooks.add("line-numbers",(function(e){e.plugins=e.plugins||{},e.plugins.lineNumbers=!0}))}function 
r(t){if(0!=(t=t.filter((function(e){var t=function(e){if(!e)return null;return window.getComputedStyle?getComputedStyle(e):e.currentStyle||null}(e)["white-space"];return"pre-wrap"===t||"pre-line"===t}))).length){var n=t.map((function(t){var n=t.querySelector("code"),r=t.querySelector(".line-numbers-rows");if(n&&r){var i=t.querySelector(".line-numbers-sizer"),o=n.textContent.split(e);i||((i=document.createElement("span")).className="line-numbers-sizer",n.appendChild(i)),i.innerHTML="0",i.style.display="block";var a=i.getBoundingClientRect().height;return i.innerHTML="",{element:t,lines:o,lineHeights:[],oneLinerHeight:a,sizer:i}}})).filter(Boolean);n.forEach((function(e){var t=e.sizer,n=e.lines,r=e.lineHeights,i=e.oneLinerHeight;r[n.length-1]=void 0,n.forEach((function(e,n){if(e&&e.length>1){var o=t.appendChild(document.createElement("span"));o.style.display="block",o.textContent=e}else r[n]=i}))})),n.forEach((function(e){for(var t=e.sizer,n=e.lineHeights,r=0,i=0;i code {\n\tposition: relative;\n\twhite-space: inherit;\n}\n\n.line-numbers .line-numbers-rows {\n\tposition: absolute;\n\tpointer-events: none;\n\ttop: 0;\n\tfont-size: 100%;\n\tleft: -3.8em;\n\twidth: 3em; /* works for line-numbers below 1000 lines */\n\tletter-spacing: -1px;\n\tborder-right: 1px solid #999;\n\n\t-webkit-user-select: none;\n\t-moz-user-select: none;\n\t-ms-user-select: none;\n\tuser-select: none;\n\n}\n\n\t.line-numbers-rows > span {\n\t\tdisplay: block;\n\t\tcounter-increment: linenumber;\n\t}\n\n\t\t.line-numbers-rows > span:before {\n\t\t\tcontent: counter(linenumber);\n\t\t\tcolor: #999;\n\t\t\tdisplay: block;\n\t\t\tpadding-right: 0.8em;\n\t\t\ttext-align: right;\n\t\t}\n',""])},function(e,t,n){var r=n(466);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,'/**\n * GHColors theme by Avi Aryan (http://aviaryan.in)\n * Inspired by Github syntax coloring\n */\n\ncode[class*="language-"],\npre[class*="language-"] {\n\tcolor: #393A34;\n\tfont-family: "Consolas", "Bitstream Vera Sans Mono", "Courier New", Courier, monospace;\n\tdirection: ltr;\n\ttext-align: left;\n\twhite-space: pre;\n\tword-spacing: normal;\n\tword-break: normal;\n\tfont-size: .9em;\n\tline-height: 1.2em;\n\n\t-moz-tab-size: 4;\n\t-o-tab-size: 4;\n\ttab-size: 4;\n\n\t-webkit-hyphens: none;\n\t-moz-hyphens: none;\n\t-ms-hyphens: none;\n\thyphens: none;\n}\n\npre > code[class*="language-"] {\n\tfont-size: 1em;\n}\n\npre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,\ncode[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {\n\tbackground: #b3d4fc;\n}\n\npre[class*="language-"]::selection, pre[class*="language-"] ::selection,\ncode[class*="language-"]::selection, code[class*="language-"] ::selection {\n\tbackground: #b3d4fc;\n}\n\n/* Code blocks */\npre[class*="language-"] {\n\tpadding: 1em;\n\tmargin: .5em 0;\n\toverflow: auto;\n\tborder: 1px solid #dddddd;\n\tbackground-color: white;\n}\n\n/* Inline code */\n:not(pre) > code[class*="language-"] {\n\tpadding: .2em;\n\tpadding-top: 1px;\n\tpadding-bottom: 1px;\n\tbackground: #f8f8f8;\n\tborder: 1px solid #dddddd;\n}\n\n.token.comment,\n.token.prolog,\n.token.doctype,\n.token.cdata {\n\tcolor: #999988;\n\tfont-style: italic;\n}\n\n.token.namespace {\n\topacity: .7;\n}\n\n.token.string,\n.token.attr-value {\n\tcolor: #e3116c;\n}\n\n.token.punctuation,\n.token.operator {\n\tcolor: #393A34; /* no highlight 
*/\n}\n\n.token.entity,\n.token.url,\n.token.symbol,\n.token.number,\n.token.boolean,\n.token.variable,\n.token.constant,\n.token.property,\n.token.regex,\n.token.inserted {\n\tcolor: #36acaa;\n}\n\n.token.atrule,\n.token.keyword,\n.token.attr-name,\n.language-autohotkey .token.selector {\n\tcolor: #00a4db;\n}\n\n.token.function,\n.token.deleted,\n.language-autohotkey .token.tag {\n\tcolor: #9a050f;\n}\n\n.token.tag,\n.token.selector,\n.language-autohotkey .token.keyword {\n\tcolor: #00009f;\n}\n\n.token.important,\n.token.function,\n.token.bold {\n\tfont-weight: bold;\n}\n\n.token.italic {\n\tfont-style: italic;\n}\n',""])},function(e,t,n){n(31);const r=n(21),i=n(148),o=n(203),a=n(468);angular.module("dbt").factory("graph",["$state","$window","$q","selectorService","project","locationService",function(e,t,n,s,l,c){var u={vertical:{userPanningEnabled:!1,boxSelectionEnabled:!1,maxZoom:1.5},horizontal:{userPanningEnabled:!0,boxSelectionEnabled:!1,maxZoom:1,minZoom:.05}},d={none:{name:"null"},left_right:{name:"dagre",rankDir:"LR",rankSep:200,edgeSep:30,nodeSep:50},top_down:{name:"preset",positions:function(t){var n=e.params.unique_id;if(!n)return{x:0,y:0};var a=f.graph.pristine.dag,s=r.sortBy(o.ancestorNodes(a,n,1)),l=r.sortBy(o.descendentNodes(a,n,1)),c=r.partial(r.includes,s),u=r.partial(r.includes,l),d=a.filterNodes(c),p=a.filterNodes(u);return function(e,t,n,i){console.log("Getting position for ",i,". Primary: ",e);var o,a=100/(1+Math.max(t.length,n.length));if(e==i)return{x:0,y:0};if(r.includes(t,i))o={set:t,index:r.indexOf(t,i),factor:-1,type:"parent"};else{if(!r.includes(n,i))return{x:0,y:0};o={set:n,index:r.indexOf(n,i),factor:1,type:"child"}}var s=o.set.length;if("parent"==o.type)var l={x:(0+o.index)*a,y:-200-100*(s-o.index-1)};else l={x:(0+o.index)*a,y:200+100*(s-o.index-1)};return l}(n,i.alg.topsort(d),i.alg.topsort(p).reverse(),t.data("id"))}}},f={loading:!0,loaded:n.defer(),graph_element:null,orientation:"sidebar",expanded:!1,graph:{options:u.vertical,pristine:{nodes:{},edges:{},dag:null},elements:[],layout:d.none,style:[{selector:"edge.vertical",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#027599","arrow-scale":1.5,"line-color":"#027599",width:3,"target-distance-from-node":"5px","source-endpoint":"0% 50%","target-endpoint":"0deg"}},{selector:"edge.horizontal",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#006f8a","arrow-scale":1.5,"target-distance-from-node":"10px","source-distance-from-node":"5px","line-color":"#006f8a",width:3,"source-endpoint":"50% 0%","target-endpoint":"270deg"}},{selector:"edge[selected=1]",style:{"line-color":"#bd6bb6","target-arrow-color":"#bd6bb6","z-index":1}},{selector:'node[display="none"]',style:{display:"none"}},{selector:"node.vertical",style:{"text-margin-x":"5px","background-color":"#0094b3","font-size":"16px",shape:"ellipse",color:"#fff",width:"5px",height:"5px",padding:"5px",content:"data(label)","font-weight":300,"text-valign":"center","text-halign":"right"}},{selector:"node.horizontal",style:{"background-color":"#0094b3","font-size":"24px",shape:"roundrectangle",color:"#fff",width:"label",height:"label",padding:"12px",content:"data(label)","font-weight":300,"font-family":'-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Fira Sans", "Droid Sans", "Helvetica Neue", Helvetica, Arial, 
sans-serif',"text-valign":"center","text-halign":"center",ghost:"yes","ghost-offset-x":"2px","ghost-offset-y":"4px","ghost-opacity":.5,"text-outline-color":"#000","text-outline-width":"1px","text-outline-opacity":.2}},{selector:'node[resource_type="source"]',style:{"background-color":"#5fb825"}},{selector:'node[resource_type="exposure"]',style:{"background-color":"#ff694b"}},{selector:'node[resource_type="metric"]',style:{"background-color":"#ff5688"}},{selector:'node[language="python"]',style:{"background-color":"#6a5acd"}},{selector:"node[node_color]",style:{"background-color":"data(node_color)"}},{selector:"node[selected=1]",style:{"background-color":"#bd6bb6"}},{selector:"node.horizontal[selected=1]",style:{"background-color":"#88447d"}},{selector:"node.horizontal.dirty",style:{"background-color":"#919599"}},{selector:"node[hidden=1]",style:{"background-color":"#919599","background-opacity":.5}}],ready:function(e){console.log("graph ready")}}};function p(e,t,n){var i=r.map(e,(function(e){return f.graph.pristine.nodes[e]})),o=[];r.flatten(r.each(e,(function(t){var n=f.graph.pristine.edges[t];r.each(n,(function(t){r.includes(e,t.data.target)&&r.includes(e,t.data.source)&&o.push(t)}))})));var s=r.compact(i).concat(r.compact(o));return r.each(f.graph.elements,(function(e){e.data.display="none",e.data.selected=0,e.data.hidden=0,e.classes=n})),r.each(s,(function(e){e.data.display="element",e.classes=n,t&&r.includes(t,e.data.unique_id)&&(e.data.selected=1),r.get(e,["data","docs","show"],!0)||(e.data.hidden=1);var i=r.get(e,["data","docs","node_color"]);i&&a.isValidColor(i)&&(e.data.node_color=i)})),f.graph.elements=r.filter(s,(function(e){return"element"==e.data.display})),e}function h(e,t,n){var r=f.graph.pristine.dag;if(r){var i=f.graph.pristine.nodes,o=s.selectNodes(r,i,e),a=n?o.matched:[];return p(o.selected,a,t)}}return f.setGraphReady=function(e){f.loading=!1,f.loaded.resolve(),f.graph_element=e},f.ready=function(e){f.loaded.promise.then((function(){e(f)}))},f.manifest={},f.packages=[],f.selected_node=null,f.getCanvasHeight=function(){return.8*t.innerHeight+"px"},l.ready((function(e){f.manifest=e,f.packages=r.uniq(r.map(f.manifest.nodes,"package_name")),r.each(r.filter(f.manifest.nodes,(function(e){var t=r.includes(["model","seed","source","snapshot","analysis","exposure","metric","operation"],e.resource_type),n="test"==e.resource_type&&!e.hasOwnProperty("test_metadata");return t||n})),(function(e){var t={group:"nodes",data:r.assign(e,{parent:e.package_name,id:e.unique_id,is_group:"false"})};f.graph.pristine.nodes[e.unique_id]=t})),r.each(f.manifest.parent_map,(function(e,t){r.each(e,(function(e){var n=f.manifest.nodes[e],i=f.manifest.nodes[t];if(r.includes(["model","source","seed","snapshot","metric"],n.resource_type)&&("test"!=i.resource_type||!i.hasOwnProperty("test_metadata"))){var o=n.unique_id+"|"+i.unique_id,a={group:"edges",data:{source:n.unique_id,target:i.unique_id,unique_id:o}},s=i.unique_id;f.graph.pristine.edges[s]||(f.graph.pristine.edges[s]=[]),f.graph.pristine.edges[s].push(a)}}))}));var t=new 
i.Graph({directed:!0});r.each(f.graph.pristine.nodes,(function(e){t.setNode(e.data.unique_id,e.data.name)})),r.each(f.graph.pristine.edges,(function(e){r.each(e,(function(e){t.setEdge(e.data.source,e.data.target)}))})),f.graph.pristine.dag=t,f.graph.elements=r.flatten(r.values(f.graph.pristine.nodes).concat(r.values(f.graph.pristine.edges))),p(t.nodes())})),f.hideGraph=function(){f.orientation="sidebar",f.expanded=!1},f.showVerticalGraph=function(e,t){f.orientation="sidebar",t&&(f.expanded=!0);var n=h(r.assign({},s.options,{include:"+"+e+"+",exclude:"",hops:1}),"vertical",!0);return f.graph.layout=d.top_down,f.graph.options=u.vertical,n},f.showFullGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=r.assign({},s.options);e?(t.include="+"+e+"+",t.exclude=""):(t.include="",t.exclude="");var n=h(t,"horizontal",!0);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(t),n},f.updateGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=h(e,"horizontal",!1);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(e),t},f.deselectNodes=function(){"fullscreen"==f.orientation&&f.graph_element.elements().data("selected",0)},f.selectNode=function(e){if("fullscreen"==f.orientation){f.graph.pristine.nodes[e];var t=f.graph.pristine.dag,n=r.indexBy(o.ancestorNodes(t,e)),i=r.indexBy(o.descendentNodes(t,e));n[e]=e,i[e]=e;var a=f.graph_element;r.each(f.graph.elements,(function(t){var r=a.$id(t.data.id);n[t.data.source]&&n[t.data.target]||i[t.data.source]&&i[t.data.target]||t.data.unique_id==e?r.data("selected",1):r.data("selected",0)}))}},f.markDirty=function(e){f.markAllClean(),r.each(e,(function(e){f.graph_element.$id(e).addClass("dirty")}))},f.markAllClean=function(){f.graph_element&&f.graph_element.elements().removeClass("dirty")},f}])},function(e,t,n){"use strict";n.r(t),n.d(t,"isValidColor",(function(){return i}));const r=new 
Set(["aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","ghostwhite","gold","goldenrod","gray","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightpink","lightsalmon","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumslateblue","mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","rebeccapurple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen"]);function i(e){if(!e)return!1;const t=e.trim().toLowerCase();if(""===t)return!1;const n=t.match(/^#([A-Fa-f0-9]{3}){1,2}$/),i=r.has(t);return Boolean(n)||i}},function(e,t,n){n(31);const r=n(21),i=n(470);angular.module("dbt").factory("selectorService",["$state",function(e){var t={include:"",exclude:"",packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"],depth:1},n={view_node:null,selection:{clean:r.clone(t),dirty:r.clone(t)},options:{packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"]},init:function(e){r.each(e,(function(e,r){n.options[r]=e,t[r]=e,n.selection.clean[r]=e,n.selection.dirty[r]=e}))},resetSelection:function(e){var i={include:e&&r.includes(["model","seed","snapshot"],e.resource_type)?"+"+e.name+"+":e&&"source"==e.resource_type?"+source:"+e.source_name+"."+e.name+"+":e&&"exposure"==e.resource_type?"+exposure:"+e.name:e&&"metric"==e.resource_type?"+metric:"+e.name:e&&r.includes(["analysis","test"],e.resource_type)?"+"+e.name:""},o=r.assign({},t,i);n.selection.clean=r.clone(o),n.selection.dirty=r.clone(o),n.view_node=e},getViewNode:function(){return n.view_node},excludeNode:function(e,t){var r,i=n.selection.dirty.exclude,o=t.parents?"+":"",a=t.children?"+":"",s=i.length>0?" 
":"";"source"==e.resource_type?(o+="source:",r=e.source_name+"."+e.name):["exposure","metric"].indexOf(e.resource_type)>-1?(o+=e.resource_type+":",r=e.name):r=e.name;var l=i+s+o+r+a;return n.selection.dirty.exclude=l,n.updateSelection()},selectSource:function(e,t){var r="source:"+e+(t.children?"+":"");return n.selection.dirty.include=r,n.updateSelection()},clearViewNode:function(){n.view_node=null},isDirty:function(){return!r.isEqual(n.selection.clean,n.selection.dirty)},updateSelection:function(){return n.selection.clean=r.clone(n.selection.dirty),n.selection.clean},selectNodes:function(e,t,n){return i.selectNodes(e,t,n)}};return n}])},function(e,t,n){const r=n(21),i=n(471);function o(e,t){return t||(t=" "),r.filter(r.uniq(e.split(t)),(function(e){return e.length>0}))}function a(e){var t={raw:e,select_at:!1,select_children:!1,children_depth:null,select_parents:!1,parents_depth:null};const n=new RegExp(""+/^/.source+/(?(\@))?/.source+/(?((?(\d*))\+))?/.source+/((?([\w.]+)):)?/.source+/(?(.*?))/.source+/(?(\+(?(\d*))))?/.source+/$/.source).exec(e).groups;t.select_at="@"==n.childs_parents,t.select_parents=!!n.parents,t.select_children=!!n.children,n.parents_depth&&(t.parents_depth=parseInt(n.parents_depth)),n.children_depth&&(t.children_depth=parseInt(n.children_depth));var r=n.method,i=n.value;return r?-1!=r.indexOf(".")&&([r,selector_modifier]=r.split(".",2),i={config:selector_modifier,value:i}):r="implicit",t.selector_type=r,t.selector_value=i,t}function s(e){var t=o(e," ");return r.map(t,(function(e){var t=o(e,",");return t.length>1?{method:"intersect",selectors:r.map(t,a)}:{method:"none",selectors:r.map([e],a)}}))}function l(e,t){var n=s(e),i=null,o=null;return r.each(n,(function(e){var n="intersect"==e.method?r.intersection:r.union;r.each(e.selectors,(function(e){var r=t(e);null===i?(i=r.matched,o=r.selected):(i=n(i,r.matched),o=n(o,r.selected))}))})),{matched:i||[],selected:o||[]}}e.exports={splitSpecs:o,parseSpec:a,parseSpecs:s,buildSpec:function(e,t,n){return{include:s(e),exclude:s(t),hops:n}},applySpec:l,selectNodes:function(e,t,n){n.include,n.exclude;var o,a=r.partial(i.getNodesFromSpec,e,t,n.hops);r.values(t),o=0==n.include.trim().length?{selected:e.nodes(),matched:[]}:l(n.include,a);var s=l(n.exclude,a),c=o.selected,u=o.matched;c=r.difference(c,s.selected),u=r.difference(u,s.matched);var d=[];return r.each(c,(function(e){var i=t[e];i.data.tags||(i.data.tags=[]);var o=r.includes(n.packages,i.data.package_name),a=r.intersection(n.tags,i.data.tags).length>0,s=r.includes(n.tags,null)&&0==i.data.tags.length,l=r.includes(n.resource_types,i.data.resource_type);o&&(a||s)&&l||d.push(i.data.unique_id)})),{selected:r.difference(c,d),matched:r.difference(u,d)}}}},function(e,t,n){const r=n(21),i=n(203);var o="fqn",a="tag",s="source",l="exposure",c="metric",u="path",d="file",f="package",p="config",h="test_name",g="test_type",m={};function v(e,t){if(t===r.last(e))return!0;var n=e.reduce((e,t)=>e.concat(t.split(".")),[]),i=t.split(".");if(n.length-1||!r.hasOwnProperty("test_metadata")&&["data","singular"].indexOf(t)>-1)&&n.push(r)})),n}function $(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("source"==r.resource_type){var i,o,a=r.source_name,s=r.name;-1!=t.indexOf(".")?[i,o]=t.split(".",2):(i=t,o=null),("*"==i||i==a&&"*"===o||i==a&&o===s||i==a&&null===o)&&n.push(e.data)}})),n}m["implicit"]=function(e,t){var n=b(e,t),i=y(e,t),o=[];t.toLowerCase().endsWith(".sql")&&(o=x(e,t));var a=r.uniq([].concat(r.map(n,"unique_id"),r.map(i,"unique_id"),r.map(o,"unique_id")));return 
r.map(a,t=>e[t].data)},m[o]=b,m[a]=w,m[s]=$,m[l]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("exposure"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[c]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("metric"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[u]=y,m[d]=x,m[f]=k,m[p]=A,m[h]=E,m[g]=S,e.exports={isFQNMatch:v,getNodesByFQN:b,getNodesByTag:w,getNodesBySource:$,getNodesByPath:y,getNodesByPackage:k,getNodesByConfig:A,getNodesByTestName:E,getNodesByTestType:S,getNodesFromSpec:function(e,t,n,o){const a=m[o.selector_type];if(!a)return console.log("Node matcher for selector",o.selector_type,"is invalid"),{selected:[],matched:[]};var s=a(t,o.selector_value),l=[],c=[];return r.each(s,(function(t){var a=t.unique_id;c.push(t.unique_id);var s=[],u=[],d=[];if(o.select_at&&(d=r.union(i.selectAt(e,a))),o.select_parents){var f=n||o.parents_depth;s=i.ancestorNodes(e,a,f)}if(o.select_children){f=n||o.children_depth;u=i.descendentNodes(e,a,f)}l=r.union([a],l,u,s,d)})),{selected:l,matched:c}}}},function(e,t,n){const r=n(9);n(473);r.module("dbt").factory("trackingService",["$location","selectorService","$rootScope",function(e,t,n){var r={initialized:!1,snowplow:null,project_id:null,init:function(e){r.initialized||(r.initialized=!0,r.project_id=e.project_id,!0===e.track&&r.turn_on_tracking())},isHosted:function(){return window.location.hostname.indexOf(".getdbt.com")>-1},turn_on_tracking:function(){var e,t,n,i,o,a;e=window,t=document,n="script",e[i="snowplow"]||(e.GlobalSnowplowNamespace=e.GlobalSnowplowNamespace||[],e.GlobalSnowplowNamespace.push(i),e[i]=function(){(e[i].q=e[i].q||[]).push(arguments)},e[i].q=e[i].q||[],o=t.createElement(n),a=t.getElementsByTagName(n)[0],o.async=1,o.src="//d1fc8wv8zag5ca.cloudfront.net/2.9.0/sp.js",a.parentNode.insertBefore(o,a));var s={appId:"dbt-docs",forceSecureTracker:!0,respectDoNotTrack:!0,userFingerprint:!1,contexts:{webPage:!0}};r.isHosted()&&(s.cookieDomain=".getdbt.com"),r.snowplow=window.snowplow,r.snowplow("newTracker","sp","fishtownanalytics.sinter-collect.com",s),r.snowplow("enableActivityTracking",30,30),r.track_pageview()},fuzzUrls:function(){r.isHosted()||(r.snowplow("setCustomUrl","https://fuzzed.getdbt.com/"),r.snowplow("setReferrerUrl","https://fuzzed.getdbt.com/"))},getContext:function(){return[{schema:"iglu:com.dbt/dbt_docs/jsonschema/1-0-0",data:{is_cloud_hosted:r.isHosted(),core_project_id:r.project_id}}]},track_pageview:function(){if(r.snowplow){r.fuzzUrls();r.snowplow("trackPageView",null,r.getContext())}},track_event:function(e,t,n,i){r.snowplow&&(r.fuzzUrls(),r.snowplow("trackStructEvent","dbt-docs",e,t,n,i,r.getContext()))},track_graph_interaction:function(e,t){r.snowplow&&(r.fuzzUrls(),r.track_event("graph","interact",e,t))}};return r}])},function(e,t,n){var r,i,o,a,s;r=n(474),i=n(204).utf8,o=n(475),a=n(204).bin,(s=function(e,t){e.constructor==String?e=t&&"binary"===t.encoding?a.stringToBytes(e):i.stringToBytes(e):o(e)?e=Array.prototype.slice.call(e,0):Array.isArray(e)||e.constructor===Uint8Array||(e=e.toString());for(var n=r.bytesToWords(e),l=8*e.length,c=1732584193,u=-271733879,d=-1732584194,f=271733878,p=0;p<n.length;p++)n[p]=16711935&(n[p]<<8|n[p]>>>24)|4278255360&(n[p]<<24|n[p]>>>8);n[l>>>5]|=128<<l%32;n[14+(l+64>>>9<<4)]=l;var h=s._ff,g=s._gg,m=s._hh,v=s._ii;for(p=0;p>>0,u=u+y>>>0,d=d+x>>>0,f=f+w>>>0}return r.endian([c,u,d,f])})._ff=function(e,t,n,r,i,o,a){var s=e+(t&n|~t&r)+(i>>>0)+a;return(s<<o|s>>>32-o)+t},s._gg=function(e,t,n,r,i,o,a){var 
s=e+(t&r|n&~r)+(i>>>0)+a;return(s<<o|s>>>32-o)+t},s._hh=function(e,t,n,r,i,o,a){var s=e+(t^n^r)+(i>>>0)+a;return(s<<o|s>>>32-o)+t},s._ii=function(e,t,n,r,i,o,a){var s=e+(n^(t|~r))+(i>>>0)+a;return(s<<o|s>>>32-o)+t},s._blocksize=16,s._digestsize=16,e.exports=function(e,t){if(null==e)throw new Error("Illegal argument "+e);var n=r.wordsToBytes(s(e,t));return t&&t.asBytes?n:t&&t.asString?a.bytesToString(n):r.bytesToHex(n)}},function(e,t){var n,r;n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",r={rotl:function(e,t){return e<<t|e>>>32-t},rotr:function(e,t){return e<<32-t|e>>>t},endian:function(e){if(e.constructor==Number)return 16711935&r.rotl(e,8)|4278255360&r.rotl(e,24);for(var t=0;t<e.length;t++)e[t]=r.endian(e[t]);return e},randomBytes:function(e){for(var t=[];e>0;e--)t.push(Math.floor(256*Math.random()));return t},bytesToWords:function(e){for(var t=[],n=0,r=0;n<e.length;n++,r+=8)t[r>>>5]|=e[n]<<24-r%32;return t},wordsToBytes:function(e){for(var t=[],n=0;n<32*e.length;n+=8)t.push(e[n>>>5]>>>24-n%32&255);return t},bytesToHex:function(e){for(var t=[],n=0;n<e.length;n++)t.push((e[n]>>>4).toString(16)),t.push((15&e[n]).toString(16));return t.join("")},hexToBytes:function(e){for(var t=[],n=0;n>>6*(3-o)&63)):t.push("=");return t.join("")},base64ToBytes:function(e){e=e.replace(/[^A-Z0-9+\/]/gi,"");for(var t=[],r=0,i=0;r>>6-2*i);return t}},e.exports=r},function(e,t){function n(e){return!!e.constructor&&"function"==typeof e.constructor.isBuffer&&e.constructor.isBuffer(e)} /*! * Determine if an object is a Buffer

From 8e28f5906e60f7c97e874e9dd5962625267d336e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 2 Nov 2022 08:40:51 -0700
Subject: [PATCH 017/156] Bump python from 3.10.7-slim-bullseye to 3.11.0-slim-bullseye in /docker (#6180)

* Bump python from 3.10.7-slim-bullseye to 3.11.0-slim-bullseye in /docker

Bumps python from 3.10.7-slim-bullseye to 3.11.0-slim-bullseye.

---
updated-dependencies:
- dependency-name: python
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]

* Add automated changelog yaml from template for bot PR

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Github Build Bot
---
 .changes/unreleased/Dependency-20221031-000329.yaml | 7 +++++++
 docker/Dockerfile                                   | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 .changes/unreleased/Dependency-20221031-000329.yaml

diff --git a/.changes/unreleased/Dependency-20221031-000329.yaml b/.changes/unreleased/Dependency-20221031-000329.yaml
new file mode 100644
index 00000000000..6d19e098e3f
--- /dev/null
+++ b/.changes/unreleased/Dependency-20221031-000329.yaml
@@ -0,0 +1,7 @@
+kind: "Dependency"
+body: "Bump python from 3.10.7-slim-bullseye to 3.11.0-slim-bullseye in /docker"
+time: 2022-10-31T00:03:29.00000Z
+custom:
+  Author: dependabot[bot]
+  Issue: 4904
+  PR: 6180
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 8d3756ca786..afda5e9ce72 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -9,7 +9,7 @@ ARG build_for=linux/amd64
 ##
 # base image (abstract)
 ##
-FROM --platform=$build_for python:3.10.7-slim-bullseye as base
+FROM --platform=$build_for python:3.11.0-slim-bullseye as base
 
 # N.B. The refs updated automagically every release via bumpversion
 # N.B. dbt-postgres is currently found in the core codebase so a value of dbt-core@ is correct
From c3ccbe3357d15e45153c70b62bea626d37fc0d05 Mon Sep 17 00:00:00 2001
From: Emily Rockman
Date: Thu, 3 Nov 2022 09:13:00 -0500
Subject: [PATCH 018/156] add python version and upgrade action (#6204)

---
 .github/workflows/main.yml | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 00339110483..4de07d83c07 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -45,7 +45,9 @@ jobs:
         uses: actions/checkout@v2
 
       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4.3.0
+        with:
+          python-version: '3.8'
 
       - name: Install python dependencies
         run: |
@@ -82,7 +84,7 @@ jobs:
         uses: actions/checkout@v2
 
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4.3.0
         with:
           python-version: ${{ matrix.python-version }}
 
@@ -137,7 +139,7 @@ jobs:
         uses: actions/checkout@v2
 
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4.3.0
         with:
           python-version: ${{ matrix.python-version }}
 
@@ -190,9 +192,9 @@ jobs:
         uses: actions/checkout@v2
 
       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4.3.0
         with:
-          python-version: 3.8
+          python-version: '3.8'
 
       - name: Install python dependencies
         run: |

From cfad27f963f431554b2e90ecec9eff431a5b4371 Mon Sep 17 00:00:00 2001
From: Michelle Ark
Date: Thu, 3 Nov 2022 17:35:16 -0400
Subject: [PATCH 019/156] add typing to DepsTask.run (#6192)

---
 core/dbt/deps/base.py     |  2 +-
 core/dbt/events/types.py  |  2 +-
 core/dbt/task/deps.py     | 14 ++++++++++----
 tests/unit/test_events.py |  5 +++--
 4 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/core/dbt/deps/base.py b/core/dbt/deps/base.py
index 1557b0d7a35..27567440a52 100644
--- a/core/dbt/deps/base.py
+++ b/core/dbt/deps/base.py
@@ -74,7 +74,7 @@ def _fetch_metadata(self, project, renderer):
         raise NotImplementedError
 
     @abc.abstractmethod
-    def install(self, project):
+    def install(self, project, renderer):
         raise NotImplementedError
 
     @abc.abstractmethod
diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py
index de562fb62aa..21abaa16383 100644
--- a/core/dbt/events/types.py
+++ b/core/dbt/events/types.py
@@ -1706,7 +1706,7 @@ def code(self):
     def message(self) -> str:
         return "Updates available for packages: {} \
 \nUpdate your versions in packages.yml, then run dbt deps".format(
-            self.packages
+            self.packages.value
        )
 
 
diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py
index 5e8beff43f3..14ba794cd4d 100644
--- a/core/dbt/task/deps.py
+++ b/core/dbt/task/deps.py
@@ -1,3 +1,5 @@
+from typing import Optional
+
 import dbt.utils
 import dbt.deprecations
 import dbt.exceptions
@@ -6,7 +8,9 @@
 from dbt.config.renderer import DbtProjectYamlRenderer
 from dbt.deps.base import downloads_directory
 from dbt.deps.resolver import resolve_packages
+from dbt.deps.registry import RegistryPinnedPackage
+from dbt.events.proto_types import ListOfStrings
 from dbt.events.functions import fire_event
 from dbt.events.types import (
     DepsNoPackagesFound,
@@ -29,7 +33,9 @@ class DepsTask(BaseTask):
     def __init__(self, args, config: UnsetProfileConfig):
         super().__init__(args=args, config=config)
 
-    def track_package_install(self, package_name: str, source_type: str, version: str) -> None:
+    def track_package_install(
+        self, package_name: str, source_type: str, version: Optional[str]
+    ) -> None:
         # Hub packages do not need to be hashed, as they are public
         # Use the string 'local' for local package versions
         if source_type == "local":
@@ -45,7 +51,7 @@ def track_package_install(self, package_name: str, source_type: str, version: st
             {"name": package_name, "source": source_type, "version": version},
         )
 
-    def run(self):
+    def run(self) -> None:
         system.make_directory(self.config.packages_install_path)
         packages = self.config.packages.packages
         if not packages:
@@ -66,7 +72,7 @@ def run(self):
             fire_event(DepsStartPackageInstall(package_name=package_name))
             package.install(self.config, renderer)
             fire_event(DepsInstallInfo(version_name=package.nice_version_name()))
-            if source_type == "hub":
+            if isinstance(package, RegistryPinnedPackage):
                 version_latest = package.get_version_latest()
                 if version_latest != version:
                     packages_to_upgrade.append(package_name)
@@ -81,7 +87,7 @@ def run(self):
             )
         if packages_to_upgrade:
             fire_event(EmptyLine())
-            fire_event(DepsNotifyUpdatesAvailable(packages=packages_to_upgrade))
+            fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade)))
 
     @classmethod
     def from_args(cls, args):
diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py
index 6ba1b1ba69c..8d7aeaa7aae 100644
--- a/tests/unit/test_events.py
+++ b/tests/unit/test_events.py
@@ -13,7 +13,7 @@
     ErrorLevel,
     TestLevel,
 )
-from dbt.events.proto_types import NodeInfo, RunResultMsg, ReferenceKeyMsg
+from dbt.events.proto_types import ListOfStrings, NodeInfo, RunResultMsg, ReferenceKeyMsg
 from importlib import reload
 import dbt.events.functions as event_funcs
 import dbt.flags as flags
@@ -321,7 +321,8 @@ def MockNode():
         DepsInstallInfo(version_name=""),
         DepsUpdateAvailable(version_latest=""),
         DepsListSubdirectory(subdirectory=""),
-        DepsNotifyUpdatesAvailable(packages=[]),
+        DepsNotifyUpdatesAvailable(packages=ListOfStrings()),
+        DepsNotifyUpdatesAvailable(packages=ListOfStrings(['dbt-utils'])),
         DatabaseErrorRunningHook(hook_type=""),
         EmptyLine(),
         HooksRunning(num_hooks=0, hook_type=""),
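The deps.py and test_events.py hunks above retype DepsNotifyUpdatesAvailable to take the proto-generated ListOfStrings wrapper instead of a bare Python list. A minimal sketch of firing the retyped event, reusing the values from the updated unit test (the literal package list is illustrative):

    from dbt.events.functions import fire_event
    from dbt.events.proto_types import ListOfStrings
    from dbt.events.types import DepsNotifyUpdatesAvailable

    # In DepsTask.run, packages_to_upgrade is collected while installing packages;
    # here it is hard-coded only to show the new wrapper type.
    packages_to_upgrade = ["dbt-utils"]
    fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade)))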
From d0543c92426b8d95e4a3e7c79a5050c8a6e322bd Mon Sep 17 00:00:00 2001
From: Rachel <41338402+racheldaniel@users.noreply.github.com>
Date: Fri, 4 Nov 2022 12:05:24 -0500
Subject: [PATCH 020/156] Updates lib to use new profile name functionality (#6202)

* Updates lib to use new profile name functionality

* Adds changie entry

* Fixes formatting
---
 .changes/unreleased/Features-20221102-150003.yaml | 8 ++++++++
 core/dbt/lib.py                                   | 14 +++++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)
 create mode 100644 .changes/unreleased/Features-20221102-150003.yaml

diff --git a/.changes/unreleased/Features-20221102-150003.yaml b/.changes/unreleased/Features-20221102-150003.yaml
new file mode 100644
index 00000000000..ca45893dfe9
--- /dev/null
+++ b/.changes/unreleased/Features-20221102-150003.yaml
@@ -0,0 +1,8 @@
+kind: Features
+body: This pulls the profile name from args when constructing a RuntimeConfig in lib.py,
+  enabling the dbt-server to override the value that's in the dbt_project.yml
+time: 2022-11-02T15:00:03.000805-05:00
+custom:
+  Author: racheldaniel
+  Issue: "6201"
+  PR: "6202"
diff --git a/core/dbt/lib.py b/core/dbt/lib.py
index ff8f06c88a8..5b2ee2ea29f 100644
--- a/core/dbt/lib.py
+++ b/core/dbt/lib.py
@@ -1,4 +1,6 @@
 import os
+from dbt.config.project import Project
+from dbt.config.renderer import DbtProjectYamlRenderer
 from dbt.contracts.results import RunningStatus, collect_timing_info
 from dbt.events.functions import fire_event
 from dbt.events.types import NodeCompiling, NodeExecuting
@@ -71,16 +73,22 @@ def get_dbt_config(project_dir, args=None, single_threaded=False):
     else:
         profiles_dir = flags.DEFAULT_PROFILES_DIR
 
+    profile_name = getattr(args, "profile", None)
+
     runtime_args = RuntimeArgs(
         project_dir=project_dir,
         profiles_dir=profiles_dir,
         single_threaded=single_threaded,
-        profile=getattr(args, "profile", None),
+        profile=profile_name,
         target=getattr(args, "target", None),
    )
 
-    # Construct a RuntimeConfig from phony args
-    config = RuntimeConfig.from_args(runtime_args)
+    profile = RuntimeConfig.collect_profile(args=runtime_args, profile_name=profile_name)
+    project_renderer = DbtProjectYamlRenderer(profile, None)
+    project = RuntimeConfig.collect_project(args=runtime_args, project_renderer=project_renderer)
+    assert type(project) is Project
+
+    config = RuntimeConfig.from_parts(project, profile, runtime_args)
 
     # Set global flags from arguments
     flags.set_from_args(args, config)
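A minimal sketch of what the lib.py change above enables, assuming a caller such as the dbt-server supplies an args object; the Namespace contents and project path here are illustrative placeholders, not part of the patch:

    from argparse import Namespace
    from dbt.lib import get_dbt_config

    # "profile" on args now overrides the profile named in dbt_project.yml;
    # get_dbt_config reads it via getattr(args, "profile", None).
    args = Namespace(profile="dbt_server_profile", target=None)
    config = get_dbt_config("/path/to/project", args=args)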
+ def log_level(self) -> str: + return self.info.level # type: ignore def message(self): raise Exception("message() not implemented for event") +# DynamicLevel requires that the level be supplied on the +# event construction call using the "info" function from functions.py +@dataclass # type: ignore[misc] +class DynamicLevel(BaseEvent): + pass + + @dataclass class TestLevel(BaseEvent): __test__ = False diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index 2425f0abd7f..d69fa63eb6f 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -2,12 +2,9 @@ from colorama import Style from dbt.events.base_types import NoStdOut, BaseEvent, NoFile, Cache +from dbt.events.types import EventBufferFull, MainReportVersion, EmptyLine +from dbt.events.proto_types import EventInfo from dbt.events.helpers import env_secrets, scrub_secrets -from dbt.events.types import ( - EventBufferFull, - MainReportVersion, - EmptyLine, -) import dbt.flags as flags from dbt.constants import METADATA_ENV_PREFIX @@ -163,7 +160,7 @@ def create_debug_text_log_line(e: BaseEvent) -> str: ts: str = get_ts().strftime("%H:%M:%S.%f") scrubbed_msg: str = scrub_secrets(e.message(), env_secrets()) # Make the levels all 5 characters so they line up - level: str = f"{e.level_tag():<5}" + level: str = f"{e.log_level():<5}" thread = "" if threading.current_thread().name: thread_name = threading.current_thread().name @@ -195,23 +192,23 @@ def create_log_line(e: BaseEvent, file_output=False) -> Optional[str]: # allows for reuse of this obnoxious if else tree. # do not use for exceptions, it doesn't pass along exc_info, stack_info, or extra -def send_to_logger(l: Union[Logger, logbook.Logger], level_tag: str, log_line: str): +def send_to_logger(l: Union[Logger, logbook.Logger], level: str, log_line: str): if not log_line: return - if level_tag == "test": + if level == "test": # TODO after implmenting #3977 send to new test level l.debug(log_line) - elif level_tag == "debug": + elif level == "debug": l.debug(log_line) - elif level_tag == "info": + elif level == "info": l.info(log_line) - elif level_tag == "warn": + elif level == "warn": l.warning(log_line) - elif level_tag == "error": + elif level == "error": l.error(log_line) else: raise AssertionError( - f"While attempting to log {log_line}, encountered the unhandled level: {level_tag}" + f"While attempting to log {log_line}, encountered the unhandled level: {level}" ) @@ -248,7 +245,7 @@ def fire_event(e: BaseEvent) -> None: # destination log_line = create_log_line(e) if log_line: - send_to_logger(GLOBAL_LOGGER, e.level_tag(), log_line) + send_to_logger(GLOBAL_LOGGER, level=e.log_level(), log_line=log_line) return # exit the function to avoid using the current logger as well # always logs debug level regardless of user input @@ -256,19 +253,19 @@ def fire_event(e: BaseEvent) -> None: log_line = create_log_line(e, file_output=True) # doesn't send exceptions to exception logger if log_line: - send_to_logger(FILE_LOG, level_tag=e.level_tag(), log_line=log_line) + send_to_logger(FILE_LOG, level=e.log_level(), log_line=log_line) if not isinstance(e, NoStdOut): # explicitly checking the debug flag here so that potentially expensive-to-construct # log messages are not constructed if debug messages are never shown. 
- if e.level_tag() == "debug" and not flags.DEBUG: + if e.log_level() == "debug" and not flags.DEBUG: return # eat the message in case it was one of the expensive ones - if e.level_tag() != "error" and flags.QUIET: + if e.log_level() != "error" and flags.QUIET: return # eat all non-exception messages in quiet mode log_line = create_log_line(e) if log_line: - send_to_logger(STDOUT_LOG, level_tag=e.level_tag(), log_line=log_line) + send_to_logger(STDOUT_LOG, level=e.log_level(), log_line=log_line) def get_metadata_vars() -> Dict[str, str]: @@ -329,3 +326,11 @@ def add_to_event_history(event): def reset_event_history(): global EVENT_HISTORY EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) + + +# Currently used to set the level in EventInfo, so logging events can +# provide more than one "level". Might be used in the future to set +# more fields in EventInfo, once some of that information is no longer global +def info(level="info"): + info = EventInfo(level=level) + return info diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 53ad7620bd3..09d29b5563b 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -52,7 +52,6 @@ class NodeInfo(betterproto.Message): class RunResultMsg(betterproto.Message): """RunResult""" - # status: Union[RunStatus, TestStatus, FreshnessStatus] status: str = betterproto.string_field(1) message: str = betterproto.string_field(2) timing_info: List["TimingInfoMsg"] = betterproto.message_field(3) @@ -1543,57 +1542,21 @@ class SQLRunnerException(betterproto.Message): @dataclass -class PrintErrorTestResult(betterproto.Message): +class LogTestResult(betterproto.Message): """Q007""" info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - - -@dataclass -class PrintPassTestResult(betterproto.Message): - """Q008""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - - -@dataclass -class PrintWarnTestResult(betterproto.Message): - """Q009""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - num_failures: int = betterproto.int32_field(7) - - -@dataclass -class PrintFailureTestResult(betterproto.Message): - """Q010""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - num_models: int = betterproto.int32_field(5) - execution_time: float = betterproto.float_field(6) - num_failures: int = betterproto.int32_field(7) + status: str = betterproto.string_field(4) + index: int = betterproto.int32_field(5) + num_models: int = betterproto.int32_field(6) + execution_time: float = betterproto.float_field(7) + num_failures: int = betterproto.int32_field(8) @dataclass -class PrintStartLine(betterproto.Message): +class LogStartLine(betterproto.Message): """Q011""" info: "EventInfo" = 
betterproto.message_field(1) @@ -1604,7 +1567,7 @@ class PrintStartLine(betterproto.Message): @dataclass -class PrintModelResultLine(betterproto.Message): +class LogModelResult(betterproto.Message): """Q012""" info: "EventInfo" = betterproto.message_field(1) @@ -1613,40 +1576,11 @@ class PrintModelResultLine(betterproto.Message): status: str = betterproto.string_field(4) index: int = betterproto.int32_field(5) total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) - - -@dataclass -class PrintModelErrorResultLine(betterproto.Message): - """Q013""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - description: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) + execution_time: float = betterproto.float_field(7) @dataclass -class PrintSnapshotErrorResultLine(betterproto.Message): - """Q014""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - description: str = betterproto.string_field(3) - status: str = betterproto.string_field(4) - index: int = betterproto.int32_field(5) - total: int = betterproto.int32_field(6) - execution_time: float = betterproto.float_field(7) - cfg: Dict[str, str] = betterproto.map_field( - 8, betterproto.TYPE_STRING, betterproto.TYPE_STRING - ) - - -@dataclass -class PrintSnapshotResultLine(betterproto.Message): +class LogSnapshotResult(betterproto.Message): """Q015""" info: "EventInfo" = betterproto.message_field(1) @@ -1662,7 +1596,7 @@ class PrintSnapshotResultLine(betterproto.Message): @dataclass -class PrintSeedErrorResultLine(betterproto.Message): +class LogSeedResult(betterproto.Message): """Q016""" info: "EventInfo" = betterproto.message_field(1) @@ -1676,73 +1610,21 @@ class PrintSeedErrorResultLine(betterproto.Message): @dataclass -class PrintSeedResultLine(betterproto.Message): - """Q017""" +class LogFreshnessResult(betterproto.Message): + """Q018""" info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - status: str = betterproto.string_field(3) + status: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(3) index: int = betterproto.int32_field(4) total: int = betterproto.int32_field(5) execution_time: float = betterproto.float_field(6) - schema: str = betterproto.string_field(7) - relation: str = betterproto.string_field(8) - - -@dataclass -class PrintFreshnessErrorLine(betterproto.Message): - """Q018""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintFreshnessErrorStaleLine(betterproto.Message): - """Q019""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintFreshnessWarnLine(betterproto.Message): - """Q020""" - - info:
"EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) - - -@dataclass -class PrintFreshnessPassLine(betterproto.Message): - """Q021""" - - info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - index: int = betterproto.int32_field(3) - total: int = betterproto.int32_field(4) - execution_time: float = betterproto.float_field(5) - source_name: str = betterproto.string_field(6) - table_name: str = betterproto.string_field(7) + source_name: str = betterproto.string_field(7) + table_name: str = betterproto.string_field(8) @dataclass -class PrintCancelLine(betterproto.Message): +class LogCancelLine(betterproto.Message): """Q022""" info: "EventInfo" = betterproto.message_field(1) @@ -1828,7 +1710,7 @@ class NodeExecuting(betterproto.Message): @dataclass -class PrintHookStartLine(betterproto.Message): +class LogHookStartLine(betterproto.Message): """Q032""" info: "EventInfo" = betterproto.message_field(1) @@ -1839,7 +1721,7 @@ class PrintHookStartLine(betterproto.Message): @dataclass -class PrintHookEndLine(betterproto.Message): +class LogHookEndLine(betterproto.Message): """Q033""" info: "EventInfo" = betterproto.message_field(1) @@ -2024,7 +1906,7 @@ class TimingInfoCollected(betterproto.Message): @dataclass -class PrintDebugStackTrace(betterproto.Message): +class LogDebugStackTrace(betterproto.Message): """Z011""" info: "EventInfo" = betterproto.message_field(1) @@ -2191,7 +2073,7 @@ class EndOfRunSummary(betterproto.Message): @dataclass -class PrintSkipBecauseError(betterproto.Message): +class LogSkipBecauseError(betterproto.Message): """Z034""" info: "EventInfo" = betterproto.message_field(1) diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 8f7e1e94fc4..8cafe71bd95 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -38,7 +38,6 @@ message NodeInfo { // RunResult message RunResultMsg { - // status: Union[RunStatus, TestStatus, FreshnessStatus] string status = 1; string message = 2; repeated TimingInfoMsg timing_info = 3; @@ -1174,49 +1173,23 @@ message SQLRunnerException { } // Q007 -message PrintErrorTestResult { +message LogTestResult { EventInfo info = 1; NodeInfo node_info = 2; string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; + string status = 4; + int32 index = 5; + int32 num_models = 6; + float execution_time = 7; + int32 num_failures = 8; } -// Q008 -message PrintPassTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; -} -// Q009 -message PrintWarnTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; - int32 num_failures = 7; -} +// Skipped Q008, Q009, Q010 -// Q010 -message PrintFailureTestResult { - EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - int32 index = 4; - int32 num_models = 5; - float execution_time = 6; - int32 num_failures = 7; -} // Q011 -message PrintStartLine { +message LogStartLine { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; @@ -1225,41 +1198,20 @@ message PrintStartLine { } // Q012 -message PrintModelResultLine { 
+message LogModelResult { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; string status = 4; int32 index = 5; int32 total = 6; float execution_time = 7; } -// Q013 -message PrintModelErrorResultLine { - EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; -} - -// Q014 -message PrintSnapshotErrorResultLine { - EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; - map<string, string> cfg = 8; -} +// Skipped Q013, Q014 // Q015 -message PrintSnapshotResultLine { +message LogSnapshotResult { EventInfo info = 1; NodeInfo node_info = 2; string description = 3; @@ -1271,7 +1223,7 @@ } // Q016 -message PrintSeedErrorResultLine { +message LogSeedResult { EventInfo info = 1; NodeInfo node_info = 2; string status = 3; @@ -1282,64 +1234,26 @@ string relation = 8; } -// Q017 -message PrintSeedResultLine { +// Skipped Q017 + +// Q018 +message LogFreshnessResult { EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; + string status = 2; + NodeInfo node_info = 3; int32 index = 4; int32 total = 5; float execution_time = 6; - string schema = 7; - string relation = 8; -} - -// Q018 -message PrintFreshnessErrorLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; + string source_name = 7; + string table_name = 8; } -// Q019 -message PrintFreshnessErrorStaleLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} -// Q020 -message PrintFreshnessWarnLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} +// Skipped Q019, Q020, Q021 -// Q021 -message PrintFreshnessPassLine { - EventInfo info = 1; - NodeInfo node_info = 2; - int32 index = 3; - int32 total = 4; - float execution_time = 5; - string source_name = 6; - string table_name = 7; -} // Q022 -message PrintCancelLine { +message LogCancelLine { EventInfo info = 1; string conn_name = 2; } @@ -1405,7 +1319,7 @@ } // Q032 -message PrintHookStartLine { +message LogHookStartLine { EventInfo info = 1; NodeInfo node_info = 2; string statement = 3; @@ -1414,7 +1328,7 @@ } // Q033 -message PrintHookEndLine { +message LogHookEndLine { EventInfo info = 1; NodeInfo node_info = 2; string statement = 3; @@ -1563,7 +1477,7 @@ } // Z011 -message PrintDebugStackTrace { +message LogDebugStackTrace { EventInfo info = 1; string exc_info = 2; } @@ -1690,7 +1604,7 @@ message EndOfRunSummary { // Skipped Z031, Z032, Z033 // Z034 -message PrintSkipBecauseError { +message LogSkipBecauseError { EventInfo info = 1; string schema = 2; string relation = 3; diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 21abaa16383..de738df9487 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -2,6 +2,7 @@ from dbt.ui import line_wrap_message, warning_tag, red, green, yellow from dbt.constants import MAXIMUM_SEED_SIZE_NAME, PIN_PACKAGE_URL from dbt.events.base_types import ( + DynamicLevel, NoFile,
DebugLevel, InfoLevel, @@ -1883,76 +1884,53 @@ def message(self) -> str: @dataclass -@dataclass -class PrintErrorTestResult(ErrorLevel, pt.PrintErrorTestResult): +class LogTestResult(DynamicLevel, pt.LogTestResult): def code(self): return "Q007" def message(self) -> str: - info = "ERROR" + if self.status == "error": + info = "ERROR" + status = red(info) + elif self.status == "pass": + info = "PASS" + status = green(info) + elif self.status == "warn": + info = f"WARN {self.num_failures}" + status = yellow(info) + else: # self.status == "fail": + info = f"FAIL {self.num_failures}" + status = red(info) msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=red(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintPassTestResult(InfoLevel, pt.PrintPassTestResult): - def code(self): - return "Q008" - def message(self) -> str: - info = "PASS" - msg = f"{info} {self.name}" return format_fancy_output_line( msg=msg, - status=green(info), + status=status, index=self.index, total=self.num_models, execution_time=self.execution_time, ) - -@dataclass -class PrintWarnTestResult(WarnLevel, pt.PrintWarnTestResult): - def code(self): - return "Q009" - - def message(self) -> str: - info = f"WARN {self.num_failures}" - msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=yellow(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) + @classmethod + def status_to_level(cls, status): + # The statuses come from TestStatus + level_lookup = { + "fail": "error", + "pass": "info", + "warn": "warn", + "error": "error", + } + if status in level_lookup: + return level_lookup[status] + else: + return "info" -@dataclass -class PrintFailureTestResult(ErrorLevel, pt.PrintFailureTestResult): - def code(self): - return "Q010" - - def message(self) -> str: - info = f"FAIL {self.num_failures}" - msg = f"{info} {self.name}" - return format_fancy_output_line( - msg=msg, - status=red(info), - index=self.index, - total=self.num_models, - execution_time=self.execution_time, - ) +# Skipped Q008, Q009, Q010 @dataclass -class PrintStartLine(InfoLevel, pt.PrintStartLine): # noqa +class LogStartLine(InfoLevel, pt.LogStartLine): # noqa def code(self): return "Q011" @@ -1962,67 +1940,48 @@ def message(self) -> str: @dataclass -class PrintModelResultLine(InfoLevel, pt.PrintModelResultLine): +class LogModelResult(DynamicLevel, pt.LogModelResult): def code(self): return "Q012" def message(self) -> str: - info = "OK created" - msg = f"{info} {self.description}" - return format_fancy_output_line( - msg=msg, - status=green(self.status), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintModelErrorResultLine(ErrorLevel, pt.PrintModelErrorResultLine): - def code(self): - return "Q013" + if self.status == "error": + info = "ERROR creating" + status = red(self.status.upper()) + else: + info = "OK created" + status = green(self.status) - def message(self) -> str: - info = "ERROR creating" msg = f"{info} {self.description}" return format_fancy_output_line( msg=msg, - status=red(self.status.upper()), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, ) -@dataclass -class PrintSnapshotErrorResultLine(ErrorLevel, pt.PrintSnapshotErrorResultLine): - def code(self): - return "Q014" - - def message(self) -> str: - info = "ERROR snapshotting" - msg = "{info} {description}".format(info=info, 
description=self.description, **self.cfg) - return format_fancy_output_line( - msg=msg, - status=red(self.status.upper()), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q013, Q014 @dataclass -class PrintSnapshotResultLine(InfoLevel, pt.PrintSnapshotResultLine): +class LogSnapshotResult(DynamicLevel, pt.LogSnapshotResult): def code(self): return "Q015" def message(self) -> str: - info = "OK snapshotted" + if self.status == "error": + info = "ERROR snapshotting" + status = red(self.status.upper()) + else: + info = "OK snapshotted" + status = green(self.status) + msg = "{info} {description}".format(info=info, description=self.description, **self.cfg) return format_fancy_output_line( msg=msg, - status=green(self.status), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, @@ -2030,109 +1989,77 @@ def message(self) -> str: @dataclass -class PrintSeedErrorResultLine(ErrorLevel, pt.PrintSeedErrorResultLine): +class LogSeedResult(DynamicLevel, pt.LogSeedResult): def code(self): return "Q016" def message(self) -> str: - info = "ERROR loading" + if self.status == "error": + info = "ERROR loading" + status = red(self.status.upper()) + else: + info = "OK loaded" + status = green(self.status) msg = f"{info} seed file {self.schema}.{self.relation}" return format_fancy_output_line( msg=msg, - status=red(self.status.upper()), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, ) -@dataclass -class PrintSeedResultLine(InfoLevel, pt.PrintSeedResultLine): - def code(self): - return "Q017" - - def message(self) -> str: - info = "OK loaded" - msg = f"{info} seed file {self.schema}.{self.relation}" - return format_fancy_output_line( - msg=msg, - status=green(self.status), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q017 @dataclass -class PrintFreshnessErrorLine(ErrorLevel, pt.PrintFreshnessErrorLine): +class LogFreshnessResult(DynamicLevel, pt.LogFreshnessResult): def code(self): return "Q018" def message(self) -> str: - info = "ERROR" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - msg=msg, - status=red(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintFreshnessErrorStaleLine(ErrorLevel, pt.PrintFreshnessErrorStaleLine): - def code(self): - return "Q019" - - def message(self) -> str: - info = "ERROR STALE" + if self.status == "runtime error": + info = "ERROR" + status = red(info) + elif self.status == "error": + info = "ERROR STALE" + status = red(info) + elif self.status == "warn": + info = "WARN" + status = yellow(info) + else: + info = "PASS" + status = green(info) msg = f"{info} freshness of {self.source_name}.{self.table_name}" return format_fancy_output_line( msg=msg, - status=red(info), + status=status, index=self.index, total=self.total, execution_time=self.execution_time, ) + @classmethod + def status_to_level(cls, status): + # The statuses come from FreshnessStatus + level_lookup = { + "runtime error": "error", + "pass": "info", + "warn": "warn", + "error": "error", + } + if status in level_lookup: + return level_lookup[status] + else: + return "info" -@dataclass -class PrintFreshnessWarnLine(WarnLevel, pt.PrintFreshnessWarnLine): - def code(self): - return "Q020" - def message(self) -> str: - info = "WARN" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - 
msg=msg, - status=yellow(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) +# Skipped Q019, Q020, Q021 @dataclass -class PrintFreshnessPassLine(InfoLevel, pt.PrintFreshnessPassLine): - def code(self): - return "Q021" - - def message(self) -> str: - info = "PASS" - msg = f"{info} freshness of {self.source_name}.{self.table_name}" - return format_fancy_output_line( - msg=msg, - status=green(info), - index=self.index, - total=self.total, - execution_time=self.execution_time, - ) - - -@dataclass -class PrintCancelLine(ErrorLevel, pt.PrintCancelLine): +class LogCancelLine(ErrorLevel, pt.LogCancelLine): def code(self): return "Q022" @@ -2228,7 +2155,7 @@ def message(self) -> str: @dataclass -class PrintHookStartLine(InfoLevel, pt.PrintHookStartLine): # noqa +class LogHookStartLine(InfoLevel, pt.LogHookStartLine): # noqa def code(self): return "Q032" @@ -2240,7 +2167,7 @@ def message(self) -> str: @dataclass -class PrintHookEndLine(InfoLevel, pt.PrintHookEndLine): # noqa +class LogHookEndLine(InfoLevel, pt.LogHookEndLine): # noqa def code(self): return "Q033" @@ -2472,7 +2399,7 @@ def message(self) -> str: # This prints the stack trace at the debug level while allowing just the nice exception message # at the error level - or whatever other level chosen. Used in multiple places. @dataclass -class PrintDebugStackTrace(DebugLevel, pt.PrintDebugStackTrace): # noqa +class LogDebugStackTrace(DebugLevel, pt.LogDebugStackTrace): # noqa def code(self): return "Z011" @@ -2680,7 +2607,7 @@ def message(self) -> str: @dataclass -class PrintSkipBecauseError(ErrorLevel, pt.PrintSkipBecauseError): +class LogSkipBecauseError(ErrorLevel, pt.LogSkipBecauseError): def code(self): return "Z034" @@ -3032,48 +2959,22 @@ def message(self) -> str: SeedHeader(header="") SeedHeaderSeparator(len_header=0) SQLRunnerException(exc="") - PrintErrorTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - ) - PrintPassTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - ) - PrintWarnTestResult( + LogTestResult( name="", index=0, num_models=0, execution_time=0, num_failures=0, ) - PrintFailureTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - num_failures=0, - ) - PrintStartLine(description="", index=0, total=0, node_info=NodeInfo()) - PrintModelResultLine( + LogStartLine(description="", index=0, total=0, node_info=NodeInfo()) + LogModelResult( description="", status="", index=0, total=0, execution_time=0, ) - PrintModelErrorResultLine( - description="", - status="", - index=0, - total=0, - execution_time=0, - ) - PrintSnapshotErrorResultLine( + LogSnapshotResult( status="", description="", cfg={}, @@ -3081,23 +2982,7 @@ def message(self) -> str: total=0, execution_time=0, ) - PrintSnapshotResultLine( - status="", - description="", - cfg={}, - index=0, - total=0, - execution_time=0, - ) - PrintSeedErrorResultLine( - status="", - index=0, - total=0, - execution_time=0, - schema="", - relation="", - ) - PrintSeedResultLine( + LogSeedResult( status="", index=0, total=0, @@ -3105,35 +2990,14 @@ def message(self) -> str: schema="", relation="", ) - PrintFreshnessErrorLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessErrorStaleLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessWarnLine( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - PrintFreshnessPassLine( + LogFreshnessResult( source_name="", 
table_name="", index=0, total=0, execution_time=0, ) - PrintCancelLine(conn_name="") + LogCancelLine(conn_name="") DefaultSelector(name="") NodeStart(unique_id="") NodeFinished(unique_id="") @@ -3143,12 +3007,12 @@ def message(self) -> str: WritingInjectedSQLForNode(unique_id="") NodeCompiling(unique_id="") NodeExecuting(unique_id="") - PrintHookStartLine( + LogHookStartLine( statement="", index=0, total=0, ) - PrintHookEndLine( + LogHookEndLine( statement="", status="", index=0, @@ -3189,7 +3053,7 @@ def message(self) -> str: SystemStdErrMsg(bmsg=b"") SystemReportReturnCode(returncode=0) TimingInfoCollected() - PrintDebugStackTrace() + LogDebugStackTrace() CheckCleanPath(path="") ConfirmCleanPath(path="") ProtectedCleanPath(path="") @@ -3209,7 +3073,7 @@ def message(self) -> str: FirstRunResultError(msg="") AfterFirstRunResultError(msg="") EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False) - PrintSkipBecauseError(schema="", relation="", index=0, total=0) + LogSkipBecauseError(schema="", relation="", index=0, total=0) EnsureGitInstalled() DepsCreatingLocalSymlink() DepsSymlinkNotAvailable() diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index 1b067d79af8..90f53c1f3f2 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -37,9 +37,9 @@ InternalExceptionOnRun, GenericExceptionOnRun, NodeConnectionReleaseError, - PrintDebugStackTrace, + LogDebugStackTrace, SkippingDetails, - PrintSkipBecauseError, + LogSkipBecauseError, NodeCompiling, NodeExecuting, ) @@ -362,7 +362,7 @@ def _handle_generic_exception(self, e, ctx): exc=str(e), ) ) - fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc())) + fire_event(LogDebugStackTrace(exc_info=traceback.format_exc())) return str(e) @@ -451,7 +451,7 @@ def on_skip(self): # failure, print a special 'error skip' message. 
if self._skip_caused_by_ephemeral_failure(): fire_event( - PrintSkipBecauseError( + LogSkipBecauseError( schema=schema_name, relation=node_name, index=self.node_index, diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index ab256334271..51944cb4508 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -16,14 +16,11 @@ FreshnessStatus, ) from dbt.exceptions import RuntimeException, InternalException -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, info from dbt.events.types import ( FreshnessCheckComplete, - PrintStartLine, - PrintFreshnessErrorLine, - PrintFreshnessErrorStaleLine, - PrintFreshnessWarnLine, - PrintFreshnessPassLine, + LogStartLine, + LogFreshnessResult, ) from dbt.node_types import NodeType @@ -41,7 +38,7 @@ def on_skip(self): def before_execute(self): description = "freshness of {0.source_name}.{0.name}".format(self.node) fire_event( - PrintStartLine( + LogStartLine( description=description, index=self.node_index, total=self.num_nodes, @@ -56,50 +53,19 @@ def after_execute(self, result): else: source_name = result.source_name table_name = result.table_name - if result.status == FreshnessStatus.RuntimeErr: - fire_event( - PrintFreshnessErrorLine( - source_name=source_name, - table_name=table_name, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) - ) - elif result.status == FreshnessStatus.Error: - fire_event( - PrintFreshnessErrorStaleLine( - source_name=source_name, - table_name=table_name, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) - ) - elif result.status == FreshnessStatus.Warn: - fire_event( - PrintFreshnessWarnLine( - source_name=source_name, - table_name=table_name, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) - ) - else: - fire_event( - PrintFreshnessPassLine( - source_name=source_name, - table_name=table_name, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) + level = LogFreshnessResult.status_to_level(str(result.status)) + fire_event( + LogFreshnessResult( + info=info(level=level), + status=result.status, + source_name=source_name, + table_name=table_name, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + node_info=self.node.node_info, ) + ) def error_result(self, node, message, start_time, timing_info): return self._build_run_result( diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 21550017202..93cb2c1569a 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -28,17 +28,16 @@ ValidationException, missing_materialization, ) -from dbt.events.functions import fire_event, get_invocation_id +from dbt.events.functions import fire_event, get_invocation_id, info from dbt.events.types import ( DatabaseErrorRunningHook, EmptyLine, HooksRunning, HookFinished, - PrintModelErrorResultLine, - PrintModelResultLine, - PrintStartLine, - PrintHookEndLine, - PrintHookStartLine, + LogModelResult, + LogStartLine, + LogHookEndLine, + LogHookStartLine, ) from dbt.logger import ( TextOnly, @@ -176,7 +175,7 @@ def describe_node(self): def print_start_line(self): fire_event( - PrintStartLine( + LogStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, @@ -187,27 +186,22 @@ def 
print_start_line(self): def print_result_line(self, result): description = self.describe_node() if result.status == NodeStatus.Error: - fire_event( - PrintModelErrorResultLine( - description=description, - status=result.status, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) - ) + status = result.status + level = "error" else: - fire_event( - PrintModelResultLine( - description=description, - status=result.message, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=self.node.node_info, - ) + status = result.message + level = "info" + fire_event( + LogModelResult( + description=description, + status=status, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + node_info=self.node.node_info, + info=info(level=level), ) + ) def before_execute(self): self.print_start_line() @@ -355,7 +349,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): with UniqueID(hook.unique_id): with hook_meta_ctx, startctx: fire_event( - PrintHookStartLine( + LogHookStartLine( statement=hook_text, index=idx, total=num_hooks, @@ -375,7 +369,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): with finishctx, DbtModelState({"node_status": "passed"}): hook._event_status["node_status"] = RunStatus.Success fire_event( - PrintHookEndLine( + LogHookEndLine( statement=hook_text, status=status, index=idx, diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py index f867824c408..e510c70c37d 100644 --- a/core/dbt/task/run_operation.py +++ b/core/dbt/task/run_operation.py @@ -15,7 +15,7 @@ from dbt.events.types import ( RunningOperationCaughtError, RunningOperationUncaughtError, - PrintDebugStackTrace, + LogDebugStackTrace, ) @@ -57,11 +57,11 @@ def run(self) -> RunOperationResultsArtifact: self._run_unsafe() except dbt.exceptions.Exception as exc: fire_event(RunningOperationCaughtError(exc=str(exc))) - fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc())) + fire_event(LogDebugStackTrace(exc_info=traceback.format_exc())) success = False except Exception as exc: fire_event(RunningOperationUncaughtError(exc=str(exc))) - fire_event(PrintDebugStackTrace(exc_info=traceback.format_exc())) + fire_event(LogDebugStackTrace(exc_info=traceback.format_exc())) success = False else: success = True diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index f12ce94f830..b4ee8152994 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -29,7 +29,7 @@ from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( EmptyLine, - PrintCancelLine, + LogCancelLine, DefaultSelector, NodeStart, NodeFinished, @@ -363,7 +363,7 @@ def _cancel_connections(self, pool): continue # if we don't have a manifest/don't have a node, print # anyway. 
- fire_event(PrintCancelLine(conn_name=conn_name)) + fire_event(LogCancelLine(conn_name=conn_name)) pool.join() diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py index 01535916ad8..16b731e4f7d 100644 --- a/core/dbt/task/seed.py +++ b/core/dbt/task/seed.py @@ -9,14 +9,13 @@ from dbt.exceptions import InternalException from dbt.graph import ResourceTypeSelector from dbt.logger import TextOnly -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, info from dbt.events.types import ( SeedHeader, SeedHeaderSeparator, EmptyLine, - PrintSeedErrorResultLine, - PrintSeedResultLine, - PrintStartLine, + LogSeedResult, + LogStartLine, ) from dbt.node_types import NodeType from dbt.contracts.results import NodeStatus @@ -28,7 +27,7 @@ def describe_node(self): def before_execute(self): fire_event( - PrintStartLine( + LogStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, @@ -47,30 +46,19 @@ def compile(self, manifest): def print_result_line(self, result): model = result.node - if result.status == NodeStatus.Error: - fire_event( - PrintSeedErrorResultLine( - status=result.status, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - schema=self.node.schema, - relation=model.alias, - node_info=model.node_info, - ) - ) - else: - fire_event( - PrintSeedResultLine( - status=result.message, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - schema=self.node.schema, - relation=model.alias, - node_info=model.node_info, - ) + level = "error" if result.status == NodeStatus.Error else "info" + fire_event( + LogSeedResult( + info=info(level=level), + status=result.status, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + schema=self.node.schema, + relation=model.alias, + node_info=model.node_info, ) + ) class SeedTask(RunTask): diff --git a/core/dbt/task/snapshot.py b/core/dbt/task/snapshot.py index 7bd62ffb55b..44ccbd88361 100644 --- a/core/dbt/task/snapshot.py +++ b/core/dbt/task/snapshot.py @@ -1,8 +1,8 @@ from .run import ModelRunner, RunTask from dbt.exceptions import InternalException -from dbt.events.functions import fire_event -from dbt.events.types import PrintSnapshotErrorResultLine, PrintSnapshotResultLine +from dbt.events.functions import fire_event, info +from dbt.events.types import LogSnapshotResult from dbt.graph import ResourceTypeSelector from dbt.node_types import NodeType from dbt.contracts.results import NodeStatus @@ -15,30 +15,19 @@ def describe_node(self): def print_result_line(self, result): model = result.node cfg = model.config.to_dict(omit_none=True) - if result.status == NodeStatus.Error: - fire_event( - PrintSnapshotErrorResultLine( - status=result.status, - description=self.get_node_representation(), - cfg=cfg, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - else: - fire_event( - PrintSnapshotResultLine( - status=result.message, - description=self.get_node_representation(), - cfg=cfg, - index=self.node_index, - total=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) + level = "error" if result.status == NodeStatus.Error else "info" + fire_event( + LogSnapshotResult( + info=info(level=level), + status=result.status, + description=self.get_node_representation(), + cfg=cfg, + index=self.node_index, + total=self.num_nodes, + execution_time=result.execution_time, + 
node_info=model.node_info, ) + ) class SnapshotTask(RunTask): diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index ee871b6179d..a2f64a80315 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -19,13 +19,10 @@ from dbt.contracts.results import TestStatus, PrimitiveDict, RunResult from dbt.context.providers import generate_runtime_model_context from dbt.clients.jinja import MacroGenerator -from dbt.events.functions import fire_event +from dbt.events.functions import fire_event, info from dbt.events.types import ( - PrintErrorTestResult, - PrintPassTestResult, - PrintWarnTestResult, - PrintFailureTestResult, - PrintStartLine, + LogTestResult, + LogStartLine, ) from dbt.exceptions import InternalException, invalid_bool_error, missing_materialization from dbt.graph import ( @@ -67,54 +64,22 @@ def describe_node(self): def print_result_line(self, result): model = result.node - if result.status == TestStatus.Error: - fire_event( - PrintErrorTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Pass: - fire_event( - PrintPassTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Warn: - fire_event( - PrintWarnTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - num_failures=result.failures, - node_info=model.node_info, - ) - ) - elif result.status == TestStatus.Fail: - fire_event( - PrintFailureTestResult( - name=model.name, - index=self.node_index, - num_models=self.num_nodes, - execution_time=result.execution_time, - num_failures=result.failures, - node_info=model.node_info, - ) + fire_event( + LogTestResult( + name=model.name, + info=info(level=LogTestResult.status_to_level(str(result.status))), + status=str(result.status), + index=self.node_index, + num_models=self.num_nodes, + execution_time=result.execution_time, + node_info=model.node_info, + num_failures=result.failures, ) - else: - raise RuntimeError("unexpected status: {}".format(result.status)) + ) def print_start_line(self): fire_event( - PrintStartLine( + LogStartLine( description=self.describe_node(), index=self.node_index, total=self.num_nodes, diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 8d7aeaa7aae..050d5153c8d 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -30,7 +30,7 @@ def get_all_subclasses(cls): all_subclasses = [] for subclass in cls.__subclasses__(): # If the test breaks because of abcs this list might have to be updated. 
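# DynamicLevel is excluded below for the same reason as the level mixins: it is
# a base class rather than a concrete event, and its subclasses expect the
# caller to supply a level. A sketch using names from this commit (values
# illustrative):
#
#     event = LogTestResult(
#         name="my_test",
#         info=info(level=LogTestResult.status_to_level("warn")),
#         status="warn",
#         index=1,
#         num_models=1,
#         execution_time=0.1,
#         num_failures=2,
#     )
#     assert event.info.level == "warn"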
- if subclass in [TestLevel, DebugLevel, WarnLevel, InfoLevel, ErrorLevel]: + if subclass in [TestLevel, DebugLevel, WarnLevel, InfoLevel, ErrorLevel, DynamicLevel]: continue all_subclasses.append(subclass) all_subclasses.extend(get_all_subclasses(subclass)) @@ -96,7 +96,7 @@ def test_event_codes(self): for event in all_concrete: if not inspect.isabstract(event): # must be in the form 1 capital letter, 3 digits - assert re.match("^[A-Z][0-9]{3}", event.code) + assert re.match("^[A-Z][0-9]{3}", event.info.code) # cannot have been used already assert ( event.info.code not in all_codes @@ -348,54 +348,31 @@ def MockNode(): FirstRunResultError(msg=""), AfterFirstRunResultError(msg=""), EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False), - PrintStartLine(description="", index=0, total=0, node_info=NodeInfo()), - PrintHookStartLine(statement="", index=0, total=0, node_info=NodeInfo()), - PrintHookEndLine( + LogStartLine(description="", index=0, total=0, node_info=NodeInfo()), + LogHookStartLine(statement="", index=0, total=0, node_info=NodeInfo()), + LogHookEndLine( statement="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() ), SkippingDetails( resource_type="", schema="", node_name="", index=0, total=0, node_info=NodeInfo() ), - PrintErrorTestResult(name="", index=0, num_models=0, execution_time=0, node_info=NodeInfo()), - PrintPassTestResult(name="", index=0, num_models=0, execution_time=0, node_info=NodeInfo()), - PrintWarnTestResult( + LogTestResult( name="", index=0, num_models=0, execution_time=0, num_failures=0, node_info=NodeInfo() ), - PrintFailureTestResult( - name="", index=0, num_models=0, execution_time=0, num_failures=0, node_info=NodeInfo() - ), - PrintSkipBecauseError(schema="", relation="", index=0, total=0), - PrintModelErrorResultLine( - description="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintModelResultLine( + LogSkipBecauseError(schema="", relation="", index=0, total=0), + LogModelResult( description="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() ), - PrintSnapshotErrorResultLine( + LogSnapshotResult( status="", description="", cfg={}, index=0, total=0, execution_time=0, node_info=NodeInfo() ), - PrintSnapshotResultLine( - status="", description="", cfg={}, index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintSeedErrorResultLine( - status="", index=0, total=0, execution_time=0, schema="", relation="", node_info=NodeInfo() - ), - PrintSeedResultLine( + LogSeedResult( status="", index=0, total=0, execution_time=0, schema="", relation="", node_info=NodeInfo() ), - PrintFreshnessErrorLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessErrorStaleLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessWarnLine( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - PrintFreshnessPassLine( + LogFreshnessResult( source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() ), - PrintCancelLine(conn_name=""), + LogCancelLine(conn_name=""), DefaultSelector(name=""), NodeStart(unique_id="", node_info=NodeInfo()), NodeCompiling(unique_id="", node_info=NodeInfo()), @@ -432,7 +409,7 @@ def MockNode(): AdapterEventInfo(name="", base_msg="", args=()), AdapterEventWarning(name="", base_msg="", args=()), AdapterEventError(name="", base_msg="", args=()), - PrintDebugStackTrace(), + 
LogDebugStackTrace(), MainReportArgs(args={}), RegistryProgressGETRequest(url=""), RegistryIndexProgressGETRequest(url=""), diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py index 46e9479ef39..31837ed0271 100644 --- a/tests/unit/test_proto_events.py +++ b/tests/unit/test_proto_events.py @@ -5,9 +5,10 @@ RollbackFailed, MainEncounteredError, PluginLoadError, - PrintStartLine, + LogStartLine, + LogTestResult, ) -from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars +from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars, info from dbt.events import proto_types as pl from dbt.version import installed @@ -89,7 +90,7 @@ def test_node_info_events(): "node_started_at": "some_time", "node_finished_at": "another_time", } - event = PrintStartLine( + event = LogStartLine( description="some description", index=123, total=111, @@ -121,3 +122,16 @@ def test_extra_dict_on_event(monkeypatch): # clean up reset_metadata_vars() + + +def test_dynamic_level_events(): + event = LogTestResult( + name="model_name", + info=info(level=LogTestResult.status_to_level("pass")), + status="pass", + index=1, + num_models=3, + num_failures=0 + ) + assert event + assert event.info.level == "info" From 6c76137da4e72bbcad6e7789a78c31f1bf5cffac Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Fri, 4 Nov 2022 16:38:26 -0400 Subject: [PATCH 022/156] CT 1443 remove root path (#6172) * Remove root_path * Bump manifest schema to 8 * Update tests and compatibility utility for v8, root_path removal --- .../Under the Hood-20221028-104837.yaml | 7 + core/dbt/context/providers.py | 1 + core/dbt/contracts/graph/compiled.py | 1 + core/dbt/contracts/graph/manifest.py | 4 +- core/dbt/contracts/graph/parsed.py | 3 + core/dbt/contracts/graph/unparsed.py | 2 - core/dbt/contracts/util.py | 23 +- core/dbt/graph/selector_methods.py | 2 - core/dbt/parser/base.py | 2 - core/dbt/parser/docs.py | 1 - core/dbt/parser/generic_test.py | 2 - core/dbt/parser/macros.py | 2 - core/dbt/parser/schemas.py | 4 - core/dbt/parser/seeds.py | 2 + core/dbt/parser/sources.py | 1 - core/dbt/parser/sql.py | 1 - schemas/dbt/manifest/v8.json | 6503 +++++++++++++++++ test/unit/test_compiler.py | 15 - test/unit/test_context.py | 3 - test/unit/test_contracts_graph_compiled.py | 10 - test/unit/test_contracts_graph_parsed.py | 44 - test/unit/test_contracts_graph_unparsed.py | 15 - test/unit/test_docs_blocks.py | 2 - test/unit/test_graph_selector_methods.py | 8 - test/unit/test_manifest.py | 25 +- test/unit/test_parser.py | 15 - test/unit/test_partial_parsing.py | 2 - test/unit/utils.py | 1 - .../artifacts/data/state/v8/manifest.json | 1 + .../functional/artifacts/expected_manifest.py | 26 - .../artifacts/test_previous_version_state.py | 2 +- 31 files changed, 6546 insertions(+), 184 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221028-104837.yaml create mode 100644 schemas/dbt/manifest/v8.json create mode 100644 tests/functional/artifacts/data/state/v8/manifest.json diff --git a/.changes/unreleased/Under the Hood-20221028-104837.yaml b/.changes/unreleased/Under the Hood-20221028-104837.yaml new file mode 100644 index 00000000000..22ad4901794 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221028-104837.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Remove the 'root_path' field from most nodes +time: 2022-10-28T10:48:37.687886-04:00 +custom: + Author: gshank + Issue: "6171" + PR: "6172" diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py
index 280b272d553..f8d5af889be 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -803,6 +803,7 @@ def load_agate_table(self) -> agate.Table: raise_compiler_error( "can only load_agate_table for seeds (got a {})".format(self.model.resource_type) ) + assert self.model.root_path path = os.path.join(self.model.root_path, self.model.original_file_path) column_types = self.model.config.column_types try: diff --git a/core/dbt/contracts/graph/compiled.py b/core/dbt/contracts/graph/compiled.py index 118d104f537..28930932299 100644 --- a/core/dbt/contracts/graph/compiled.py +++ b/core/dbt/contracts/graph/compiled.py @@ -97,6 +97,7 @@ class CompiledSeedNode(CompiledNode): # keep this in sync with ParsedSeedNode! resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) config: SeedConfig = field(default_factory=SeedConfig) + root_path: Optional[str] = None @property def empty(self): diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index a2d22e6e315..8b4565fc7e9 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -1183,7 +1183,7 @@ def __init__(self, macros): @dataclass -@schema_version("manifest", 7) +@schema_version("manifest", 8) class WritableManifest(ArtifactMixin): nodes: Mapping[UniqueID, ManifestNode] = field( metadata=dict(description=("The nodes defined in the dbt project and its dependencies")) @@ -1229,7 +1229,7 @@ class WritableManifest(ArtifactMixin): @classmethod def compatible_previous_versions(self): - return [("manifest", 4), ("manifest", 5), ("manifest", 6)] + return [("manifest", 4), ("manifest", 5), ("manifest", 6), ("manifest", 7)] def __post_serialize__(self, dct): for unique_id, node in dct["nodes"].items(): diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/parsed.py index f4de6e6155d..3bf47b324d5 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/parsed.py @@ -412,6 +412,9 @@ class ParsedSeedNode(ParsedNode): # keep this in sync with CompiledSeedNode! 
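# Seeds are the one node type that keeps root_path after this change: seed file
# contents are not loaded at parse time, so load_agate_table() in providers.py
# (above) still needs it to rebuild the on-disk path at runtime:
#
#     assert self.model.root_path
#     path = os.path.join(self.model.root_path, self.model.original_file_path)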
resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) config: SeedConfig = field(default_factory=SeedConfig) + # seeds need the root_path because the contents are not loaded initially + # and we need the root_path to load the seed later + root_path: Optional[str] = None @property def empty(self): diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py index 662ec6f01ad..453dc883d7b 100644 --- a/core/dbt/contracts/graph/unparsed.py +++ b/core/dbt/contracts/graph/unparsed.py @@ -24,7 +24,6 @@ @dataclass class UnparsedBaseNode(dbtClassMixin, Replaceable): package_name: str - root_path: str path: str original_file_path: str @@ -364,7 +363,6 @@ def get_table_named(self, name: str) -> Optional[SourceTablePatch]: @dataclass class UnparsedDocumentation(dbtClassMixin, Replaceable): package_name: str - root_path: str path: str original_file_path: str diff --git a/core/dbt/contracts/util.py b/core/dbt/contracts/util.py index f0975fda10b..354052b67e4 100644 --- a/core/dbt/contracts/util.py +++ b/core/dbt/contracts/util.py @@ -240,13 +240,32 @@ def rename_sql_attr(node_content: dict) -> dict: def upgrade_manifest_json(manifest: dict) -> dict: for node_content in manifest.get("nodes", {}).values(): node_content = rename_sql_attr(node_content) + if node_content["resource_type"] != "seed" and "root_path" in node_content: + del node_content["root_path"] for disabled in manifest.get("disabled", {}).values(): # There can be multiple disabled nodes for the same unique_id # so make sure all the nodes get the attr renamed - disabled = [rename_sql_attr(n) for n in disabled] + for node_content in disabled: + rename_sql_attr(node_content) + if node_content["resource_type"] != "seed" and "root_path" in node_content: + del node_content["root_path"] for metric_content in manifest.get("metrics", {}).values(): # handle attr renames + value translation ("expression" -> "derived") metric_content = rename_metric_attr(metric_content) + if "root_path" in metric_content: + del metric_content["root_path"] + for exposure_content in manifest.get("exposures", {}).values(): + if "root_path" in exposure_content: + del exposure_content["root_path"] + for source_content in manifest.get("sources", {}).values(): + if "root_path" in source_content: + del source_content["root_path"] + for macro_content in manifest.get("macros", {}).values(): + if "root_path" in macro_content: + del macro_content["root_path"] + for doc_content in manifest.get("docs", {}).values(): + if "root_path" in doc_content: + del doc_content["root_path"] return manifest @@ -291,7 +310,7 @@ def read_and_check_versions(cls, path: str): expected=str(cls.dbt_schema_version), found=previous_schema_version, ) - if get_manifest_schema_version(data) <= 6: + if get_manifest_schema_version(data) <= 7: data = upgrade_manifest_json(data) return cls.from_dict(data) # type: ignore diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py index 577cf825512..0e59da38a16 100644 --- a/core/dbt/graph/selector_methods.py +++ b/core/dbt/graph/selector_methods.py @@ -286,8 +286,6 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu root = Path.cwd() paths = set(p.relative_to(root) for p in root.glob(selector)) for node, real_node in self.all_nodes(included_nodes): - if Path(real_node.root_path) != root: - continue ofp = Path(real_node.original_file_path) if ofp in paths: yield node diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 4b9e666a421..b6d349803f6
100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -169,7 +169,6 @@ def _create_error_node( resource_type=self.resource_type, path=path, original_file_path=original_file_path, - root_path=self.project.project_root, package_name=self.project.project_name, raw_code=raw_code, language=language, @@ -202,7 +201,6 @@ def _create_parsetime_node( "database": self.default_database, "fqn": fqn, "name": name, - "root_path": self.project.project_root, "resource_type": self.resource_type, "path": path, "original_file_path": block.path.original_file_path, diff --git a/core/dbt/parser/docs.py b/core/dbt/parser/docs.py index f24f70544d5..a1130eda0da 100644 --- a/core/dbt/parser/docs.py +++ b/core/dbt/parser/docs.py @@ -32,7 +32,6 @@ def parse_block(self, block: BlockContents) -> Iterable[ParsedDocumentation]: contents = get_rendered(block.contents, {}).strip() doc = ParsedDocumentation( - root_path=self.project.project_root, path=block.file.path.relative_path, original_file_path=block.path.original_file_path, package_name=self.project.project_name, diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py index 4706119585b..3a7d49c0cf3 100644 --- a/core/dbt/parser/generic_test.py +++ b/core/dbt/parser/generic_test.py @@ -35,7 +35,6 @@ def parse_generic_test( macro_sql=block.full_block, original_file_path=base_node.original_file_path, package_name=base_node.package_name, - root_path=base_node.root_path, resource_type=base_node.resource_type, name=name, unique_id=unique_id, @@ -96,7 +95,6 @@ def parse_file(self, block: FileBlock): original_file_path=original_file_path, package_name=self.project.project_name, raw_code=source_file.contents, - root_path=self.project.project_root, resource_type=NodeType.Macro, language="sql", ) diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py index 4fe6b422595..396d39f57cc 100644 --- a/core/dbt/parser/macros.py +++ b/core/dbt/parser/macros.py @@ -41,7 +41,6 @@ def parse_macro( macro_sql=block.full_block, original_file_path=base_node.original_file_path, package_name=base_node.package_name, - root_path=base_node.root_path, resource_type=base_node.resource_type, name=name, unique_id=unique_id, @@ -103,7 +102,6 @@ def parse_file(self, block: FileBlock): original_file_path=original_file_path, package_name=self.project.project_name, raw_code=source_file.contents, - root_path=self.project.project_root, resource_type=NodeType.Macro, language="sql", ) diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index d47c2a29684..21521c85e53 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -244,7 +244,6 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List "database": self.default_database, "fqn": fqn, "name": name, - "root_path": self.project.project_root, "resource_type": self.resource_type, "tags": tags, "path": path, @@ -728,7 +727,6 @@ def add_source_definitions(self, source: UnparsedSourceDefinition) -> None: table=table, path=original_file_path, original_file_path=original_file_path, - root_path=self.project.project_root, package_name=package_name, unique_id=unique_id, resource_type=NodeType.Source, @@ -1031,7 +1029,6 @@ def parse_exposure(self, unparsed: UnparsedExposure): parsed = ParsedExposure( package_name=package_name, - root_path=self.project.project_root, path=path, original_file_path=self.yaml.path.original_file_path, unique_id=unique_id, @@ -1135,7 +1132,6 @@ def parse_metric(self, unparsed: UnparsedMetric): parsed = ParsedMetric( 
package_name=package_name, - root_path=self.project.project_root, path=path, original_file_path=self.yaml.path.original_file_path, unique_id=unique_id, diff --git a/core/dbt/parser/seeds.py b/core/dbt/parser/seeds.py index 63550e3f30f..0cd5aeb6307 100644 --- a/core/dbt/parser/seeds.py +++ b/core/dbt/parser/seeds.py @@ -7,6 +7,8 @@ class SeedParser(SimpleSQLParser[ParsedSeedNode]): def parse_from_dict(self, dct, validate=True) -> ParsedSeedNode: + # seeds need the root_path because the contents are not loaded + dct["root_path"] = self.project.project_root if validate: ParsedSeedNode.validate(dct) return ParsedSeedNode.from_dict(dct) diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 30440076440..73fdd80e4c9 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -161,7 +161,6 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinit database=(source.database or default_database), schema=(source.schema or source.name), identifier=(table.identifier or table.name), - root_path=target.root_path, path=target.path, original_file_path=target.original_file_path, columns=refs.column_info, diff --git a/core/dbt/parser/sql.py b/core/dbt/parser/sql.py index 35c8f3072dd..14c74247b62 100644 --- a/core/dbt/parser/sql.py +++ b/core/dbt/parser/sql.py @@ -56,7 +56,6 @@ def parse_remote(self, contents) -> Iterable[ParsedMacro]: package_name=self.project.project_name, raw_code=contents, language="sql", - root_path=self.project.project_root, resource_type=NodeType.Macro, ) for node in self.parse_unparsed_macros(base): diff --git a/schemas/dbt/manifest/v8.json b/schemas/dbt/manifest/v8.json new file mode 100644 index 00000000000..4442ae1d39f --- /dev/null +++ b/schemas/dbt/manifest/v8.json @@ -0,0 +1,6503 @@ +{ + "type": "object", + "required": [ + "metadata", + "nodes", + "sources", + "macros", + "docs", + "exposures", + "metrics", + "selectors" + ], + "properties": { + "metadata": { + "$ref": "#/definitions/ManifestMetadata", + "description": "Metadata about the manifest" + }, + "nodes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/CompiledAnalysisNode" + }, + { + "$ref": "#/definitions/CompiledSingularTestNode" + }, + { + "$ref": "#/definitions/CompiledModelNode" + }, + { + "$ref": "#/definitions/CompiledHookNode" + }, + { + "$ref": "#/definitions/CompiledRPCNode" + }, + { + "$ref": "#/definitions/CompiledSqlNode" + }, + { + "$ref": "#/definitions/CompiledGenericTestNode" + }, + { + "$ref": "#/definitions/CompiledSeedNode" + }, + { + "$ref": "#/definitions/CompiledSnapshotNode" + }, + { + "$ref": "#/definitions/ParsedAnalysisNode" + }, + { + "$ref": "#/definitions/ParsedSingularTestNode" + }, + { + "$ref": "#/definitions/ParsedHookNode" + }, + { + "$ref": "#/definitions/ParsedModelNode" + }, + { + "$ref": "#/definitions/ParsedRPCNode" + }, + { + "$ref": "#/definitions/ParsedSqlNode" + }, + { + "$ref": "#/definitions/ParsedGenericTestNode" + }, + { + "$ref": "#/definitions/ParsedSeedNode" + }, + { + "$ref": "#/definitions/ParsedSnapshotNode" + } + ] + }, + "description": "The nodes defined in the dbt project and its dependencies" + }, + "sources": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ParsedSourceDefinition" + }, + "description": "The sources defined in the dbt project and its dependencies" + }, + "macros": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ParsedMacro" + }, + "description": "The macros defined in the dbt project and 
its dependencies" + }, + "docs": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ParsedDocumentation" + }, + "description": "The docs defined in the dbt project and its dependencies" + }, + "exposures": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ParsedExposure" + }, + "description": "The exposures defined in the dbt project and its dependencies" + }, + "metrics": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ParsedMetric" + }, + "description": "The metrics defined in the dbt project and its dependencies" + }, + "selectors": { + "type": "object", + "description": "The selectors defined in selectors.yml" + }, + "disabled": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/CompiledAnalysisNode" + }, + { + "$ref": "#/definitions/CompiledSingularTestNode" + }, + { + "$ref": "#/definitions/CompiledModelNode" + }, + { + "$ref": "#/definitions/CompiledHookNode" + }, + { + "$ref": "#/definitions/CompiledRPCNode" + }, + { + "$ref": "#/definitions/CompiledSqlNode" + }, + { + "$ref": "#/definitions/CompiledGenericTestNode" + }, + { + "$ref": "#/definitions/CompiledSeedNode" + }, + { + "$ref": "#/definitions/CompiledSnapshotNode" + }, + { + "$ref": "#/definitions/ParsedAnalysisNode" + }, + { + "$ref": "#/definitions/ParsedSingularTestNode" + }, + { + "$ref": "#/definitions/ParsedHookNode" + }, + { + "$ref": "#/definitions/ParsedModelNode" + }, + { + "$ref": "#/definitions/ParsedRPCNode" + }, + { + "$ref": "#/definitions/ParsedSqlNode" + }, + { + "$ref": "#/definitions/ParsedGenericTestNode" + }, + { + "$ref": "#/definitions/ParsedSeedNode" + }, + { + "$ref": "#/definitions/ParsedSnapshotNode" + }, + { + "$ref": "#/definitions/ParsedSourceDefinition" + } + ] + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping of the disabled nodes in the target" + }, + "parent_map": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping from\u00a0child nodes to their dependencies" + }, + "child_map": { + "oneOf": [ + { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + { + "type": "null" + } + ], + "description": "A mapping from parent nodes to their dependents" + } + }, + "additionalProperties": false, + "description": "WritableManifest(metadata: dbt.contracts.graph.manifest.ManifestMetadata, nodes: Mapping[str, Union[dbt.contracts.graph.compiled.CompiledAnalysisNode, dbt.contracts.graph.compiled.CompiledSingularTestNode, dbt.contracts.graph.compiled.CompiledModelNode, dbt.contracts.graph.compiled.CompiledHookNode, dbt.contracts.graph.compiled.CompiledRPCNode, dbt.contracts.graph.compiled.CompiledSqlNode, dbt.contracts.graph.compiled.CompiledGenericTestNode, dbt.contracts.graph.compiled.CompiledSeedNode, dbt.contracts.graph.compiled.CompiledSnapshotNode, dbt.contracts.graph.parsed.ParsedAnalysisNode, dbt.contracts.graph.parsed.ParsedSingularTestNode, dbt.contracts.graph.parsed.ParsedHookNode, dbt.contracts.graph.parsed.ParsedModelNode, dbt.contracts.graph.parsed.ParsedRPCNode, dbt.contracts.graph.parsed.ParsedSqlNode, dbt.contracts.graph.parsed.ParsedGenericTestNode, dbt.contracts.graph.parsed.ParsedSeedNode, dbt.contracts.graph.parsed.ParsedSnapshotNode]], sources: Mapping[str, dbt.contracts.graph.parsed.ParsedSourceDefinition], 
macros: Mapping[str, dbt.contracts.graph.parsed.ParsedMacro], docs: Mapping[str, dbt.contracts.graph.parsed.ParsedDocumentation], exposures: Mapping[str, dbt.contracts.graph.parsed.ParsedExposure], metrics: Mapping[str, dbt.contracts.graph.parsed.ParsedMetric], selectors: Mapping[str, Any], disabled: Optional[Mapping[str, List[Union[dbt.contracts.graph.compiled.CompiledAnalysisNode, dbt.contracts.graph.compiled.CompiledSingularTestNode, dbt.contracts.graph.compiled.CompiledModelNode, dbt.contracts.graph.compiled.CompiledHookNode, dbt.contracts.graph.compiled.CompiledRPCNode, dbt.contracts.graph.compiled.CompiledSqlNode, dbt.contracts.graph.compiled.CompiledGenericTestNode, dbt.contracts.graph.compiled.CompiledSeedNode, dbt.contracts.graph.compiled.CompiledSnapshotNode, dbt.contracts.graph.parsed.ParsedAnalysisNode, dbt.contracts.graph.parsed.ParsedSingularTestNode, dbt.contracts.graph.parsed.ParsedHookNode, dbt.contracts.graph.parsed.ParsedModelNode, dbt.contracts.graph.parsed.ParsedRPCNode, dbt.contracts.graph.parsed.ParsedSqlNode, dbt.contracts.graph.parsed.ParsedGenericTestNode, dbt.contracts.graph.parsed.ParsedSeedNode, dbt.contracts.graph.parsed.ParsedSnapshotNode, dbt.contracts.graph.parsed.ParsedSourceDefinition]]]], parent_map: Optional[Dict[str, List[str]]], child_map: Optional[Dict[str, List[str]]])", + "definitions": { + "ManifestMetadata": { + "type": "object", + "required": [], + "properties": { + "dbt_schema_version": { + "type": "string", + "default": "https://schemas.getdbt.com/dbt/manifest/v8.json" + }, + "dbt_version": { + "type": "string", + "default": "1.4.0a1" + }, + "generated_at": { + "type": "string", + "format": "date-time", + "default": "2022-11-01T18:01:47.759437Z" + }, + "invocation_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "94cf6dd0-d59b-4139-bf79-70055cb9bb34" + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "default": {} + }, + "project_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A unique identifier for the project" + }, + "user_id": { + "oneOf": [ + { + "type": "string", + "pattern": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + { + "type": "null" + } + ], + "description": "A unique identifier for the user" + }, + "send_anonymous_usage_stats": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "Whether dbt is configured to send anonymous usage statistics" + }, + "adapter_type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The type name of the adapter" + } + }, + "additionalProperties": false, + "description": "Metadata for the manifest." 
+ }, + "CompiledAnalysisNode": { + "type": "object", + "required": [ + "compiled", + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "analysis" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.764821 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "CompiledAnalysisNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, 
original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = <factory>, _event_status: Dict[str, Any] = <factory>, tags: List[str] = <factory>, refs: List[List[str]] = <factory>, sources: List[List[str]] = <factory>, metrics: List[List[str]] = <factory>, depends_on: dbt.contracts.graph.parsed.DependsOn = <factory>, description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = <factory>, meta: Dict[str, Any] = <factory>, docs: dbt.contracts.graph.unparsed.Docs = <factory>, patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = <factory>, created_at: float = <factory>, config_call_dict: Dict[str, Any] = <factory>, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = <factory>, relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + }, + "FileHash": { + "type": "object", + "required": [ + "name", + "checksum" + ], + "properties": { + "name": { + "type": "string" + }, + "checksum": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "FileHash(name: str, checksum: str)" + }, + "NodeConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "view" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + } + }, + "additionalProperties": true, + "description": "NodeConfig(_extra: Dict[str, Any] = <factory>, enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = <factory>, meta: Dict[str, Any] = <factory>, materialized: str = 'view', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = <factory>, post_hook: List[dbt.contracts.graph.model_config.Hook] = <factory>, pre_hook: List[dbt.contracts.graph.model_config.Hook] = <factory>, quoting: Dict[str, Any] = <factory>, column_types: 
Dict[str, Any] = <factory>, full_refresh: Optional[bool] = None, unique_key: Union[str, List[str], NoneType] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = <factory>, packages: List[str] = <factory>, docs: dbt.contracts.graph.unparsed.Docs = <factory>)" + }, + "Hook": { + "type": "object", + "required": [ + "sql" + ], + "properties": { + "sql": { + "type": "string" + }, + "transaction": { + "type": "boolean", + "default": true + }, + "index": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Hook(sql: str, transaction: bool = True, index: Optional[int] = None)" + }, + "Docs": { + "type": "object", + "required": [], + "properties": { + "show": { + "type": "boolean", + "default": true + }, + "node_color": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Docs(show: bool = True, node_color: Optional[str] = None)" + }, + "DependsOn": { + "type": "object", + "required": [], + "properties": { + "macros": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "nodes": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "DependsOn(macros: List[str] = <factory>, nodes: List[str] = <factory>)" + }, + "ColumnInfo": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + "default": {} + }, + "data_type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "quote": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": true, + "description": "ColumnInfo(name: str, description: str = '', meta: Dict[str, Any] = <factory>, data_type: Optional[str] = None, quote: Optional[bool] = None, tags: List[str] = <factory>, _extra: Dict[str, Any] = <factory>)" + }, + "InjectedCTE": { + "type": "object", + "required": [ + "id", + "sql" + ], + "properties": { + "id": { + "type": "string" + }, + "sql": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "InjectedCTE(id: str, sql: str)" + }, + "CompiledSingularTestNode": { + "type": "object", + "required": [ + "compiled", + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "test" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/TestConfig", + "default": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "materialized": "test", + "severity": "ERROR", + 
"store_failures": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.767402 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "CompiledSingularTestNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + }, + "TestConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "dbt_test__audit" + }, + "database": { + "oneOf": 
[ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "test" + }, + "severity": { + "type": "string", + "pattern": "^([Ww][Aa][Rr][Nn]|[Ee][Rr][Rr][Oo][Rr])$", + "default": "ERROR" + }, + "store_failures": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "where": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "limit": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "fail_calc": { + "type": "string", + "default": "count(*)" + }, + "warn_if": { + "type": "string", + "default": "!= 0" + }, + "error_if": { + "type": "string", + "default": "!= 0" + } + }, + "additionalProperties": true, + "description": "TestConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = 'dbt_test__audit', database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'test', severity: dbt.contracts.graph.model_config.Severity = 'ERROR', store_failures: Optional[bool] = None, where: Optional[str] = None, limit: Optional[int] = None, fail_calc: str = 'count(*)', warn_if: str = '!= 0', error_if: str = '!= 0')" + }, + "CompiledModelNode": { + "type": "object", + "required": [ + "compiled", + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "model" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + 
"columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.768972 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "CompiledModelNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + }, + "CompiledHookNode": { + "type": "object", + "required": [ + "compiled", + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "operation" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": 
"view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.7706041 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "index": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "CompiledHookNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None, index: Optional[int] = None)" + }, + "CompiledRPCNode": { + "type": "object", + "required": [ + "compiled", + 
"schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "rpc" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.772256 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "CompiledRPCNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: 
dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + }, + "CompiledSqlNode": { + "type": "object", + "required": [ + "compiled", + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "sql operation" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + 
"deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.7739131 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "CompiledSqlNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + }, + "CompiledGenericTestNode": { + "type": "object", + "required": [ + "test_metadata", + "compiled", + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "test_metadata": { + "$ref": "#/definitions/TestMetadata" + }, + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "test" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/TestConfig", + "default": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": 
"string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.7757251 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "column_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "file_key_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "CompiledGenericTestNode(test_metadata: dbt.contracts.graph.parsed.TestMetadata, compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None, column_name: Optional[str] = None, file_key_name: Optional[str] = None)" + }, + "TestMetadata": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "kwargs": { + "type": "object", + "default": {} + }, + "namespace": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "TestMetadata(name: str, kwargs: Dict[str, Any] = , namespace: Optional[str] = None)" + }, + "CompiledSeedNode": { + "type": "object", + "required": [ + "compiled", 
+ "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "seed" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/SeedConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "seed", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "quote_columns": null, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.7787 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "root_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "CompiledSeedNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, 
path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SeedConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None, root_path: Optional[str] = None)" + }, + "SeedConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "seed" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "quote_columns": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "SeedConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'seed', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Union[str, List[str], NoneType] = None, on_schema_change: 
Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , quote_columns: Optional[bool] = None)" + }, + "CompiledSnapshotNode": { + "type": "object", + "required": [ + "compiled", + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "compiled": { + "type": "boolean" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "snapshot" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.780513 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "compiled_code": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": 
"CompiledSnapshotNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + }, + "ParsedAnalysisNode": { + "type": "object", + "required": [ + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "analysis" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + 
} + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.782298 + }, + "config_call_dict": { + "type": "object", + "default": {} + } + }, + "additionalProperties": false, + "description": "ParsedAnalysisNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" + }, + "ParsedSingularTestNode": { + "type": "object", + "required": [ + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "test" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/TestConfig", + "default": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": 
"#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.7835789 + }, + "config_call_dict": { + "type": "object", + "default": {} + } + }, + "additionalProperties": false, + "description": "ParsedSingularTestNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" + }, + "ParsedHookNode": { + "type": "object", + "required": [ + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "operation" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + 
"macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.784904 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "index": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "ParsedHookNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , index: Optional[int] = None)" + }, + "ParsedModelNode": { + "type": "object", + "required": [ + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "model" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": 
"array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.78629 + }, + "config_call_dict": { + "type": "object", + "default": {} + } + }, + "additionalProperties": false, + "description": "ParsedModelNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" + }, + "ParsedRPCNode": { + "type": "object", + "required": [ + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "rpc" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", 
+ "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.78762 + }, + "config_call_dict": { + "type": "object", + "default": {} + } + }, + "additionalProperties": false, + "description": "ParsedRPCNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" + }, + "ParsedSqlNode": { + "type": "object", + "required": [ + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "sql operation" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/NodeConfig", + "default": { + "enabled": 
true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.789003 + }, + "config_call_dict": { + "type": "object", + "default": {} + } + }, + "additionalProperties": false, + "description": "ParsedSqlNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" + }, + "ParsedGenericTestNode": { + "type": "object", + "required": [ + "test_metadata", + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "test_metadata": { + "$ref": "#/definitions/TestMetadata" + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + 
}, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "test" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/TestConfig", + "default": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "materialized": "test", + "severity": "ERROR", + "store_failures": null, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.790516 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "column_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "file_key_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "ParsedGenericTestNode(test_metadata: dbt.contracts.graph.parsed.TestMetadata, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , column_name: Optional[str] = None, file_key_name: Optional[str] = None)" + }, + "ParsedSeedNode": { + "type": "object", + "required": [ + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", 
+ "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum" + ], + "properties": { + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "seed" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/SeedConfig", + "default": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "materialized": "seed", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "grants": {}, + "packages": [], + "docs": { + "show": true, + "node_color": null + }, + "quote_columns": null, + "post-hook": [], + "pre-hook": [] + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.792015 + }, + "config_call_dict": { + "type": "object", + "default": {} + }, + "root_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "ParsedSeedNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SeedConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: 
dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , root_path: Optional[str] = None)" + }, + "ParsedSnapshotNode": { + "type": "object", + "required": [ + "schema", + "fqn", + "unique_id", + "raw_code", + "language", + "package_name", + "path", + "original_file_path", + "name", + "resource_type", + "alias", + "checksum", + "config" + ], + "properties": { + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "raw_code": { + "type": "string" + }, + "language": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "snapshot" + ] + }, + "alias": { + "type": "string" + }, + "checksum": { + "$ref": "#/definitions/FileHash" + }, + "config": { + "$ref": "#/definitions/SnapshotConfig" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "build_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "deferred": { + "type": "boolean", + "default": false + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "created_at": { + "type": "number", + "default": 1667325707.794882 + }, + "config_call_dict": { + "type": "object", + "default": {} + } + }, + "additionalProperties": false, + "description": "ParsedSnapshotNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SnapshotConfig, _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = 
None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" + }, + "SnapshotConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + }, + "alias": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tags": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "default": [] + }, + "meta": { + "type": "object", + "default": {} + }, + "materialized": { + "type": "string", + "default": "snapshot" + }, + "incremental_strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "persist_docs": { + "type": "object", + "default": {} + }, + "post-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "pre-hook": { + "type": "array", + "items": { + "$ref": "#/definitions/Hook" + }, + "default": [] + }, + "quoting": { + "type": "object", + "default": {} + }, + "column_types": { + "type": "object", + "default": {} + }, + "full_refresh": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "unique_key": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "on_schema_change": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "ignore" + }, + "grants": { + "type": "object", + "default": {} + }, + "packages": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "strategy": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "target_schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "target_database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "updated_at": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "check_cols": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "SnapshotConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'snapshot', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Optional[str] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , strategy: Optional[str] = None, target_schema: Optional[str] = None, target_database: Optional[str] = None, updated_at: Optional[str] = None, check_cols: Union[str, List[str], NoneType] = None)" + }, + "ParsedSourceDefinition": { + "type": "object", + "required": [ + "fqn", + "schema", + "unique_id", + "package_name", + "path", + "original_file_path", + "name", 
+ "source_name", + "source_description", + "loader", + "identifier", + "resource_type" + ], + "properties": { + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "schema": { + "type": "string" + }, + "unique_id": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "source_name": { + "type": "string" + }, + "source_description": { + "type": "string" + }, + "loader": { + "type": "string" + }, + "identifier": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "source" + ] + }, + "quoting": { + "$ref": "#/definitions/Quoting", + "default": { + "database": null, + "schema": null, + "identifier": null, + "column": null + } + }, + "loaded_at_field": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "freshness": { + "oneOf": [ + { + "$ref": "#/definitions/FreshnessThreshold" + }, + { + "type": "null" + } + ] + }, + "external": { + "oneOf": [ + { + "$ref": "#/definitions/ExternalTable" + }, + { + "type": "null" + } + ] + }, + "description": { + "type": "string", + "default": "" + }, + "columns": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ColumnInfo" + }, + "default": {} + }, + "meta": { + "type": "object", + "default": {} + }, + "source_meta": { + "type": "object", + "default": {} + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/SourceConfig", + "default": { + "enabled": true + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "created_at": { + "type": "number", + "default": 1667325707.797194 + } + }, + "additionalProperties": false, + "description": "ParsedSourceDefinition(fqn: List[str], database: Optional[str], schema: str, unique_id: str, package_name: str, path: str, original_file_path: str, name: str, source_name: str, source_description: str, loader: str, identifier: str, resource_type: dbt.node_types.NodeType, _event_status: Dict[str, Any] = , quoting: dbt.contracts.graph.unparsed.Quoting = , loaded_at_field: Optional[str] = None, freshness: Optional[dbt.contracts.graph.unparsed.FreshnessThreshold] = None, external: Optional[dbt.contracts.graph.unparsed.ExternalTable] = None, description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , source_meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.SourceConfig = , patch_path: Optional[pathlib.Path] = None, unrendered_config: Dict[str, Any] = , relation_name: Optional[str] = None, created_at: float = )" + }, + "Quoting": { + "type": "object", + "required": [], + "properties": { + "database": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "schema": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "identifier": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "column": { + "oneOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Quoting(database: Optional[bool] = None, 
schema: Optional[bool] = None, identifier: Optional[bool] = None, column: Optional[bool] = None)" + }, + "FreshnessThreshold": { + "type": "object", + "required": [], + "properties": { + "warn_after": { + "oneOf": [ + { + "$ref": "#/definitions/Time" + }, + { + "type": "null" + } + ], + "default": { + "count": null, + "period": null + } + }, + "error_after": { + "oneOf": [ + { + "$ref": "#/definitions/Time" + }, + { + "type": "null" + } + ], + "default": { + "count": null, + "period": null + } + }, + "filter": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "FreshnessThreshold(warn_after: Optional[dbt.contracts.graph.unparsed.Time] = , error_after: Optional[dbt.contracts.graph.unparsed.Time] = , filter: Optional[str] = None)" + }, + "FreshnessMetadata": { + "type": "object", + "required": [], + "properties": { + "dbt_schema_version": { + "type": "string", + "default": "https://schemas.getdbt.com/dbt/sources/v3.json" + }, + "dbt_version": { + "type": "string", + "default": "1.4.0a1" + }, + "generated_at": { + "type": "string", + "format": "date-time", + "default": "2022-11-01T18:01:47.754102Z" + }, + "invocation_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "94cf6dd0-d59b-4139-bf79-70055cb9bb34" + }, + "env": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "default": {} + } + }, + "additionalProperties": false, + "description": "FreshnessMetadata(dbt_schema_version: str = , dbt_version: str = '1.4.0a1', generated_at: datetime.datetime = , invocation_id: Optional[str] = , env: Dict[str, str] = )" + }, + "SourceFreshnessRuntimeError": { + "type": "object", + "required": [ + "unique_id", + "status" + ], + "properties": { + "unique_id": { + "type": "string" + }, + "error": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "status": { + "type": "string", + "enum": [ + "runtime error" + ] + } + }, + "additionalProperties": false, + "description": "SourceFreshnessRuntimeError(unique_id: str, error: Union[str, int, NoneType], status: dbt.contracts.results.FreshnessErrorEnum)" + }, + "SourceFreshnessOutput": { + "type": "object", + "required": [ + "unique_id", + "max_loaded_at", + "snapshotted_at", + "max_loaded_at_time_ago_in_s", + "status", + "criteria", + "adapter_response", + "timing", + "thread_id", + "execution_time" + ], + "properties": { + "unique_id": { + "type": "string" + }, + "max_loaded_at": { + "type": "string", + "format": "date-time" + }, + "snapshotted_at": { + "type": "string", + "format": "date-time" + }, + "max_loaded_at_time_ago_in_s": { + "type": "number" + }, + "status": { + "type": "string", + "enum": [ + "pass", + "warn", + "error", + "runtime error" + ] + }, + "criteria": { + "$ref": "#/definitions/FreshnessThreshold" + }, + "adapter_response": { + "type": "object" + }, + "timing": { + "type": "array", + "items": { + "$ref": "#/definitions/TimingInfo" + } + }, + "thread_id": { + "type": "string" + }, + "execution_time": { + "type": "number" + } + }, + "additionalProperties": false, + "description": "SourceFreshnessOutput(unique_id: str, max_loaded_at: datetime.datetime, snapshotted_at: datetime.datetime, max_loaded_at_time_ago_in_s: float, status: dbt.contracts.results.FreshnessStatus, criteria: dbt.contracts.graph.unparsed.FreshnessThreshold, adapter_response: Dict[str, Any], timing: List[dbt.contracts.results.TimingInfo], thread_id: str, execution_time: float)" 
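(Aside, for orientation while reading the freshness contracts above: the sketch below shows how a consumer of these artifacts might turn the Time-shaped warn_after/error_after thresholds from FreshnessThreshold into the pass/warn/error status that SourceFreshnessOutput records. The function name, the period-to-seconds mapping, and the threshold ordering are illustrative assumptions, not dbt's canonical implementation.)

from datetime import datetime, timezone

# Seconds per period, following the Time definition's enum: minute, hour, day.
PERIOD_SECONDS = {"minute": 60, "hour": 3600, "day": 86400}

def freshness_status(max_loaded_at: datetime, snapshotted_at: datetime,
                     warn_after: dict, error_after: dict) -> str:
    """Map an observed source age onto 'pass' / 'warn' / 'error' (sketch only)."""
    age_s = (snapshotted_at - max_loaded_at).total_seconds()  # max_loaded_at_time_ago_in_s

    def exceeded(threshold: dict) -> bool:
        # A Time with count = null is treated here as "no threshold configured".
        return (threshold.get("count") is not None
                and age_s > threshold["count"] * PERIOD_SECONDS[threshold["period"]])

    if exceeded(error_after):
        return "error"
    if exceeded(warn_after):
        return "warn"
    return "pass"

# Loaded 2 hours before the snapshot, warn after 1 hour, error after 1 day -> 'warn'.
snapshotted = datetime(2022, 11, 1, 18, 0, tzinfo=timezone.utc)
loaded = datetime(2022, 11, 1, 16, 0, tzinfo=timezone.utc)
print(freshness_status(loaded, snapshotted,
                       {"count": 1, "period": "hour"},
                       {"count": 1, "period": "day"}))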
+ }, + "Time": { + "type": "object", + "required": [], + "properties": { + "count": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "period": { + "oneOf": [ + { + "type": "string", + "enum": [ + "minute", + "hour", + "day" + ] + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "Time(count: Optional[int] = None, period: Optional[dbt.contracts.graph.unparsed.TimePeriod] = None)" + }, + "TimingInfo": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "started_at": { + "oneOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ] + }, + "completed_at": { + "oneOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "TimingInfo(name: str, started_at: Optional[datetime.datetime] = None, completed_at: Optional[datetime.datetime] = None)" + }, + "ExternalTable": { + "type": "object", + "required": [], + "properties": { + "location": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "file_format": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "row_format": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "tbl_properties": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "partitions": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/ExternalPartition" + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": true, + "description": "ExternalTable(_extra: Dict[str, Any] = , location: Optional[str] = None, file_format: Optional[str] = None, row_format: Optional[str] = None, tbl_properties: Optional[str] = None, partitions: Union[List[str], List[dbt.contracts.graph.unparsed.ExternalPartition], NoneType] = None)" + }, + "ExternalPartition": { + "type": "object", + "required": [], + "properties": { + "name": { + "type": "string", + "default": "" + }, + "description": { + "type": "string", + "default": "" + }, + "data_type": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + "default": {} + } + }, + "additionalProperties": true, + "description": "ExternalPartition(_extra: Dict[str, Any] = , name: str = '', description: str = '', data_type: str = '', meta: Dict[str, Any] = )" + }, + "SourceConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "SourceConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + }, + "ParsedMacro": { + "type": "object", + "required": [ + "unique_id", + "package_name", + "path", + "original_file_path", + "name", + "macro_sql", + "resource_type" + ], + "properties": { + "unique_id": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "macro_sql": { + "type": "string" + }, + "resource_type": { + "type": "string", + "enum": [ + "macro" + ] + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/MacroDependsOn", + "default": { + "macros": [] + } + }, + "description": { + "type": "string", + "default": "" + }, + "meta": { + "type": "object", + 
"default": {} + }, + "docs": { + "$ref": "#/definitions/Docs", + "default": { + "show": true, + "node_color": null + } + }, + "patch_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "arguments": { + "type": "array", + "items": { + "$ref": "#/definitions/MacroArgument" + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1667325707.798143 + }, + "supported_languages": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string", + "enum": [ + "python", + "sql" + ] + } + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "ParsedMacro(unique_id: str, package_name: str, path: str, original_file_path: str, name: str, macro_sql: str, resource_type: dbt.node_types.NodeType, tags: List[str] = , depends_on: dbt.contracts.graph.parsed.MacroDependsOn = , description: str = '', meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, arguments: List[dbt.contracts.graph.unparsed.MacroArgument] = , created_at: float = , supported_languages: Optional[List[dbt.node_types.ModelLanguage]] = None)" + }, + "MacroDependsOn": { + "type": "object", + "required": [], + "properties": { + "macros": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "MacroDependsOn(macros: List[str] = )" + }, + "MacroArgument": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "type": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "description": { + "type": "string", + "default": "" + } + }, + "additionalProperties": false, + "description": "MacroArgument(name: str, type: Optional[str] = None, description: str = '')" + }, + "ParsedDocumentation": { + "type": "object", + "required": [ + "unique_id", + "package_name", + "path", + "original_file_path", + "name", + "block_contents" + ], + "properties": { + "unique_id": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "block_contents": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "ParsedDocumentation(unique_id: str, package_name: str, path: str, original_file_path: str, name: str, block_contents: str)" + }, + "ParsedExposure": { + "type": "object", + "required": [ + "fqn", + "unique_id", + "package_name", + "path", + "original_file_path", + "name", + "type", + "owner" + ], + "properties": { + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "dashboard", + "notebook", + "analysis", + "ml", + "application" + ] + }, + "owner": { + "$ref": "#/definitions/ExposureOwner" + }, + "resource_type": { + "type": "string", + "enum": [ + "model", + "analysis", + "test", + "snapshot", + "operation", + "seed", + "rpc", + "sql operation", + "docs block", + "source", + "macro", + "exposure", + "metric" + ], + "default": "exposure" + }, + "description": { + "type": "string", + "default": "" + }, + "label": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "maturity": { + "oneOf": [ + { + "type": "string", + 
"enum": [ + "low", + "medium", + "high" + ] + }, + { + "type": "null" + } + ] + }, + "meta": { + "type": "object", + "default": {} + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/ExposureConfig", + "default": { + "enabled": true + } + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "url": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1667325707.799795 + } + }, + "additionalProperties": false, + "description": "ParsedExposure(fqn: List[str], unique_id: str, package_name: str, path: str, original_file_path: str, name: str, type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, resource_type: dbt.node_types.NodeType = , description: str = '', label: Optional[str] = None, maturity: Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.ExposureConfig = , unrendered_config: Dict[str, Any] = , url: Optional[str] = None, depends_on: dbt.contracts.graph.parsed.DependsOn = , refs: List[List[str]] = , sources: List[List[str]] = , created_at: float = )" + }, + "ExposureOwner": { + "type": "object", + "required": [ + "email" + ], + "properties": { + "email": { + "type": "string" + }, + "name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "ExposureOwner(email: str, name: Optional[str] = None)" + }, + "ExposureConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "ExposureConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + }, + "ParsedMetric": { + "type": "object", + "required": [ + "fqn", + "unique_id", + "package_name", + "path", + "original_file_path", + "name", + "description", + "label", + "calculation_method", + "timestamp", + "expression", + "filters", + "time_grains", + "dimensions" + ], + "properties": { + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, + "unique_id": { + "type": "string" + }, + "package_name": { + "type": "string" + }, + "path": { + "type": "string" + }, + "original_file_path": { + "type": "string" + }, + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "label": { + "type": "string" + }, + "calculation_method": { + "type": "string" + }, + "timestamp": { + "type": "string" + }, + "expression": { + "type": "string" + }, + "filters": { + "type": "array", + "items": { + "$ref": "#/definitions/MetricFilter" + } + }, + "time_grains": { + "type": "array", + "items": { + "type": "string" + } + }, + "dimensions": { + "type": "array", + "items": { + "type": "string" + } + }, + "window": { + "oneOf": [ + { + "$ref": "#/definitions/MetricTime" + }, + { + "type": "null" + } + ] + }, + "model": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "model_unique_id": { + "oneOf": [ + { + "type": "string" + }, + { + "type": 
"null" + } + ] + }, + "resource_type": { + "type": "string", + "enum": [ + "model", + "analysis", + "test", + "snapshot", + "operation", + "seed", + "rpc", + "sql operation", + "docs block", + "source", + "macro", + "exposure", + "metric" + ], + "default": "metric" + }, + "meta": { + "type": "object", + "default": {} + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "config": { + "$ref": "#/definitions/MetricConfig", + "default": { + "enabled": true + } + }, + "unrendered_config": { + "type": "object", + "default": {} + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "created_at": { + "type": "number", + "default": 1667325707.801514 + } + }, + "additionalProperties": false, + "description": "ParsedMetric(fqn: List[str], unique_id: str, package_name: str, path: str, original_file_path: str, name: str, description: str, label: str, calculation_method: str, timestamp: str, expression: str, filters: List[dbt.contracts.graph.unparsed.MetricFilter], time_grains: List[str], dimensions: List[str], window: Optional[dbt.contracts.graph.unparsed.MetricTime] = None, model: Optional[str] = None, model_unique_id: Optional[str] = None, resource_type: dbt.node_types.NodeType = , meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.MetricConfig = , unrendered_config: Dict[str, Any] = , sources: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , refs: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" + }, + "MetricFilter": { + "type": "object", + "required": [ + "field", + "operator", + "value" + ], + "properties": { + "field": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false, + "description": "MetricFilter(field: str, operator: str, value: str)" + }, + "MetricTime": { + "type": "object", + "required": [], + "properties": { + "count": { + "oneOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "period": { + "oneOf": [ + { + "type": "string", + "enum": [ + "day", + "week", + "month", + "year" + ] + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "description": "MetricTime(count: Optional[int] = None, period: Optional[dbt.contracts.graph.unparsed.MetricTimePeriod] = None)" + }, + "MetricConfig": { + "type": "object", + "required": [], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + }, + "additionalProperties": true, + "description": "MetricConfig(_extra: Dict[str, Any] = , enabled: bool = True)" + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://schemas.getdbt.com/dbt/manifest/v8.json" +} diff --git a/test/unit/test_compiler.py b/test/unit/test_compiler.py index 506c427a067..919f897c549 100644 --- a/test/unit/test_compiler.py +++ b/test/unit/test_compiler.py @@ -95,7 +95,6 @@ def test__prepend_ctes__already_has_cte(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, 
path='view.sql', original_file_path='view.sql', @@ -112,7 +111,6 @@ def test__prepend_ctes__already_has_cte(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral.sql', original_file_path='ephemeral.sql', @@ -159,7 +157,6 @@ def test__prepend_ctes__no_ctes(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -177,7 +174,6 @@ def test__prepend_ctes__no_ctes(self): unique_id='model.root.view_no_cte', fqn=['root', 'view_no_cte'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -237,7 +233,6 @@ def test__prepend_ctes(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -254,7 +249,6 @@ def test__prepend_ctes(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral.sql', original_file_path='ephemeral.sql', @@ -299,7 +293,6 @@ def test__prepend_ctes__cte_not_compiled(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', refs=[], sources=[], depends_on=DependsOn(), @@ -320,7 +313,6 @@ def test__prepend_ctes__cte_not_compiled(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', refs=[], sources=[], depends_on=DependsOn(), @@ -348,7 +340,6 @@ def test__prepend_ctes__cte_not_compiled(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', refs=[], sources=[], depends_on=DependsOn(nodes=['model.root.ephemeral']), @@ -418,7 +409,6 @@ def test__prepend_ctes__multiple_levels(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -436,7 +426,6 @@ def test__prepend_ctes__multiple_levels(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral.sql', original_file_path='ephemeral.sql', @@ -453,7 +442,6 @@ def test__prepend_ctes__multiple_levels(self): unique_id='model.root.ephemeral_level_two', fqn=['root', 'ephemeral_level_two'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral_level_two.sql', original_file_path='ephemeral_level_two.sql', @@ -509,7 +497,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self): unique_id='model.root.view', fqn=['root', 'view'], package_name='root', - root_path='/usr/src/app', config=self.model_config, path='view.sql', original_file_path='view.sql', @@ -526,7 +513,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self): unique_id='model.root.inner_ephemeral', fqn=['root', 'inner_ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='inner_ephemeral.sql', original_file_path='inner_ephemeral.sql', @@ -543,7 +529,6 @@ def test__prepend_ctes__valid_ephemeral_sql(self): unique_id='model.root.ephemeral', fqn=['root', 'ephemeral'], package_name='root', - root_path='/usr/src/app', config=ephemeral_config, path='ephemeral.sql', original_file_path='ephemeral.sql', diff --git 
a/test/unit/test_context.py b/test/unit/test_context.py index 668d76cc525..01c4f678abe 100644 --- a/test/unit/test_context.py +++ b/test/unit/test_context.py @@ -43,7 +43,6 @@ def setUp(self): fqn=["root", "model_one"], package_name="root", original_file_path="model_one.sql", - root_path="/usr/src/app", refs=[], sources=[], depends_on=DependsOn(), @@ -283,7 +282,6 @@ def model(): fqn=["root", "model_one"], package_name="root", original_file_path="model_one.sql", - root_path="/usr/src/app", refs=[], sources=[], depends_on=DependsOn(), @@ -346,7 +344,6 @@ def mock_model(): fqn=["root", "model_one"], package_name="root", original_file_path="model_one.sql", - root_path="/usr/src/app", refs=[], sources=[], depends_on=DependsOn(), diff --git a/test/unit/test_contracts_graph_compiled.py b/test/unit/test_contracts_graph_compiled.py index aaa44857326..982673514ab 100644 --- a/test/unit/test_contracts_graph_compiled.py +++ b/test/unit/test_contracts_graph_compiled.py @@ -24,7 +24,6 @@ def basic_uncompiled_model(): return CompiledModelNode( package_name='test', - root_path='/root/', path='/root/models/foo.sql', original_file_path='models/foo.sql', language='sql', @@ -57,7 +56,6 @@ def basic_uncompiled_model(): def basic_compiled_model(): return CompiledModelNode( package_name='test', - root_path='/root/', path='/root/models/foo.sql', original_file_path='models/foo.sql', language='sql', @@ -91,7 +89,6 @@ def basic_compiled_model(): def minimal_uncompiled_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Model), 'path': '/root/models/foo.sql', @@ -114,7 +111,6 @@ def minimal_uncompiled_dict(): def basic_uncompiled_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Model), 'path': '/root/models/foo.sql', @@ -164,7 +160,6 @@ def basic_uncompiled_dict(): def basic_compiled_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Model), 'path': '/root/models/foo.sql', @@ -328,7 +323,6 @@ def test_compare_changed_model(func, basic_uncompiled_model): def minimal_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -354,7 +348,6 @@ def minimal_schema_test_dict(): def basic_uncompiled_schema_test_node(): return CompiledGenericTestNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -388,7 +381,6 @@ def basic_uncompiled_schema_test_node(): def basic_compiled_schema_test_node(): return CompiledGenericTestNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -426,7 +418,6 @@ def basic_compiled_schema_test_node(): def basic_uncompiled_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -477,7 +468,6 @@ def basic_uncompiled_schema_test_dict(): def basic_compiled_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py index b5ec79a7aba..9d11b1cfbc8 100644 --- a/test/unit/test_contracts_graph_parsed.py +++ b/test/unit/test_contracts_graph_parsed.py @@ -127,7 +127,6 @@ def test_config_same(unrendered_node_config_dict, func): def 
base_parsed_model_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Model), 'path': '/root/x/path.sql', @@ -175,7 +174,6 @@ def base_parsed_model_dict(): def basic_parsed_model_object(): return ParsedModelNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -204,7 +202,6 @@ def basic_parsed_model_object(): def minimal_parsed_model_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Model), 'path': '/root/x/path.sql', @@ -226,7 +223,6 @@ def minimal_parsed_model_dict(): def complex_parsed_model_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Model), 'path': '/root/x/path.sql', @@ -285,7 +281,6 @@ def complex_parsed_model_dict(): def complex_parsed_model_object(): return ParsedModelNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -428,7 +423,6 @@ def test_compare_changed_model(func, basic_parsed_model_object): def basic_parsed_seed_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Seed), 'path': '/root/seeds/seed.csv', @@ -476,7 +470,6 @@ def basic_parsed_seed_dict(): def basic_parsed_seed_object(): return ParsedSeedNode( name='foo', - root_path='/root/', resource_type=NodeType.Seed, path='/root/seeds/seed.csv', original_file_path='seeds/seed.csv', @@ -509,7 +502,6 @@ def basic_parsed_seed_object(): def minimal_parsed_seed_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Seed), 'path': '/root/seeds/seed.csv', @@ -530,7 +522,6 @@ def minimal_parsed_seed_dict(): def complex_parsed_seed_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Seed), 'path': '/root/seeds/seed.csv', @@ -581,7 +572,6 @@ def complex_parsed_seed_dict(): def complex_parsed_seed_object(): return ParsedSeedNode( name='foo', - root_path='/root/', resource_type=NodeType.Seed, path='/root/seeds/seed.csv', original_file_path='seeds/seed.csv', @@ -731,7 +721,6 @@ def basic_parsed_model_patch_object(): def patched_model_object(): return ParsedModelNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -771,7 +760,6 @@ def test_patch_parsed_model(basic_parsed_model_object, basic_parsed_model_patch_ def minimal_parsed_hook_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'resource_type': str(NodeType.Operation), 'path': '/root/x/path.sql', 'original_file_path': '/root/path.sql', @@ -791,7 +779,6 @@ def minimal_parsed_hook_dict(): def base_parsed_hook_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Operation), 'path': '/root/x/path.sql', @@ -839,7 +826,6 @@ def base_parsed_hook_dict(): def base_parsed_hook_object(): return ParsedHookNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -869,7 +855,6 @@ def base_parsed_hook_object(): def complex_parsed_hook_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Operation), 'path': '/root/x/path.sql', @@ -928,7 +913,6 @@ def complex_parsed_hook_dict(): def complex_parsed_hook_object(): return ParsedHookNode( package_name='test', - 
root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -996,7 +980,6 @@ def test_invalid_hook_index_type(base_parsed_hook_dict): def minimal_parsed_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -1023,7 +1006,6 @@ def minimal_parsed_schema_test_dict(): def basic_parsed_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -1071,7 +1053,6 @@ def basic_parsed_schema_test_dict(): def basic_parsed_schema_test_object(): return ParsedGenericTestNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1100,7 +1081,6 @@ def basic_parsed_schema_test_object(): def complex_parsed_schema_test_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Test), 'path': '/root/x/path.sql', @@ -1165,7 +1145,6 @@ def complex_parsed_schema_test_object(): cfg._extra.update({'extra_key': 'extra value'}) return ParsedGenericTestNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1459,7 +1438,6 @@ def test_invalid_check_value(basic_check_snapshot_config_dict): def basic_timestamp_snapshot_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Snapshot), 'path': '/root/x/path.sql', @@ -1518,7 +1496,6 @@ def basic_timestamp_snapshot_dict(): def basic_timestamp_snapshot_object(): return ParsedSnapshotNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1567,7 +1544,6 @@ def basic_intermediate_timestamp_snapshot_object(): return IntermediateSnapshotNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1602,7 +1578,6 @@ def basic_intermediate_timestamp_snapshot_object(): def basic_check_snapshot_dict(): return { 'name': 'foo', - 'root_path': '/root/', 'created_at': 1.0, 'resource_type': str(NodeType.Snapshot), 'path': '/root/x/path.sql', @@ -1661,7 +1636,6 @@ def basic_check_snapshot_dict(): def basic_check_snapshot_object(): return ParsedSnapshotNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1710,7 +1684,6 @@ def basic_intermediate_check_snapshot_object(): return IntermediateSnapshotNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -1829,7 +1802,6 @@ def _ok_dict(self): 'created_at': 1.0, 'package_name': 'test', 'macro_sql': '{% macro foo() %}select 1 as id{% endmacro %}', - 'root_path': '/root/', 'resource_type': 'macro', 'unique_id': 'macro.test.foo', 'tags': [], @@ -1848,7 +1820,6 @@ def test_ok(self): original_file_path='/root/path.sql', package_name='test', macro_sql='{% macro foo() %}select 1 as id{% endmacro %}', - root_path='/root/', resource_type=NodeType.Macro, unique_id='macro.test.foo', tags=[], @@ -1881,7 +1852,6 @@ def _ok_dict(self): 'original_file_path': '/root/docs/doc.md', 'package_name': 'test', 'path': '/root/docs', - 'root_path': '/root', 'unique_id': 'test.foo', } @@ -1889,7 +1859,6 @@ def test_ok(self): doc_dict = self._ok_dict() doc = self.ContractType( package_name='test', - 
root_path='/root', path='/root/docs', original_file_path='/root/docs/doc.md', name='foo', @@ -1914,7 +1883,6 @@ def test_invalid_extra(self): def minimum_parsed_source_definition_dict(): return { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/models/sources.yml', 'original_file_path': '/root/models/sources.yml', 'created_at': 1.0, @@ -1935,7 +1903,6 @@ def minimum_parsed_source_definition_dict(): def basic_parsed_source_definition_dict(): return { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/models/sources.yml', 'original_file_path': '/root/models/sources.yml', 'created_at': 1.0, @@ -1977,7 +1944,6 @@ def basic_parsed_source_definition_object(): path='/root/models/sources.yml', quoting=Quoting(), resource_type=NodeType.Source, - root_path='/root', schema='some_schema', source_description='my source description', source_name='my_source', @@ -1991,7 +1957,6 @@ def basic_parsed_source_definition_object(): def complex_parsed_source_definition_dict(): return { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/models/sources.yml', 'original_file_path': '/root/models/sources.yml', 'created_at': 1.0, @@ -2038,7 +2003,6 @@ def complex_parsed_source_definition_object(): path='/root/models/sources.yml', quoting=Quoting(), resource_type=NodeType.Source, - root_path='/root', schema='some_schema', source_description='my source description', source_name='my_source', @@ -2146,7 +2110,6 @@ def minimal_parsed_exposure_dict(): 'meta': {}, 'tags': [], 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'description': '', 'created_at': 1.0, @@ -2172,7 +2135,6 @@ def basic_parsed_exposure_dict(): 'unique_id': 'exposure.test.my_exposure', 'package_name': 'test', 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'description': '', 'meta': {}, @@ -2194,7 +2156,6 @@ def basic_parsed_exposure_object(): unique_id='exposure.test.my_exposure', package_name='test', path='models/something.yml', - root_path='/usr/src/app', original_file_path='models/something.yml', owner=ExposureOwner(email='test@example.com'), description='', @@ -2234,7 +2195,6 @@ def complex_parsed_exposure_dict(): 'unique_id': 'exposure.test.my_exposure', 'package_name': 'test', 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'config': { 'enabled': True, @@ -2259,7 +2219,6 @@ def complex_parsed_exposure_object(): unique_id='exposure.test.my_exposure', package_name='test', path='models/something.yml', - root_path='/usr/src/app', original_file_path='models/something.yml', config=ExposureConfig(), unrendered_config={}, @@ -2318,7 +2277,6 @@ def minimal_parsed_metric_dict(): 'meta': {}, 'tags': [], 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'description': '', 'created_at': 1.0, @@ -2351,7 +2309,6 @@ def basic_parsed_metric_dict(): 'unique_id': 'metric.test.my_metric', 'package_name': 'test', 'path': 'models/something.yml', - 'root_path': '/usr/src/app', 'original_file_path': 'models/something.yml', 'description': '', 'meta': {}, @@ -2373,7 +2330,6 @@ def basic_parsed_metric_object(): unique_id='metric.test.my_metric', package_name='test', path='models/something.yml', - root_path='/usr/src/app', original_file_path='models/something.yml', description='', meta={}, diff --git a/test/unit/test_contracts_graph_unparsed.py b/test/unit/test_contracts_graph_unparsed.py index 
5c89148cd11..8821b355b71 100644 --- a/test/unit/test_contracts_graph_unparsed.py +++ b/test/unit/test_contracts_graph_unparsed.py @@ -24,7 +24,6 @@ def test_ok(self): 'package_name': 'test', 'language': 'sql', 'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}', - 'root_path': '/root/', 'resource_type': 'macro', } macro = self.ContractType( @@ -33,7 +32,6 @@ def test_ok(self): package_name='test', language='sql', raw_code='{% macro foo() %}select 1 as id{% endmacro %}', - root_path='/root/', resource_type=NodeType.Macro, ) self.assert_symmetric(macro, macro_dict) @@ -46,7 +44,6 @@ def test_invalid_missing_field(self): # 'package_name': 'test', 'language': 'sql', 'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}', - 'root_path': '/root/', 'resource_type': 'macro', } self.assert_fails_validation(macro_dict) @@ -58,7 +55,6 @@ def test_invalid_extra_field(self): 'package_name': 'test', 'language': 'sql', 'raw_code': '{% macro foo() %}select 1 as id{% endmacro %}', - 'root_path': '/root/', 'extra': 'extra', 'resource_type': 'macro', } @@ -71,7 +67,6 @@ class TestUnparsedNode(ContractTestCase): def test_ok(self): node_dict = { 'name': 'foo', - 'root_path': '/root/', 'resource_type': NodeType.Model, 'path': '/root/x/path.sql', 'original_file_path': '/root/path.sql', @@ -81,7 +76,6 @@ def test_ok(self): } node = self.ContractType( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -99,7 +93,6 @@ def test_ok(self): def test_empty(self): node_dict = { 'name': 'foo', - 'root_path': '/root/', 'resource_type': NodeType.Model, 'path': '/root/x/path.sql', 'original_file_path': '/root/path.sql', @@ -109,7 +102,6 @@ def test_empty(self): } node = UnparsedNode( package_name='test', - root_path='/root/', path='/root/x/path.sql', original_file_path='/root/path.sql', language='sql', @@ -126,7 +118,6 @@ def test_empty(self): def test_bad_type(self): node_dict = { 'name': 'foo', - 'root_path': '/root/', 'resource_type': NodeType.Source, # not valid! 
'path': '/root/x/path.sql', 'original_file_path': '/root/path.sql', @@ -143,7 +134,6 @@ class TestUnparsedRunHook(ContractTestCase): def test_ok(self): node_dict = { 'name': 'foo', - 'root_path': 'test/dbt_project.yml', 'resource_type': NodeType.Operation, 'path': '/root/dbt_project.yml', 'original_file_path': '/root/dbt_project.yml', @@ -154,7 +144,6 @@ def test_ok(self): } node = self.ContractType( package_name='test', - root_path='test/dbt_project.yml', path='/root/dbt_project.yml', original_file_path='/root/dbt_project.yml', language='sql', @@ -170,7 +159,6 @@ def test_ok(self): def test_bad_type(self): node_dict = { 'name': 'foo', - 'root_path': 'test/dbt_project.yml', 'resource_type': NodeType.Model, # invalid 'path': '/root/dbt_project.yml', 'original_file_path': '/root/dbt_project.yml', @@ -365,14 +353,12 @@ class TestUnparsedDocumentationFile(ContractTestCase): def test_ok(self): doc = self.ContractType( package_name='test', - root_path='/root', path='/root/docs', original_file_path='/root/docs/doc.md', file_contents='blah blah blah', ) doc_dict = { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/docs', 'original_file_path': '/root/docs/doc.md', 'file_contents': 'blah blah blah', @@ -386,7 +372,6 @@ def test_extra_field(self): self.assert_fails_validation({}) doc_dict = { 'package_name': 'test', - 'root_path': '/root', 'path': '/root/docs', 'original_file_path': '/root/docs/doc.md', 'file_contents': 'blah blah blah', diff --git a/test/unit/test_docs_blocks.py b/test/unit/test_docs_blocks.py index c6673321480..8b87463313a 100644 --- a/test/unit/test_docs_blocks.py +++ b/test/unit/test_docs_blocks.py @@ -158,7 +158,6 @@ def test_load_file(self): self.assertIsInstance(result, ParsedDocumentation) self.assertEqual(result.package_name, 'some_package') self.assertEqual(result.original_file_path, self.testfile_path) - self.assertEqual(result.root_path, self.subdir_path) self.assertEqual(result.resource_type, NodeType.Documentation) self.assertEqual(result.path, 'test_file.md') @@ -200,7 +199,6 @@ def test_multiple_raw_blocks(self): self.assertIsInstance(result, ParsedDocumentation) self.assertEqual(result.package_name, 'some_package') self.assertEqual(result.original_file_path, self.testfile_path) - self.assertEqual(result.root_path, self.subdir_path) self.assertEqual(result.resource_type, NodeType.Documentation) self.assertEqual(result.path, 'test_file.md') diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py index 55559b13e17..87343ca3756 100644 --- a/test/unit/test_graph_selector_methods.py +++ b/test/unit/test_graph_selector_methods.py @@ -87,7 +87,6 @@ def make_model(pkg, name, sql, refs=None, sources=None, tags=None, path=None, al fqn=fqn, unique_id=f'model.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'models/{path}', config=NodeConfig(**config_kwargs), @@ -128,7 +127,6 @@ def make_seed(pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extr fqn=fqn, unique_id=f'seed.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'data/{path}', tags=tags, @@ -156,7 +154,6 @@ def make_source(pkg, source_name, table_name, path=None, loader=None, identifier schema='dbt_schema', unique_id=f'source.{pkg}.{source_name}.{table_name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=path, name=table_name, @@ -182,7 +179,6 @@ def make_macro(pkg, name, macro_sql, path=None, 
depends_on_macros=None): macro_sql=macro_sql, unique_id=f'macro.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=path, resource_type=NodeType.Macro, @@ -266,7 +262,6 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No fqn=['minimal', 'schema_test', node_name], unique_id=f'test.{pkg}.{node_name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=f'schema_test/{node_name}.sql', original_file_path=f'models/{path}', resource_type=NodeType.Test, @@ -318,7 +313,6 @@ def make_data_test(pkg, name, sql, refs=None, sources=None, tags=None, path=None fqn=fqn, unique_id=f'test.{pkg}.{name}', package_name=pkg, - root_path='/usr/dbt/some-project', path=path, original_file_path=f'tests/{path}', config=TestConfig(**config_kwargs), @@ -349,7 +343,6 @@ def make_exposure(pkg, name, path=None, fqn_extras=None, owner=None): unique_id=f'exposure.{pkg}.{name}', package_name=pkg, path=path, - root_path='/usr/src/app', original_file_path=path, owner=owner, ) @@ -363,7 +356,6 @@ def make_metric(pkg, name, path=None): name=name, path='schema.yml', package_name=pkg, - root_path='/usr/src/app', original_file_path=path, unique_id=f'metric.{pkg}.{name}', fqn=[pkg, 'metrics', name], diff --git a/test/unit/test_manifest.py b/test/unit/test_manifest.py index cbce93fc052..3e18c555b9c 100644 --- a/test/unit/test_manifest.py +++ b/test/unit/test_manifest.py @@ -45,7 +45,7 @@ REQUIRED_PARSED_NODE_KEYS = frozenset({ 'alias', 'tags', 'config', 'unique_id', 'refs', 'sources', 'metrics', 'meta', 'depends_on', 'database', 'schema', 'name', 'resource_type', - 'package_name', 'root_path', 'path', 'original_file_path', 'raw_code', 'language', + 'package_name', 'path', 'original_file_path', 'raw_code', 'language', 'description', 'columns', 'fqn', 'build_path', 'compiled_path', 'patch_path', 'docs', 'deferred', 'checksum', 'unrendered_config', 'created_at', 'config_call_dict', }) @@ -95,7 +95,6 @@ def setUp(self): fqn=['root', 'my_exposure'], unique_id='exposure.root.my_exposure', package_name='root', - root_path='', path='my_exposure.sql', original_file_path='my_exposure.sql' ) @@ -128,7 +127,6 @@ def setUp(self): fqn=['root', 'my_metric'], unique_id='metric.root.my_metric', package_name='root', - root_path='', path='my_metric.yml', original_file_path='my_metric.yml' ) @@ -152,7 +150,6 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -175,7 +172,6 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -198,7 +194,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -221,7 +216,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -244,7 +238,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -267,7 +260,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -289,7 +281,6 @@ def setUp(self): unique_id='source.test.my_source.my_table', fqn=['test', 'my_source', 'my_table'], package_name='root', - root_path='', path='schema.yml', 
original_file_path='schema.yml', ), @@ -331,7 +322,7 @@ def test__no_nodes(self): 'child_map': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'env': {ENV_KEY_NAME: 'value'}, 'invocation_id': invocation_id, @@ -482,7 +473,7 @@ def test_no_nodes_with_metadata(self, mock_user): 'docs': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'project_id': '098f6bcd4621d373cade4e832627b4f6', 'user_id': 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf', @@ -518,7 +509,6 @@ def test_get_resource_fqns(self): tags=[], path='seed.csv', original_file_path='seed.csv', - root_path='', language='sql', raw_code='-- csv --', checksum=FileHash.empty(), @@ -569,7 +559,6 @@ def test__deepcopy_copies_flat_graph(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -613,7 +602,6 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', language='sql', raw_code='does not matter', meta={}, @@ -640,7 +628,6 @@ def setUp(self): tags=[], path='events.sql', original_file_path='events.sql', - root_path='', raw_code='does not matter', meta={}, compiled=True, @@ -667,7 +654,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -689,7 +675,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -711,7 +696,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -733,7 +717,6 @@ def setUp(self): tags=[], path='multi.sql', original_file_path='multi.sql', - root_path='', meta={}, language='sql', raw_code='does not matter', @@ -763,7 +746,7 @@ def test__no_nodes(self): 'child_map': {}, 'metadata': { 'generated_at': '2018-02-14T09:15:13Z', - 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v7.json', + 'dbt_schema_version': 'https://schemas.getdbt.com/dbt/manifest/v8.json', 'dbt_version': dbt.version.__version__, 'invocation_id': '01234567-0123-0123-0123-0123456789ab', 'env': {ENV_KEY_NAME: 'value'}, diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 8fdf297d4cd..1ae9e3917ed 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -66,7 +66,6 @@ def _generate_macros(self): unique_id=f'macro.root.{name}', package_name='root', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/root'), path=normalize('macros/macro.sql'), macro_sql=sql, ) @@ -521,7 +520,6 @@ def test_basic(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', @@ -580,7 +578,6 @@ def model(dbt, session): fqn=['snowplow', 'nested', 'py_model'], package_name='snowplow', original_file_path=normalize('models/nested/py_model.py'), - 
root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table', packages=python_packages), # config.packages = ['textblob'] path=normalize('nested/py_model.py'), @@ -765,7 +762,6 @@ def test_built_in_macro_override_detection(self): unique_id=macro_unique_id, package_name='root', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/root'), path=normalize('macros/macro.sql'), macro_sql='{% macro ref(model_name) %}{% set x = raise("boom") %}{% endmacro %}', ) @@ -782,7 +778,6 @@ def test_built_in_macro_override_detection(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', @@ -818,7 +813,6 @@ def setUp(self): fqn=['snowplow', 'nested', 'model_1'], package_name='snowplow', original_file_path=normalize('models/nested/model_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', @@ -999,7 +993,6 @@ def test_single_block(self): fqn=['snowplow', 'nested', 'snap_1', 'foo'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1068,7 +1061,6 @@ def test_multi_block(self): fqn=['snowplow', 'nested', 'snap_1', 'foo'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1106,7 +1098,6 @@ def test_multi_block(self): fqn=['snowplow', 'nested', 'snap_1', 'bar'], package_name='snowplow', original_file_path=normalize('snapshots/nested/snap_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), config=SnapshotConfig( strategy='timestamp', updated_at='last_update', @@ -1166,7 +1157,6 @@ def test_single_block(self): unique_id='macro.snowplow.foo', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql=raw_code, ) @@ -1189,7 +1179,6 @@ def test_multiple_blocks(self): unique_id='macro.snowplow.bar', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql='{% macro bar(c, d) %}c + d{% endmacro %}', ) @@ -1199,7 +1188,6 @@ def test_multiple_blocks(self): unique_id='macro.snowplow.foo', package_name='snowplow', original_file_path=normalize('macros/macro.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('macros/macro.sql'), macro_sql='{% macro foo(a, b) %}a ~ b{% endmacro %}', ) @@ -1242,7 +1230,6 @@ def test_basic(self): fqn=['snowplow', 'test_1'], package_name='snowplow', original_file_path=normalize('tests/test_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), refs=[['blah']], config=TestConfig(severity='ERROR'), tags=[], @@ -1282,7 +1269,6 @@ def test_basic(self): unique_id='macro.snowplow.test_not_null', package_name='snowplow', original_file_path=normalize('tests/generic/test_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), path=normalize('tests/generic/test_1.sql'), macro_sql=raw_code, 
) @@ -1321,7 +1307,6 @@ def test_basic(self): fqn=['snowplow', 'analysis', 'nested', 'analysis_1'], package_name='snowplow', original_file_path=normalize('analyses/nested/analysis_1.sql'), - root_path=get_abs_os_path('./dbt_packages/snowplow'), depends_on=DependsOn(), config=NodeConfig(), path=normalize('analysis/nested/analysis_1.sql'), diff --git a/test/unit/test_partial_parsing.py b/test/unit/test_partial_parsing.py index de0e230ad3c..a784532fcf4 100644 --- a/test/unit/test_partial_parsing.py +++ b/test/unit/test_partial_parsing.py @@ -90,7 +90,6 @@ def setUp(self): def get_model(self, name): return ParsedModelNode( package_name='my_test', - root_path='/users/root/', path=f'{name}.sql', original_file_path=f'models/{name}.sql', language='sql', @@ -109,7 +108,6 @@ def get_model(self, name): def get_python_model(self, name): return ParsedModelNode( package_name='my_test', - root_path='/users/root/', path=f'{name}.py', original_file_path=f'models/{name}.py', raw_code='import something', diff --git a/test/unit/utils.py b/test/unit/utils.py index 4cfe0519d44..5df2ef6ac8c 100644 --- a/test/unit/utils.py +++ b/test/unit/utils.py @@ -246,7 +246,6 @@ def generate_name_macros(package): unique_id=f'macro.{package}.{name}', package_name=package, original_file_path=normalize('macros/macro.sql'), - root_path='./dbt_packages/root', path=normalize('macros/macro.sql'), macro_sql=sql, ) diff --git a/tests/functional/artifacts/data/state/v8/manifest.json b/tests/functional/artifacts/data/state/v8/manifest.json new file mode 100644 index 00000000000..9cfefdaaf85 --- /dev/null +++ b/tests/functional/artifacts/data/state/v8/manifest.json @@ -0,0 +1 @@ +{"metadata": {"dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v8.json", "dbt_version": "1.4.0a1", "generated_at": "2022-11-04T14:47:38.242390Z", "invocation_id": "c6157471-2b64-428a-ada9-044ddfcc03ac", "env": {}, "project_id": "098f6bcd4621d373cade4e832627b4f6", "user_id": null, "send_anonymous_usage_stats": false, "adapter_type": "postgres"}, "nodes": {"model.test.my_model": {"resource_type": "model", "depends_on": {"macros": [], "nodes": []}, "config": {"enabled": true, "alias": null, "schema": null, "database": null, "tags": [], "meta": {}, "materialized": "view", "incremental_strategy": null, "persist_docs": {}, "quoting": {}, "column_types": {}, "full_refresh": null, "unique_key": null, "on_schema_change": "ignore", "grants": {}, "packages": [], "docs": {"show": true, "node_color": null}, "post-hook": [], "pre-hook": []}, "database": "dbt", "schema": "test16675732582545487557_test_previous_version_state", "fqn": ["test", "my_model"], "unique_id": "model.test.my_model", "raw_code": "select 1 as id", "language": "sql", "package_name": "test", "path": "my_model.sql", "original_file_path": "models/my_model.sql", "name": "my_model", "alias": "my_model", "checksum": {"name": "sha256", "checksum": "2b9123e04ab8bb798f7c565afdc3ee0e56fcd66b4bfbdb435b4891c878d947c5"}, "tags": [], "refs": [], "sources": [], "metrics": [], "description": "", "columns": {}, "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "compiled_path": null, "build_path": null, "deferred": false, "unrendered_config": {}, "created_at": 1667573258.9454}}, "sources": {}, "macros": {"macro.dbt_postgres.postgres__current_timestamp": {"unique_id": "macro.dbt_postgres.postgres__current_timestamp", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__current_timestamp", "macro_sql": "{% 
macro postgres__current_timestamp() -%}\n now()\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.265455, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_string_as_time": {"unique_id": "macro.dbt_postgres.postgres__snapshot_string_as_time", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__snapshot_string_as_time", "macro_sql": "{% macro postgres__snapshot_string_as_time(timestamp) -%}\n {%- set result = \"'\" ~ timestamp ~ \"'::timestamp without time zone\" -%}\n {{ return(result) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.266111, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_get_time": {"unique_id": "macro.dbt_postgres.postgres__snapshot_get_time", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__snapshot_get_time", "macro_sql": "{% macro postgres__snapshot_get_time() -%}\n {{ current_timestamp() }}::timestamp without time zone\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.266408, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_backcompat": {"unique_id": "macro.dbt_postgres.postgres__current_timestamp_backcompat", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__current_timestamp_backcompat", "macro_sql": "{% macro postgres__current_timestamp_backcompat() %}\n current_timestamp::{{ type_timestamp() }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.266695, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat": {"unique_id": "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro postgres__current_timestamp_in_utc_backcompat() %}\n (current_timestamp at time zone 'utc')::{{ type_timestamp() }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.266977, "supported_languages": null}, "macro.dbt_postgres.postgres__get_catalog": {"unique_id": "macro.dbt_postgres.postgres__get_catalog", "package_name": "dbt_postgres", "path": "macros/catalog.sql", "original_file_path": "macros/catalog.sql", "name": "postgres__get_catalog", "macro_sql": "{% macro postgres__get_catalog(information_schema, schemas) -%}\n\n {%- call statement('catalog', fetch_result=True) -%}\n {#\n If the user has multiple 
databases set and the first one is wrong, this will fail.\n But we won't fail in the case where there are multiple quoting-difference-only dbs, which is better.\n #}\n {% set database = information_schema.database %}\n {{ adapter.verify_database(database) }}\n\n select\n '{{ database }}' as table_database,\n sch.nspname as table_schema,\n tbl.relname as table_name,\n case tbl.relkind\n when 'v' then 'VIEW'\n else 'BASE TABLE'\n end as table_type,\n tbl_desc.description as table_comment,\n col.attname as column_name,\n col.attnum as column_index,\n pg_catalog.format_type(col.atttypid, col.atttypmod) as column_type,\n col_desc.description as column_comment,\n pg_get_userbyid(tbl.relowner) as table_owner\n\n from pg_catalog.pg_namespace sch\n join pg_catalog.pg_class tbl on tbl.relnamespace = sch.oid\n join pg_catalog.pg_attribute col on col.attrelid = tbl.oid\n left outer join pg_catalog.pg_description tbl_desc on (tbl_desc.objoid = tbl.oid and tbl_desc.objsubid = 0)\n left outer join pg_catalog.pg_description col_desc on (col_desc.objoid = tbl.oid and col_desc.objsubid = col.attnum)\n\n where (\n {%- for schema in schemas -%}\n upper(sch.nspname) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n )\n and not pg_is_other_temp_schema(sch.oid) -- not a temporary schema belonging to another session\n and tbl.relpersistence in ('p', 'u') -- [p]ermanent table or [u]nlogged table. Exclude [t]emporary tables\n and tbl.relkind in ('r', 'v', 'f', 'p') -- o[r]dinary table, [v]iew, [f]oreign table, [p]artitioned table. Other values are [i]ndex, [S]equence, [c]omposite type, [t]OAST table, [m]aterialized view\n and col.attnum > 0 -- negative numbers are used for system columns such as oid\n and not col.attisdropped -- column as not been dropped\n\n order by\n sch.nspname,\n tbl.relname,\n col.attnum\n\n {%- endcall -%}\n\n {{ return(load_result('catalog').table) }}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2698722, "supported_languages": null}, "macro.dbt_postgres.postgres_get_relations": {"unique_id": "macro.dbt_postgres.postgres_get_relations", "package_name": "dbt_postgres", "path": "macros/relations.sql", "original_file_path": "macros/relations.sql", "name": "postgres_get_relations", "macro_sql": "{% macro postgres_get_relations () -%}\n\n {#\n -- in pg_depend, objid is the dependent, refobjid is the referenced object\n -- > a pg_depend entry indicates that the referenced object cannot be\n -- > dropped without also dropping the dependent object.\n #}\n\n {%- call statement('relations', fetch_result=True) -%}\n with relation as (\n select\n pg_rewrite.ev_class as class,\n pg_rewrite.oid as id\n from pg_rewrite\n ),\n class as (\n select\n oid as id,\n relname as name,\n relnamespace as schema,\n relkind as kind\n from pg_class\n ),\n dependency as (\n select distinct\n pg_depend.objid as id,\n pg_depend.refobjid as ref\n from pg_depend\n ),\n schema as (\n select\n pg_namespace.oid as id,\n pg_namespace.nspname as name\n from pg_namespace\n where nspname != 'information_schema' and nspname not like 'pg\\_%'\n ),\n referenced as (\n select\n relation.id AS id,\n referenced_class.name ,\n referenced_class.schema ,\n referenced_class.kind\n from relation\n join class as referenced_class on relation.class=referenced_class.id\n where referenced_class.kind in ('r', 'v')\n 
),\n relationships as (\n select\n referenced.name as referenced_name,\n referenced.schema as referenced_schema_id,\n dependent_class.name as dependent_name,\n dependent_class.schema as dependent_schema_id,\n referenced.kind as kind\n from referenced\n join dependency on referenced.id=dependency.id\n join class as dependent_class on dependency.ref=dependent_class.id\n where\n (referenced.name != dependent_class.name or\n referenced.schema != dependent_class.schema)\n )\n\n select\n referenced_schema.name as referenced_schema,\n relationships.referenced_name as referenced_name,\n dependent_schema.name as dependent_schema,\n relationships.dependent_name as dependent_name\n from relationships\n join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id\n join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id\n group by referenced_schema, referenced_name, dependent_schema, dependent_name\n order by referenced_schema, referenced_name, dependent_schema, dependent_name;\n\n {%- endcall -%}\n\n {{ return(load_result('relations').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.271539, "supported_languages": null}, "macro.dbt_postgres.postgres__create_table_as": {"unique_id": "macro.dbt_postgres.postgres__create_table_as", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__create_table_as", "macro_sql": "{% macro postgres__create_table_as(temporary, relation, sql) -%}\n {%- set unlogged = config.get('unlogged', default=false) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary -%}\n temporary\n {%- elif unlogged -%}\n unlogged\n {%- endif %} table {{ relation }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2825222, "supported_languages": null}, "macro.dbt_postgres.postgres__get_create_index_sql": {"unique_id": "macro.dbt_postgres.postgres__get_create_index_sql", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__get_create_index_sql", "macro_sql": "{% macro postgres__get_create_index_sql(relation, index_dict) -%}\n {%- set index_config = adapter.parse_index(index_dict) -%}\n {%- set comma_separated_columns = \", \".join(index_config.columns) -%}\n {%- set index_name = index_config.render(relation) -%}\n\n create {% if index_config.unique -%}\n unique\n {%- endif %} index if not exists\n \"{{ index_name }}\"\n on {{ relation }} {% if index_config.type -%}\n using {{ index_config.type }}\n {%- endif %}\n ({{ comma_separated_columns }});\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.283871, "supported_languages": null}, "macro.dbt_postgres.postgres__create_schema": {"unique_id": "macro.dbt_postgres.postgres__create_schema", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", 
"name": "postgres__create_schema", "macro_sql": "{% macro postgres__create_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier().include(database=False) }}\n {%- endcall -%}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.284721, "supported_languages": null}, "macro.dbt_postgres.postgres__drop_schema": {"unique_id": "macro.dbt_postgres.postgres__drop_schema", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__drop_schema", "macro_sql": "{% macro postgres__drop_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier().include(database=False) }} cascade\n {%- endcall -%}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.285629, "supported_languages": null}, "macro.dbt_postgres.postgres__get_columns_in_relation": {"unique_id": "macro.dbt_postgres.postgres__get_columns_in_relation", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__get_columns_in_relation", "macro_sql": "{% macro postgres__get_columns_in_relation(relation) -%}\n {% call statement('get_columns_in_relation', fetch_result=True) %}\n select\n column_name,\n data_type,\n character_maximum_length,\n numeric_precision,\n numeric_scale\n\n from {{ relation.information_schema('columns') }}\n where table_name = '{{ relation.identifier }}'\n {% if relation.schema %}\n and table_schema = '{{ relation.schema }}'\n {% endif %}\n order by ordinal_position\n\n {% endcall %}\n {% set table = load_result('get_columns_in_relation').table %}\n {{ return(sql_convert_columns_in_relation(table)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.sql_convert_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.286848, "supported_languages": null}, "macro.dbt_postgres.postgres__list_relations_without_caching": {"unique_id": "macro.dbt_postgres.postgres__list_relations_without_caching", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__list_relations_without_caching", "macro_sql": "{% macro postgres__list_relations_without_caching(schema_relation) %}\n {% call statement('list_relations_without_caching', fetch_result=True) -%}\n select\n '{{ schema_relation.database }}' as database,\n tablename as name,\n schemaname as schema,\n 'table' as type\n from pg_tables\n where schemaname ilike '{{ schema_relation.schema }}'\n union all\n select\n '{{ schema_relation.database }}' as database,\n viewname as name,\n schemaname as schema,\n 'view' as type\n from pg_views\n where schemaname ilike '{{ schema_relation.schema }}'\n {% endcall %}\n {{ 
return(load_result('list_relations_without_caching').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2877922, "supported_languages": null}, "macro.dbt_postgres.postgres__information_schema_name": {"unique_id": "macro.dbt_postgres.postgres__information_schema_name", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__information_schema_name", "macro_sql": "{% macro postgres__information_schema_name(database) -%}\n {% if database_name -%}\n {{ adapter.verify_database(database_name) }}\n {%- endif -%}\n information_schema\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.288241, "supported_languages": null}, "macro.dbt_postgres.postgres__list_schemas": {"unique_id": "macro.dbt_postgres.postgres__list_schemas", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__list_schemas", "macro_sql": "{% macro postgres__list_schemas(database) %}\n {% if database -%}\n {{ adapter.verify_database(database) }}\n {%- endif -%}\n {% call statement('list_schemas', fetch_result=True, auto_begin=False) %}\n select distinct nspname from pg_namespace\n {% endcall %}\n {{ return(load_result('list_schemas').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.289124, "supported_languages": null}, "macro.dbt_postgres.postgres__check_schema_exists": {"unique_id": "macro.dbt_postgres.postgres__check_schema_exists", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__check_schema_exists", "macro_sql": "{% macro postgres__check_schema_exists(information_schema, schema) -%}\n {% if information_schema.database -%}\n {{ adapter.verify_database(information_schema.database) }}\n {%- endif -%}\n {% call statement('check_schema_exists', fetch_result=True, auto_begin=False) %}\n select count(*) from pg_namespace where nspname = '{{ schema }}'\n {% endcall %}\n {{ return(load_result('check_schema_exists').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.290139, "supported_languages": null}, "macro.dbt_postgres.postgres__make_relation_with_suffix": {"unique_id": "macro.dbt_postgres.postgres__make_relation_with_suffix", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__make_relation_with_suffix", "macro_sql": "{% macro postgres__make_relation_with_suffix(base_relation, suffix, dstring) %}\n {% if dstring %}\n {% set dt = modules.datetime.datetime.now() %}\n {% set dtstring = dt.strftime(\"%H%M%S%f\") %}\n {% set suffix = suffix ~ dtstring %}\n {% endif %}\n {% set suffix_length = suffix|length %}\n {% set relation_max_name_length = 
base_relation.relation_max_name_length() %}\n {% if suffix_length > relation_max_name_length %}\n {% do exceptions.raise_compiler_error('Relation suffix is too long (' ~ suffix_length ~ ' characters). Maximum length is ' ~ relation_max_name_length ~ ' characters.') %}\n {% endif %}\n {% set identifier = base_relation.identifier[:relation_max_name_length - suffix_length] ~ suffix %}\n\n {{ return(base_relation.incorporate(path={\"identifier\": identifier })) }}\n\n {% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2923021, "supported_languages": null}, "macro.dbt_postgres.postgres__make_intermediate_relation": {"unique_id": "macro.dbt_postgres.postgres__make_intermediate_relation", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__make_intermediate_relation", "macro_sql": "{% macro postgres__make_intermediate_relation(base_relation, suffix) %}\n {{ return(postgres__make_relation_with_suffix(base_relation, suffix, dstring=False)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.292798, "supported_languages": null}, "macro.dbt_postgres.postgres__make_temp_relation": {"unique_id": "macro.dbt_postgres.postgres__make_temp_relation", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__make_temp_relation", "macro_sql": "{% macro postgres__make_temp_relation(base_relation, suffix) %}\n {% set temp_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=True) %}\n {{ return(temp_relation.incorporate(path={\"schema\": none,\n \"database\": none})) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.293597, "supported_languages": null}, "macro.dbt_postgres.postgres__make_backup_relation": {"unique_id": "macro.dbt_postgres.postgres__make_backup_relation", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__make_backup_relation", "macro_sql": "{% macro postgres__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {% set backup_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=False) %}\n {{ return(backup_relation.incorporate(type=backup_relation_type)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.294295, "supported_languages": null}, "macro.dbt_postgres.postgres_escape_comment": {"unique_id": "macro.dbt_postgres.postgres_escape_comment", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres_escape_comment", "macro_sql": "{% macro postgres_escape_comment(comment) -%}\n {% if comment is 
not string %}\n {% do exceptions.raise_compiler_error('cannot escape a non-string: ' ~ comment) %}\n {% endif %}\n {%- set magic = '$dbt_comment_literal_block$' -%}\n {%- if magic in comment -%}\n {%- do exceptions.raise_compiler_error('The string ' ~ magic ~ ' is not allowed in comments.') -%}\n {%- endif -%}\n {{ magic }}{{ comment }}{{ magic }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2953768, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_relation_comment": {"unique_id": "macro.dbt_postgres.postgres__alter_relation_comment", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__alter_relation_comment", "macro_sql": "{% macro postgres__alter_relation_comment(relation, comment) %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on {{ relation.type }} {{ relation }} is {{ escaped_comment }};\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.296059, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_column_comment": {"unique_id": "macro.dbt_postgres.postgres__alter_column_comment", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__alter_column_comment", "macro_sql": "{% macro postgres__alter_column_comment(relation, column_dict) %}\n {% set existing_columns = adapter.get_columns_in_relation(relation) | map(attribute=\"name\") | list %}\n {% for column_name in column_dict if (column_name in existing_columns) %}\n {% set comment = column_dict[column_name]['description'] %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on column {{ relation }}.{{ adapter.quote(column_name) if column_dict[column_name]['quote'] else column_name }} is {{ escaped_comment }};\n {% endfor %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2975638, "supported_languages": null}, "macro.dbt_postgres.postgres__get_show_grant_sql": {"unique_id": "macro.dbt_postgres.postgres__get_show_grant_sql", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__get_show_grant_sql", "macro_sql": "\n\n{%- macro postgres__get_show_grant_sql(relation) -%}\n select grantee, privilege_type\n from {{ relation.information_schema('role_table_grants') }}\n where grantor = current_role\n and grantee != current_role\n and table_schema = '{{ relation.schema }}'\n and table_name = '{{ relation.identifier }}'\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2980769, "supported_languages": null}, "macro.dbt_postgres.postgres__copy_grants": {"unique_id": "macro.dbt_postgres.postgres__copy_grants", "package_name": "dbt_postgres", "path": "macros/adapters.sql", 
"original_file_path": "macros/adapters.sql", "name": "postgres__copy_grants", "macro_sql": "{% macro postgres__copy_grants() %}\n {{ return(False) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.298378, "supported_languages": null}, "macro.dbt_postgres.postgres__get_incremental_default_sql": {"unique_id": "macro.dbt_postgres.postgres__get_incremental_default_sql", "package_name": "dbt_postgres", "path": "macros/materializations/incremental_strategies.sql", "original_file_path": "macros/materializations/incremental_strategies.sql", "name": "postgres__get_incremental_default_sql", "macro_sql": "{% macro postgres__get_incremental_default_sql(arg_dict) %}\n\n {% if arg_dict[\"unique_key\"] %}\n {% do return(get_incremental_delete_insert_sql(arg_dict)) %}\n {% else %}\n {% do return(get_incremental_append_sql(arg_dict)) %}\n {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_incremental_delete_insert_sql", "macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.299883, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_merge_sql": {"unique_id": "macro.dbt_postgres.postgres__snapshot_merge_sql", "package_name": "dbt_postgres", "path": "macros/materializations/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshot_merge.sql", "name": "postgres__snapshot_merge_sql", "macro_sql": "{% macro postgres__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n update {{ target }}\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_scd_id::text = {{ target }}.dbt_scd_id::text\n and DBT_INTERNAL_SOURCE.dbt_change_type::text in ('update'::text, 'delete'::text)\n and {{ target }}.dbt_valid_to is null;\n\n insert into {{ target }} ({{ insert_cols_csv }})\n select {% for column in insert_cols -%}\n DBT_INTERNAL_SOURCE.{{ column }} {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_change_type::text = 'insert'::text;\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3019001, "supported_languages": null}, "macro.dbt_postgres.postgres__dateadd": {"unique_id": "macro.dbt_postgres.postgres__dateadd", "package_name": "dbt_postgres", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "name": "postgres__dateadd", "macro_sql": "{% macro postgres__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n {{ from_date_or_timestamp }} + ((interval '1 {{ datepart }}') * ({{ interval }}))\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.302811, "supported_languages": null}, "macro.dbt_postgres.postgres__listagg": {"unique_id": "macro.dbt_postgres.postgres__listagg", "package_name": "dbt_postgres", "path": "macros/utils/listagg.sql", 
"original_file_path": "macros/utils/listagg.sql", "name": "postgres__listagg", "macro_sql": "{% macro postgres__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n (array_agg(\n {{ measure }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n ))[1:{{ limit_num }}],\n {{ delimiter_text }}\n )\n {%- else %}\n string_agg(\n {{ measure }},\n {{ delimiter_text }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n )\n {%- endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.304598, "supported_languages": null}, "macro.dbt_postgres.postgres__datediff": {"unique_id": "macro.dbt_postgres.postgres__datediff", "package_name": "dbt_postgres", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "name": "postgres__datediff", "macro_sql": "{% macro postgres__datediff(first_date, second_date, datepart) -%}\n\n {% if datepart == 'year' %}\n (date_part('year', ({{second_date}})::date) - date_part('year', ({{first_date}})::date))\n {% elif datepart == 'quarter' %}\n ({{ datediff(first_date, second_date, 'year') }} * 4 + date_part('quarter', ({{second_date}})::date) - date_part('quarter', ({{first_date}})::date))\n {% elif datepart == 'month' %}\n ({{ datediff(first_date, second_date, 'year') }} * 12 + date_part('month', ({{second_date}})::date) - date_part('month', ({{first_date}})::date))\n {% elif datepart == 'day' %}\n (({{second_date}})::date - ({{first_date}})::date)\n {% elif datepart == 'week' %}\n ({{ datediff(first_date, second_date, 'day') }} / 7 + case\n when date_part('dow', ({{first_date}})::timestamp) <= date_part('dow', ({{second_date}})::timestamp) then\n case when {{first_date}} <= {{second_date}} then 0 else -1 end\n else\n case when {{first_date}} <= {{second_date}} then 1 else 0 end\n end)\n {% elif datepart == 'hour' %}\n ({{ datediff(first_date, second_date, 'day') }} * 24 + date_part('hour', ({{second_date}})::timestamp) - date_part('hour', ({{first_date}})::timestamp))\n {% elif datepart == 'minute' %}\n ({{ datediff(first_date, second_date, 'hour') }} * 60 + date_part('minute', ({{second_date}})::timestamp) - date_part('minute', ({{first_date}})::timestamp))\n {% elif datepart == 'second' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60 + floor(date_part('second', ({{second_date}})::timestamp)) - floor(date_part('second', ({{first_date}})::timestamp)))\n {% elif datepart == 'millisecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000 + floor(date_part('millisecond', ({{second_date}})::timestamp)) - floor(date_part('millisecond', ({{first_date}})::timestamp)))\n {% elif datepart == 'microsecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000000 + floor(date_part('microsecond', ({{second_date}})::timestamp)) - floor(date_part('microsecond', ({{first_date}})::timestamp)))\n {% else %}\n {{ exceptions.raise_compiler_error(\"Unsupported datepart for macro datediff in postgres: {!r}\".format(datepart)) }}\n {% endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.31184, "supported_languages": null}, "macro.dbt_postgres.postgres__any_value": 
{"unique_id": "macro.dbt_postgres.postgres__any_value", "package_name": "dbt_postgres", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "name": "postgres__any_value", "macro_sql": "{% macro postgres__any_value(expression) -%}\n\n min({{ expression }})\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.312644, "supported_languages": null}, "macro.dbt_postgres.postgres__last_day": {"unique_id": "macro.dbt_postgres.postgres__last_day", "package_name": "dbt_postgres", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "name": "postgres__last_day", "macro_sql": "{% macro postgres__last_day(date, datepart) -%}\n\n {%- if datepart == 'quarter' -%}\n -- postgres dateadd does not support quarter interval.\n cast(\n {{dbt.dateadd('day', '-1',\n dbt.dateadd('month', '3', dbt.date_trunc(datepart, date))\n )}}\n as date)\n {%- else -%}\n {{dbt.default_last_day(date, datepart)}}\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc", "macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.314328, "supported_languages": null}, "macro.dbt_postgres.postgres__split_part": {"unique_id": "macro.dbt_postgres.postgres__split_part", "package_name": "dbt_postgres", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "name": "postgres__split_part", "macro_sql": "{% macro postgres__split_part(string_text, delimiter_text, part_number) %}\n\n {% if part_number >= 0 %}\n {{ dbt.default__split_part(string_text, delimiter_text, part_number) }}\n {% else %}\n {{ dbt._split_part_negative(string_text, delimiter_text, part_number) }}\n {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__split_part", "macro.dbt._split_part_negative"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3157928, "supported_languages": null}, "macro.dbt.run_hooks": {"unique_id": "macro.dbt.run_hooks", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "run_hooks", "macro_sql": "{% macro run_hooks(hooks, inside_transaction=True) %}\n {% for hook in hooks | selectattr('transaction', 'equalto', inside_transaction) %}\n {% if not inside_transaction and loop.first %}\n {% call statement(auto_begin=inside_transaction) %}\n commit;\n {% endcall %}\n {% endif %}\n {% set rendered = render(hook.get('sql')) | trim %}\n {% if (rendered | length) > 0 %}\n {% call statement(auto_begin=inside_transaction) %}\n {{ rendered }}\n {% endcall %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.318797, "supported_languages": null}, "macro.dbt.make_hook_config": {"unique_id": "macro.dbt.make_hook_config", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": 
"macros/materializations/hooks.sql", "name": "make_hook_config", "macro_sql": "{% macro make_hook_config(sql, inside_transaction) %}\n {{ tojson({\"sql\": sql, \"transaction\": inside_transaction}) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.319341, "supported_languages": null}, "macro.dbt.before_begin": {"unique_id": "macro.dbt.before_begin", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "before_begin", "macro_sql": "{% macro before_begin(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.319724, "supported_languages": null}, "macro.dbt.in_transaction": {"unique_id": "macro.dbt.in_transaction", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "in_transaction", "macro_sql": "{% macro in_transaction(sql) %}\n {{ make_hook_config(sql, inside_transaction=True) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.320093, "supported_languages": null}, "macro.dbt.after_commit": {"unique_id": "macro.dbt.after_commit", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "after_commit", "macro_sql": "{% macro after_commit(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.320464, "supported_languages": null}, "macro.dbt.set_sql_header": {"unique_id": "macro.dbt.set_sql_header", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "name": "set_sql_header", "macro_sql": "{% macro set_sql_header(config) -%}\n {{ config.set('sql_header', caller()) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.321773, "supported_languages": null}, "macro.dbt.should_full_refresh": {"unique_id": "macro.dbt.should_full_refresh", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "name": "should_full_refresh", "macro_sql": "{% macro should_full_refresh() %}\n {% set config_full_refresh = config.get('full_refresh') %}\n {% if config_full_refresh is none %}\n {% set config_full_refresh = flags.FULL_REFRESH %}\n {% endif %}\n {% do return(config_full_refresh) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1667573258.322558, "supported_languages": null}, "macro.dbt.should_store_failures": {"unique_id": "macro.dbt.should_store_failures", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "name": "should_store_failures", "macro_sql": "{% macro should_store_failures() %}\n {% set config_store_failures = config.get('store_failures') %}\n {% if config_store_failures is none %}\n {% set config_store_failures = flags.STORE_FAILURES %}\n {% endif %}\n {% do return(config_store_failures) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.323475, "supported_languages": null}, "macro.dbt.snapshot_merge_sql": {"unique_id": "macro.dbt.snapshot_merge_sql", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "name": "snapshot_merge_sql", "macro_sql": "{% macro snapshot_merge_sql(target, source, insert_cols) -%}\n {{ adapter.dispatch('snapshot_merge_sql', 'dbt')(target, source, insert_cols) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.324824, "supported_languages": null}, "macro.dbt.default__snapshot_merge_sql": {"unique_id": "macro.dbt.default__snapshot_merge_sql", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "name": "default__snapshot_merge_sql", "macro_sql": "{% macro default__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on DBT_INTERNAL_SOURCE.dbt_scd_id = DBT_INTERNAL_DEST.dbt_scd_id\n\n when matched\n and DBT_INTERNAL_DEST.dbt_valid_to is null\n and DBT_INTERNAL_SOURCE.dbt_change_type in ('update', 'delete')\n then update\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n\n when not matched\n and DBT_INTERNAL_SOURCE.dbt_change_type = 'insert'\n then insert ({{ insert_cols_csv }})\n values ({{ insert_cols_csv }})\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3254972, "supported_languages": null}, "macro.dbt.strategy_dispatch": {"unique_id": "macro.dbt.strategy_dispatch", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "strategy_dispatch", "macro_sql": "{% macro strategy_dispatch(name) -%}\n{% set original_name = name %}\n {% if '.' 
in name %}\n {% set package_name, name = name.split(\".\", 1) %}\n {% else %}\n {% set package_name = none %}\n {% endif %}\n\n {% if package_name is none %}\n {% set package_context = context %}\n {% elif package_name in context %}\n {% set package_context = context[package_name] %}\n {% else %}\n {% set error_msg %}\n Could not find package '{{package_name}}', called with '{{original_name}}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n\n {%- set search_name = 'snapshot_' ~ name ~ '_strategy' -%}\n\n {% if search_name not in package_context %}\n {% set error_msg %}\n The specified strategy macro '{{name}}' was not found in package '{{ package_name }}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n {{ return(package_context[search_name]) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3332338, "supported_languages": null}, "macro.dbt.snapshot_hash_arguments": {"unique_id": "macro.dbt.snapshot_hash_arguments", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_hash_arguments", "macro_sql": "{% macro snapshot_hash_arguments(args) -%}\n {{ adapter.dispatch('snapshot_hash_arguments', 'dbt')(args) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.333798, "supported_languages": null}, "macro.dbt.default__snapshot_hash_arguments": {"unique_id": "macro.dbt.default__snapshot_hash_arguments", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "default__snapshot_hash_arguments", "macro_sql": "{% macro default__snapshot_hash_arguments(args) -%}\n md5({%- for arg in args -%}\n coalesce(cast({{ arg }} as varchar ), '')\n {% if not loop.last %} || '|' || {% endif %}\n {%- endfor -%})\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3343842, "supported_languages": null}, "macro.dbt.snapshot_timestamp_strategy": {"unique_id": "macro.dbt.snapshot_timestamp_strategy", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_timestamp_strategy", "macro_sql": "{% macro snapshot_timestamp_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set primary_key = config['unique_key'] %}\n {% set updated_at = config['updated_at'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n\n {#/*\n The snapshot relation might not have an {{ updated_at }} value if the\n snapshot strategy is changed from `check` to `timestamp`. 
We\n should use a dbt-created column for the comparison in the snapshot\n table instead of assuming that the user-supplied {{ updated_at }}\n will be present in the historical data.\n\n See https://github.com/dbt-labs/dbt-core/issues/2350\n */ #}\n {% set row_changed_expr -%}\n ({{ snapshotted_rel }}.dbt_valid_from < {{ current_rel }}.{{ updated_at }})\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.336249, "supported_languages": null}, "macro.dbt.snapshot_string_as_time": {"unique_id": "macro.dbt.snapshot_string_as_time", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_string_as_time", "macro_sql": "{% macro snapshot_string_as_time(timestamp) -%}\n {{ adapter.dispatch('snapshot_string_as_time', 'dbt')(timestamp) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_string_as_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.336672, "supported_languages": null}, "macro.dbt.default__snapshot_string_as_time": {"unique_id": "macro.dbt.default__snapshot_string_as_time", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "default__snapshot_string_as_time", "macro_sql": "{% macro default__snapshot_string_as_time(timestamp) %}\n {% do exceptions.raise_not_implemented(\n 'snapshot_string_as_time macro not implemented for adapter '+adapter.type()\n ) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.337124, "supported_languages": null}, "macro.dbt.snapshot_check_all_get_existing_columns": {"unique_id": "macro.dbt.snapshot_check_all_get_existing_columns", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_check_all_get_existing_columns", "macro_sql": "{% macro snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) -%}\n {%- if not target_exists -%}\n {#-- no table yet -> return whatever the query does --#}\n {{ return((false, query_columns)) }}\n {%- endif -%}\n\n {#-- handle any schema changes --#}\n {%- set target_relation = adapter.get_relation(database=node.database, schema=node.schema, identifier=node.alias) -%}\n\n {% if check_cols_config == 'all' %}\n {%- set query_columns = get_columns_in_query(node['compiled_code']) -%}\n\n {% elif check_cols_config is iterable and (check_cols_config | length) > 0 %}\n {#-- query for proper casing/quoting, to support comparison below --#}\n {%- set select_check_cols_from_target -%}\n select {{ check_cols_config | join(', ') }} from ({{ 
node['compiled_code'] }}) subq\n {%- endset -%}\n {% set query_columns = get_columns_in_query(select_check_cols_from_target) %}\n\n {% else %}\n {% do exceptions.raise_compiler_error(\"Invalid value for 'check_cols': \" ~ check_cols_config) %}\n {% endif %}\n\n {%- set existing_cols = adapter.get_columns_in_relation(target_relation) | map(attribute = 'name') | list -%}\n {%- set ns = namespace() -%} {#-- handle for-loop scoping with a namespace --#}\n {%- set ns.column_added = false -%}\n\n {%- set intersection = [] -%}\n {%- for col in query_columns -%}\n {%- if col in existing_cols -%}\n {%- do intersection.append(adapter.quote(col)) -%}\n {%- else -%}\n {% set ns.column_added = true %}\n {%- endif -%}\n {%- endfor -%}\n {{ return((ns.column_added, intersection)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.340634, "supported_languages": null}, "macro.dbt.snapshot_check_strategy": {"unique_id": "macro.dbt.snapshot_check_strategy", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_check_strategy", "macro_sql": "{% macro snapshot_check_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set check_cols_config = config['check_cols'] %}\n {% set primary_key = config['unique_key'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n {% set updated_at = config.get('updated_at', snapshot_get_time()) %}\n\n {% set column_added = false %}\n\n {% set column_added, check_cols = snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) %}\n\n {%- set row_changed_expr -%}\n (\n {%- if column_added -%}\n {{ get_true_sql() }}\n {%- else -%}\n {%- for col in check_cols -%}\n {{ snapshotted_rel }}.{{ col }} != {{ current_rel }}.{{ col }}\n or\n (\n (({{ snapshotted_rel }}.{{ col }} is null) and not ({{ current_rel }}.{{ col }} is null))\n or\n ((not {{ snapshotted_rel }}.{{ col }} is null) and ({{ current_rel }}.{{ col }} is null))\n )\n {%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n {%- endif -%}\n )\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.snapshot_get_time", "macro.dbt.snapshot_check_all_get_existing_columns", "macro.dbt.get_true_sql", "macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.34407, "supported_languages": null}, "macro.dbt.create_columns": {"unique_id": "macro.dbt.create_columns", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "create_columns", "macro_sql": "{% macro create_columns(relation, columns) %}\n {{ adapter.dispatch('create_columns', 'dbt')(relation, columns) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__create_columns"]}, 
"description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.351994, "supported_languages": null}, "macro.dbt.default__create_columns": {"unique_id": "macro.dbt.default__create_columns", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__create_columns", "macro_sql": "{% macro default__create_columns(relation, columns) %}\n {% for column in columns %}\n {% call statement() %}\n alter table {{ relation }} add column \"{{ column.name }}\" {{ column.data_type }};\n {% endcall %}\n {% endfor %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.35276, "supported_languages": null}, "macro.dbt.post_snapshot": {"unique_id": "macro.dbt.post_snapshot", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "post_snapshot", "macro_sql": "{% macro post_snapshot(staging_relation) %}\n {{ adapter.dispatch('post_snapshot', 'dbt')(staging_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.353195, "supported_languages": null}, "macro.dbt.default__post_snapshot": {"unique_id": "macro.dbt.default__post_snapshot", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__post_snapshot", "macro_sql": "{% macro default__post_snapshot(staging_relation) %}\n {# no-op #}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.353436, "supported_languages": null}, "macro.dbt.get_true_sql": {"unique_id": "macro.dbt.get_true_sql", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "get_true_sql", "macro_sql": "{% macro get_true_sql() %}\n {{ adapter.dispatch('get_true_sql', 'dbt')() }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_true_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3538182, "supported_languages": null}, "macro.dbt.default__get_true_sql": {"unique_id": "macro.dbt.default__get_true_sql", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__get_true_sql", "macro_sql": "{% macro default__get_true_sql() %}\n {{ return('TRUE') }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3541272, "supported_languages": null}, "macro.dbt.snapshot_staging_table": {"unique_id": 
"macro.dbt.snapshot_staging_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "snapshot_staging_table", "macro_sql": "{% macro snapshot_staging_table(strategy, source_sql, target_relation) -%}\n {{ adapter.dispatch('snapshot_staging_table', 'dbt')(strategy, source_sql, target_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__snapshot_staging_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3546538, "supported_languages": null}, "macro.dbt.default__snapshot_staging_table": {"unique_id": "macro.dbt.default__snapshot_staging_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__snapshot_staging_table", "macro_sql": "{% macro default__snapshot_staging_table(strategy, source_sql, target_relation) -%}\n\n with snapshot_query as (\n\n {{ source_sql }}\n\n ),\n\n snapshotted_data as (\n\n select *,\n {{ strategy.unique_key }} as dbt_unique_key\n\n from {{ target_relation }}\n where dbt_valid_to is null\n\n ),\n\n insertions_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to,\n {{ strategy.scd_id }} as dbt_scd_id\n\n from snapshot_query\n ),\n\n updates_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n {{ strategy.updated_at }} as dbt_valid_to\n\n from snapshot_query\n ),\n\n {%- if strategy.invalidate_hard_deletes %}\n\n deletes_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key\n from snapshot_query\n ),\n {% endif %}\n\n insertions as (\n\n select\n 'insert' as dbt_change_type,\n source_data.*\n\n from insertions_source_data as source_data\n left outer join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where snapshotted_data.dbt_unique_key is null\n or (\n snapshotted_data.dbt_unique_key is not null\n and (\n {{ strategy.row_changed }}\n )\n )\n\n ),\n\n updates as (\n\n select\n 'update' as dbt_change_type,\n source_data.*,\n snapshotted_data.dbt_scd_id\n\n from updates_source_data as source_data\n join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where (\n {{ strategy.row_changed }}\n )\n )\n\n {%- if strategy.invalidate_hard_deletes -%}\n ,\n\n deletes as (\n\n select\n 'delete' as dbt_change_type,\n source_data.*,\n {{ snapshot_get_time() }} as dbt_valid_from,\n {{ snapshot_get_time() }} as dbt_updated_at,\n {{ snapshot_get_time() }} as dbt_valid_to,\n snapshotted_data.dbt_scd_id\n\n from snapshotted_data\n left join deletes_source_data as source_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where source_data.dbt_unique_key is null\n )\n {%- endif %}\n\n select * from insertions\n union all\n select * from updates\n {%- if strategy.invalidate_hard_deletes %}\n union all\n select * from deletes\n {%- endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.snapshot_get_time"]}, "description": "", 
"meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.356801, "supported_languages": null}, "macro.dbt.build_snapshot_table": {"unique_id": "macro.dbt.build_snapshot_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "build_snapshot_table", "macro_sql": "{% macro build_snapshot_table(strategy, sql) -%}\n {{ adapter.dispatch('build_snapshot_table', 'dbt')(strategy, sql) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__build_snapshot_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.357286, "supported_languages": null}, "macro.dbt.default__build_snapshot_table": {"unique_id": "macro.dbt.default__build_snapshot_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__build_snapshot_table", "macro_sql": "{% macro default__build_snapshot_table(strategy, sql) %}\n\n select *,\n {{ strategy.scd_id }} as dbt_scd_id,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to\n from (\n {{ sql }}\n ) sbq\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.357929, "supported_languages": null}, "macro.dbt.build_snapshot_staging_table": {"unique_id": "macro.dbt.build_snapshot_staging_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "build_snapshot_staging_table", "macro_sql": "{% macro build_snapshot_staging_table(strategy, sql, target_relation) %}\n {% set temp_relation = make_temp_relation(target_relation) %}\n\n {% set select = snapshot_staging_table(strategy, sql, target_relation) %}\n\n {% call statement('build_snapshot_staging_relation') %}\n {{ create_table_as(True, temp_relation, select) }}\n {% endcall %}\n\n {% do return(temp_relation) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.make_temp_relation", "macro.dbt.snapshot_staging_table", "macro.dbt.statement", "macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.359015, "supported_languages": null}, "macro.dbt.materialization_snapshot_default": {"unique_id": "macro.dbt.materialization_snapshot_default", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot.sql", "original_file_path": "macros/materializations/snapshots/snapshot.sql", "name": "materialization_snapshot_default", "macro_sql": "{% materialization snapshot, default %}\n {%- set config = model['config'] -%}\n\n {%- set target_table = model.get('alias', model.get('name')) -%}\n\n {%- set strategy_name = config.get('strategy') -%}\n {%- set unique_key = config.get('unique_key') %}\n -- grab current tables grants config for comparision later on\n {%- set grant_config = config.get('grants') -%}\n\n {% set target_relation_exists, target_relation = 
get_or_create_relation(\n database=model.database,\n schema=model.schema,\n identifier=target_table,\n type='table') -%}\n\n {%- if not target_relation.is_table -%}\n {% do exceptions.relation_wrong_type(target_relation, 'table') %}\n {%- endif -%}\n\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set strategy_macro = strategy_dispatch(strategy_name) %}\n {% set strategy = strategy_macro(model, \"snapshotted_data\", \"source_data\", config, target_relation_exists) %}\n\n {% if not target_relation_exists %}\n\n {% set build_sql = build_snapshot_table(strategy, model['compiled_code']) %}\n {% set final_sql = create_table_as(False, target_relation, build_sql) %}\n\n {% else %}\n\n {{ adapter.valid_snapshot_target(target_relation) }}\n\n {% set staging_table = build_snapshot_staging_table(strategy, sql, target_relation) %}\n\n -- this may no-op if the database does not require column expansion\n {% do adapter.expand_target_column_types(from_relation=staging_table,\n to_relation=target_relation) %}\n\n {% set missing_columns = adapter.get_missing_columns(staging_table, target_relation)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% do create_columns(target_relation, missing_columns) %}\n\n {% set source_columns = adapter.get_columns_in_relation(staging_table)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% set quoted_source_columns = [] %}\n {% for column in source_columns %}\n {% do quoted_source_columns.append(adapter.quote(column.name)) %}\n {% endfor %}\n\n {% set final_sql = snapshot_merge_sql(\n target = target_relation,\n source = staging_table,\n insert_cols = quoted_source_columns\n )\n %}\n\n {% endif %}\n\n {% call statement('main') %}\n {{ final_sql }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(target_relation_exists, full_refresh_mode=False) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if not target_relation_exists %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {% if staging_table is defined %}\n {% do post_snapshot(staging_table) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_or_create_relation", "macro.dbt.run_hooks", "macro.dbt.strategy_dispatch", "macro.dbt.build_snapshot_table", "macro.dbt.create_table_as", "macro.dbt.build_snapshot_staging_table", "macro.dbt.create_columns", "macro.dbt.snapshot_merge_sql", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes", "macro.dbt.post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.37277, "supported_languages": ["sql"]}, "macro.dbt.materialization_test_default": {"unique_id": "macro.dbt.materialization_test_default", "package_name": "dbt", 
"path": "macros/materializations/tests/test.sql", "original_file_path": "macros/materializations/tests/test.sql", "name": "materialization_test_default", "macro_sql": "{%- materialization test, default -%}\n\n {% set relations = [] %}\n\n {% if should_store_failures() %}\n\n {% set identifier = model['alias'] %}\n {% set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n {% set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database, type='table') -%} %}\n\n {% if old_relation %}\n {% do adapter.drop_relation(old_relation) %}\n {% endif %}\n\n {% call statement(auto_begin=True) %}\n {{ create_table_as(False, target_relation, sql) }}\n {% endcall %}\n\n {% do relations.append(target_relation) %}\n\n {% set main_sql %}\n select *\n from {{ target_relation }}\n {% endset %}\n\n {{ adapter.commit() }}\n\n {% else %}\n\n {% set main_sql = sql %}\n\n {% endif %}\n\n {% set limit = config.get('limit') %}\n {% set fail_calc = config.get('fail_calc') %}\n {% set warn_if = config.get('warn_if') %}\n {% set error_if = config.get('error_if') %}\n\n {% call statement('main', fetch_result=True) -%}\n\n {{ get_test_sql(main_sql, fail_calc, warn_if, error_if, limit)}}\n\n {%- endcall %}\n\n {{ return({'relations': relations}) }}\n\n{%- endmaterialization -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.should_store_failures", "macro.dbt.statement", "macro.dbt.create_table_as", "macro.dbt.get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3780842, "supported_languages": ["sql"]}, "macro.dbt.get_test_sql": {"unique_id": "macro.dbt.get_test_sql", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "name": "get_test_sql", "macro_sql": "{% macro get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n {{ adapter.dispatch('get_test_sql', 'dbt')(main_sql, fail_calc, warn_if, error_if, limit) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.379586, "supported_languages": null}, "macro.dbt.default__get_test_sql": {"unique_id": "macro.dbt.default__get_test_sql", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "name": "default__get_test_sql", "macro_sql": "{% macro default__get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n select\n {{ fail_calc }} as failures,\n {{ fail_calc }} {{ warn_if }} as should_warn,\n {{ fail_calc }} {{ error_if }} as should_error\n from (\n {{ main_sql }}\n {{ \"limit \" ~ limit if limit != none }}\n ) dbt_internal_test\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3803918, "supported_languages": null}, "macro.dbt.get_where_subquery": {"unique_id": "macro.dbt.get_where_subquery", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "name": "get_where_subquery", "macro_sql": "{% 
macro get_where_subquery(relation) -%}\n {% do return(adapter.dispatch('get_where_subquery', 'dbt')(relation)) %}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_where_subquery"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.381695, "supported_languages": null}, "macro.dbt.default__get_where_subquery": {"unique_id": "macro.dbt.default__get_where_subquery", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "name": "default__get_where_subquery", "macro_sql": "{% macro default__get_where_subquery(relation) -%}\n {% set where = config.get('where', '') %}\n {% if where %}\n {%- set filtered -%}\n (select * from {{ relation }} where {{ where }}) dbt_subquery\n {%- endset -%}\n {% do return(filtered) %}\n {%- else -%}\n {% do return(relation) %}\n {%- endif -%}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3826728, "supported_languages": null}, "macro.dbt.get_quoted_csv": {"unique_id": "macro.dbt.get_quoted_csv", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "get_quoted_csv", "macro_sql": "{% macro get_quoted_csv(column_names) %}\n\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote(col)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.386467, "supported_languages": null}, "macro.dbt.diff_columns": {"unique_id": "macro.dbt.diff_columns", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "diff_columns", "macro_sql": "{% macro diff_columns(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% set source_names = source_columns | map(attribute = 'column') | list %}\n {% set target_names = target_columns | map(attribute = 'column') | list %}\n\n {# --check whether the name attribute exists in the target - this does not perform a data type check #}\n {% for sc in source_columns %}\n {% if sc.name not in target_names %}\n {{ result.append(sc) }}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.387875, "supported_languages": null}, "macro.dbt.diff_column_data_types": {"unique_id": "macro.dbt.diff_column_data_types", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "diff_column_data_types", "macro_sql": "{% macro diff_column_data_types(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% for 
sc in source_columns %}\n {% set tc = target_columns | selectattr(\"name\", \"equalto\", sc.name) | list | first %}\n {% if tc %}\n {% if sc.data_type != tc.data_type and not sc.can_expand_to(other_column=tc) %}\n {{ result.append( { 'column_name': tc.name, 'new_type': sc.data_type } ) }}\n {% endif %}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.389664, "supported_languages": null}, "macro.dbt.get_merge_update_columns": {"unique_id": "macro.dbt.get_merge_update_columns", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "get_merge_update_columns", "macro_sql": "{% macro get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {{ return(adapter.dispatch('get_merge_update_columns', 'dbt')(merge_update_columns, merge_exclude_columns, dest_columns)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.390273, "supported_languages": null}, "macro.dbt.default__get_merge_update_columns": {"unique_id": "macro.dbt.default__get_merge_update_columns", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "default__get_merge_update_columns", "macro_sql": "{% macro default__get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {%- set default_cols = dest_columns | map(attribute=\"quoted\") | list -%}\n\n {%- if merge_update_columns and merge_exclude_columns -%}\n {{ exceptions.raise_compiler_error(\n 'Model cannot specify merge_update_columns and merge_exclude_columns. 
Please update model to use only one config'\n )}}\n {%- elif merge_update_columns -%}\n {%- set update_columns = merge_update_columns -%}\n {%- elif merge_exclude_columns -%}\n {%- set update_columns = [] -%}\n {%- for column in dest_columns -%}\n {% if column.column | lower not in merge_exclude_columns | map(\"lower\") | list %}\n {%- do update_columns.append(column.quoted) -%}\n {% endif %}\n {%- endfor -%}\n {%- else -%}\n {%- set update_columns = default_cols -%}\n {%- endif -%}\n\n {{ return(update_columns) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.392107, "supported_languages": null}, "macro.dbt.get_merge_sql": {"unique_id": "macro.dbt.get_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "get_merge_sql", "macro_sql": "{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%}\n {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.400875, "supported_languages": null}, "macro.dbt.default__get_merge_sql": {"unique_id": "macro.dbt.default__get_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "default__get_merge_sql", "macro_sql": "{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set merge_update_columns = config.get('merge_update_columns') -%}\n {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}\n {%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not mapping and unique_key is not string %}\n {% for key in unique_key %}\n {% set this_key_match %}\n DBT_INTERNAL_SOURCE.{{ key }} = DBT_INTERNAL_DEST.{{ key }}\n {% endset %}\n {% do predicates.append(this_key_match) %}\n {% endfor %}\n {% else %}\n {% set unique_key_match %}\n DBT_INTERNAL_SOURCE.{{ unique_key }} = DBT_INTERNAL_DEST.{{ unique_key }}\n {% endset %}\n {% do predicates.append(unique_key_match) %}\n {% endif %}\n {% else %}\n {% do predicates.append('FALSE') %}\n {% endif %}\n\n {{ sql_header if sql_header is not none }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on {{ predicates | join(' and ') }}\n\n {% if unique_key %}\n when matched then update set\n {% for column_name in update_columns -%}\n {{ column_name }} = DBT_INTERNAL_SOURCE.{{ column_name }}\n {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n {% endif %}\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": 
{"macros": ["macro.dbt.get_quoted_csv", "macro.dbt.get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4049711, "supported_languages": null}, "macro.dbt.get_delete_insert_merge_sql": {"unique_id": "macro.dbt.get_delete_insert_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "get_delete_insert_merge_sql", "macro_sql": "{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4055731, "supported_languages": null}, "macro.dbt.default__get_delete_insert_merge_sql": {"unique_id": "macro.dbt.default__get_delete_insert_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "default__get_delete_insert_merge_sql", "macro_sql": "{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not string %}\n delete from {{target }}\n using {{ source }}\n where (\n {% for key in unique_key %}\n {{ source }}.{{ key }} = {{ target }}.{{ key }}\n {{ \"and \" if not loop.last }}\n {% endfor %}\n );\n {% else %}\n delete from {{ target }}\n where (\n {{ unique_key }}) in (\n select ({{ unique_key }})\n from {{ source }}\n );\n\n {% endif %}\n {% endif %}\n\n insert into {{ target }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ source }}\n )\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.407451, "supported_languages": null}, "macro.dbt.get_insert_overwrite_merge_sql": {"unique_id": "macro.dbt.get_insert_overwrite_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "get_insert_overwrite_merge_sql", "macro_sql": "{% macro get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header=false) -%}\n {{ adapter.dispatch('get_insert_overwrite_merge_sql', 'dbt')(target, source, dest_columns, predicates, include_sql_header) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.408113, "supported_languages": null}, "macro.dbt.default__get_insert_overwrite_merge_sql": {"unique_id": "macro.dbt.default__get_insert_overwrite_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": 
"macros/materializations/models/incremental/merge.sql", "name": "default__get_insert_overwrite_merge_sql", "macro_sql": "{% macro default__get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header) -%}\n {#-- The only time include_sql_header is True: --#}\n {#-- BigQuery + insert_overwrite strategy + \"static\" partitions config --#}\n {#-- We should consider including the sql header at the materialization level instead --#}\n\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none and include_sql_header }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on FALSE\n\n when not matched by source\n {% if predicates %} and {{ predicates | join(' and ') }} {% endif %}\n then delete\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4097211, "supported_languages": null}, "macro.dbt.is_incremental": {"unique_id": "macro.dbt.is_incremental", "package_name": "dbt", "path": "macros/materializations/models/incremental/is_incremental.sql", "original_file_path": "macros/materializations/models/incremental/is_incremental.sql", "name": "is_incremental", "macro_sql": "{% macro is_incremental() %}\n {#-- do not run introspective queries in parsing #}\n {% if not execute %}\n {{ return(False) }}\n {% else %}\n {% set relation = adapter.get_relation(this.database, this.schema, this.table) %}\n {{ return(relation is not none\n and relation.type == 'table'\n and model.config.materialized == 'incremental'\n and not should_full_refresh()) }}\n {% endif %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.should_full_refresh"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.411711, "supported_languages": null}, "macro.dbt.get_incremental_append_sql": {"unique_id": "macro.dbt.get_incremental_append_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_append_sql", "macro_sql": "{% macro get_incremental_append_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_append_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.413886, "supported_languages": null}, "macro.dbt.default__get_incremental_append_sql": {"unique_id": "macro.dbt.default__get_incremental_append_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_append_sql", "macro_sql": "{% macro default__get_incremental_append_sql(arg_dict) %}\n\n {% do 
return(get_insert_into_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_insert_into_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.414494, "supported_languages": null}, "macro.dbt.get_incremental_delete_insert_sql": {"unique_id": "macro.dbt.get_incremental_delete_insert_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_delete_insert_sql", "macro_sql": "{% macro get_incremental_delete_insert_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_delete_insert_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_incremental_delete_insert_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.414966, "supported_languages": null}, "macro.dbt.default__get_incremental_delete_insert_sql": {"unique_id": "macro.dbt.default__get_incremental_delete_insert_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_delete_insert_sql", "macro_sql": "{% macro default__get_incremental_delete_insert_sql(arg_dict) %}\n\n {% do return(get_delete_insert_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4156349, "supported_languages": null}, "macro.dbt.get_incremental_merge_sql": {"unique_id": "macro.dbt.get_incremental_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_merge_sql", "macro_sql": "{% macro get_incremental_merge_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_merge_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_incremental_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.416111, "supported_languages": null}, "macro.dbt.default__get_incremental_merge_sql": {"unique_id": "macro.dbt.default__get_incremental_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_merge_sql", "macro_sql": "{% macro default__get_incremental_merge_sql(arg_dict) %}\n\n {% do return(get_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_merge_sql"]}, "description": 
"", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.416785, "supported_languages": null}, "macro.dbt.get_incremental_insert_overwrite_sql": {"unique_id": "macro.dbt.get_incremental_insert_overwrite_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_insert_overwrite_sql", "macro_sql": "{% macro get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_insert_overwrite_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_incremental_insert_overwrite_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4172869, "supported_languages": null}, "macro.dbt.default__get_incremental_insert_overwrite_sql": {"unique_id": "macro.dbt.default__get_incremental_insert_overwrite_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_insert_overwrite_sql", "macro_sql": "{% macro default__get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {% do return(get_insert_overwrite_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"], arg_dict[\"predicates\"])) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.418015, "supported_languages": null}, "macro.dbt.get_incremental_default_sql": {"unique_id": "macro.dbt.get_incremental_default_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_default_sql", "macro_sql": "{% macro get_incremental_default_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_default_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_incremental_default_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.418626, "supported_languages": null}, "macro.dbt.default__get_incremental_default_sql": {"unique_id": "macro.dbt.default__get_incremental_default_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_default_sql", "macro_sql": "{% macro default__get_incremental_default_sql(arg_dict) %}\n\n {% do return(get_incremental_append_sql(arg_dict)) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.419035, "supported_languages": null}, "macro.dbt.get_insert_into_sql": {"unique_id": "macro.dbt.get_insert_into_sql", "package_name": "dbt", 
"path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_insert_into_sql", "macro_sql": "{% macro get_insert_into_sql(target_relation, temp_relation, dest_columns) %}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n insert into {{ target_relation }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ temp_relation }}\n )\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.419761, "supported_languages": null}, "macro.dbt.materialization_incremental_default": {"unique_id": "macro.dbt.materialization_incremental_default", "package_name": "dbt", "path": "macros/materializations/models/incremental/incremental.sql", "original_file_path": "macros/materializations/models/incremental/incremental.sql", "name": "materialization_incremental_default", "macro_sql": "{% materialization incremental, default -%}\n\n -- relations\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') -%}\n {%- set temp_relation = make_temp_relation(target_relation)-%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation)-%}\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n\n -- configs\n {%- set unique_key = config.get('unique_key') -%}\n {%- set full_refresh_mode = (should_full_refresh() or existing_relation.is_view) -%}\n {%- set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') -%}\n\n -- the temp_ and backup_ relations should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation. This has to happen before\n -- BEGIN, in a separate transaction\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation)-%}\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set to_drop = [] %}\n\n {% if existing_relation is none %}\n {% set build_sql = get_create_table_as_sql(False, target_relation, sql) %}\n {% elif full_refresh_mode %}\n {% set build_sql = get_create_table_as_sql(False, intermediate_relation, sql) %}\n {% set need_swap = true %}\n {% else %}\n {% do run_query(get_create_table_as_sql(True, temp_relation, sql)) %}\n {% do adapter.expand_target_column_types(\n from_relation=temp_relation,\n to_relation=target_relation) %}\n {#-- Process schema changes. Returns dict of changes if successful. 
Use source columns for upserting/merging --#}\n {% set dest_columns = process_schema_changes(on_schema_change, temp_relation, existing_relation) %}\n {% if not dest_columns %}\n {% set dest_columns = adapter.get_columns_in_relation(existing_relation) %}\n {% endif %}\n\n {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#}\n {% set incremental_strategy = config.get('incremental_strategy') or 'default' %}\n {% set incremental_predicates = config.get('incremental_predicates', none) %}\n {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %}\n {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %}\n {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %}\n\n {% endif %}\n\n {% call statement(\"main\") %}\n {{ build_sql }}\n {% endcall %}\n\n {% if need_swap %}\n {% do adapter.rename_relation(target_relation, backup_relation) %}\n {% do adapter.rename_relation(intermediate_relation, target_relation) %}\n {% do to_drop.append(backup_relation) %}\n {% endif %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if existing_relation is none or existing_relation.is_view or should_full_refresh() %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {% do adapter.commit() %}\n\n {% for rel in to_drop %}\n {% do adapter.drop_relation(rel) %}\n {% endfor %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_temp_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.should_full_refresh", "macro.dbt.incremental_validate_on_schema_change", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.get_create_table_as_sql", "macro.dbt.run_query", "macro.dbt.process_schema_changes", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.431493, "supported_languages": ["sql"]}, "macro.dbt.incremental_validate_on_schema_change": {"unique_id": "macro.dbt.incremental_validate_on_schema_change", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "name": "incremental_validate_on_schema_change", "macro_sql": "{% macro incremental_validate_on_schema_change(on_schema_change, default='ignore') %}\n\n {% if on_schema_change not in ['sync_all_columns', 'append_new_columns', 'fail', 'ignore'] %}\n\n {% set log_message = 'Invalid value for on_schema_change (%s) specified. Setting default value of %s.' 
% (on_schema_change, default) %}\n    {% do log(log_message) %}\n\n    {{ return(default) }}\n\n  {% else %}\n\n    {{ return(on_schema_change) }}\n\n  {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.441895, "supported_languages": null}, "macro.dbt.check_for_schema_changes": {"unique_id": "macro.dbt.check_for_schema_changes", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "name": "check_for_schema_changes", "macro_sql": "{% macro check_for_schema_changes(source_relation, target_relation) %}\n\n  {% set schema_changed = False %}\n\n  {%- set source_columns = adapter.get_columns_in_relation(source_relation) -%}\n  {%- set target_columns = adapter.get_columns_in_relation(target_relation) -%}\n  {%- set source_not_in_target = diff_columns(source_columns, target_columns) -%}\n  {%- set target_not_in_source = diff_columns(target_columns, source_columns) -%}\n\n  {% set new_target_types = diff_column_data_types(source_columns, target_columns) %}\n\n  {% if source_not_in_target != [] %}\n    {% set schema_changed = True %}\n  {% elif target_not_in_source != [] or new_target_types != [] %}\n    {% set schema_changed = True %}\n  {% endif %}\n\n  {% set changes_dict = {\n    'schema_changed': schema_changed,\n    'source_not_in_target': source_not_in_target,\n    'target_not_in_source': target_not_in_source,\n    'source_columns': source_columns,\n    'target_columns': target_columns,\n    'new_target_types': new_target_types\n  } %}\n\n  {% set msg %}\n    In {{ target_relation }}:\n        Schema changed: {{ schema_changed }}\n        Source columns not in target: {{ source_not_in_target }}\n        Target columns not in source: {{ target_not_in_source }}\n        New column types: {{ new_target_types }}\n  {% endset %}\n\n  {% do log(msg) %}\n\n  {{ return(changes_dict) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.diff_columns", "macro.dbt.diff_column_data_types"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.444941, "supported_languages": null}, "macro.dbt.sync_column_schemas": {"unique_id": "macro.dbt.sync_column_schemas", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "name": "sync_column_schemas", "macro_sql": "{% macro sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n  {%- set add_to_target_arr = schema_changes_dict['source_not_in_target'] -%}\n\n  {%- if on_schema_change == 'append_new_columns' -%}\n    {%- if add_to_target_arr | length > 0 -%}\n      {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, none) -%}\n    {%- endif -%}\n\n  {% elif on_schema_change == 'sync_all_columns' %}\n    {%- set remove_from_target_arr = schema_changes_dict['target_not_in_source'] -%}\n    {%- set new_target_types = schema_changes_dict['new_target_types'] -%}\n\n    {% if add_to_target_arr | length > 0 or remove_from_target_arr | length > 0 %}\n      {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, remove_from_target_arr) -%}\n    {% endif %}\n\n    {% if 
new_target_types != [] %}\n      {% for ntt in new_target_types %}\n        {% set column_name = ntt['column_name'] %}\n        {% set new_type = ntt['new_type'] %}\n        {% do alter_column_type(target_relation, column_name, new_type) %}\n      {% endfor %}\n    {% endif %}\n\n  {% endif %}\n\n  {% set schema_change_message %}\n    In {{ target_relation }}:\n        Schema change approach: {{ on_schema_change }}\n        Columns added: {{ add_to_target_arr }}\n        Columns removed: {{ remove_from_target_arr }}\n        Data types changed: {{ new_target_types }}\n  {% endset %}\n\n  {% do log(schema_change_message) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.alter_relation_add_remove_columns", "macro.dbt.alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.447967, "supported_languages": null}, "macro.dbt.process_schema_changes": {"unique_id": "macro.dbt.process_schema_changes", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "name": "process_schema_changes", "macro_sql": "{% macro process_schema_changes(on_schema_change, source_relation, target_relation) %}\n\n    {% if on_schema_change == 'ignore' %}\n\n     {{ return({}) }}\n\n    {% else %}\n\n     {% set schema_changes_dict = check_for_schema_changes(source_relation, target_relation) %}\n\n     {% if schema_changes_dict['schema_changed'] %}\n\n       {% if on_schema_change == 'fail' %}\n\n         {% set fail_msg %}\n              The source and target schemas on this incremental model are out of sync!\n              They can be reconciled in several ways:\n                - Set the `on_schema_change` config to either append_new_columns or sync_all_columns, depending on your situation.\n                - Re-run the incremental model with `full_refresh: True` to update the target schema.\n                - Update the schema manually and re-run the process.\n\n              Additional troubleshooting context:\n                 Source columns not in target: {{ schema_changes_dict['source_not_in_target'] }}\n                 Target columns not in source: {{ schema_changes_dict['target_not_in_source'] }}\n                 New column types: {{ schema_changes_dict['new_target_types'] }}\n         {% endset %}\n\n         {% do exceptions.raise_compiler_error(fail_msg) %}\n\n       {# -- unless we ignore, run the sync operation per the config #}\n       {% else %}\n\n         {% do sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n       {% endif %}\n\n     {% endif %}\n\n     {{ return(schema_changes_dict['source_columns']) }}\n\n    {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.check_for_schema_changes", "macro.dbt.sync_column_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4501061, "supported_languages": null}, "macro.dbt.materialization_table_default": {"unique_id": "macro.dbt.materialization_table_default", "package_name": "dbt", "path": "macros/materializations/models/table/table.sql", "original_file_path": "macros/materializations/models/table/table.sql", "name": "materialization_table_default", "macro_sql": "{% materialization table, default %}\n\n  {%- set existing_relation = load_cached_relation(this) -%}\n  {%- set target_relation = this.incorporate(type='table') %}\n  {%- set intermediate_relation =  make_intermediate_relation(target_relation) -%}\n  -- the intermediate_relation should not already exist in the database; 
get_relation\n  -- will return None in that case. Otherwise, we get a relation that we can drop\n  -- later, before we try to use this name for the current operation\n  {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n  /*\n      See ../view/view.sql for more information about this relation.\n  */\n  {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n  {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n  -- as above, the backup_relation should not already exist\n  {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n  -- grab the current table's grants config for comparison later on\n  {% set grant_config = config.get('grants') %}\n\n  -- drop the temp relations if they exist already in the database\n  {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n  {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n  {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n  -- `BEGIN` happens here:\n  {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n  -- build model\n  {% call statement('main') -%}\n    {{ get_create_table_as_sql(False, intermediate_relation, sql) }}\n  {%- endcall %}\n\n  -- cleanup\n  {% if existing_relation is not none %}\n     {{ adapter.rename_relation(existing_relation, backup_relation) }}\n  {% endif %}\n\n  {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n  {% do create_indexes(target_relation) %}\n\n  {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n  {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n  {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n  {% do persist_docs(target_relation, model) %}\n\n  -- `COMMIT` happens here\n  {{ adapter.commit() }}\n\n  -- finally, drop the existing/backup relation after the commit\n  {{ drop_relation_if_exists(backup_relation) }}\n\n  {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n  {{ return({'relations': [target_relation]}) }}\n{% endmaterialization %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.statement", "macro.dbt.get_create_table_as_sql", "macro.dbt.create_indexes", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.456702, "supported_languages": ["sql"]}, "macro.dbt.get_create_table_as_sql": {"unique_id": "macro.dbt.get_create_table_as_sql", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "name": "get_create_table_as_sql", "macro_sql": "{% macro get_create_table_as_sql(temporary, relation, sql) -%}\n  {{ adapter.dispatch('get_create_table_as_sql', 'dbt')(temporary, relation, sql) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_create_table_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.458294, "supported_languages": null}, "macro.dbt.default__get_create_table_as_sql": {"unique_id": "macro.dbt.default__get_create_table_as_sql", 
"package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "name": "default__get_create_table_as_sql", "macro_sql": "{% macro default__get_create_table_as_sql(temporary, relation, sql) -%}\n {{ return(create_table_as(temporary, relation, sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.458766, "supported_languages": null}, "macro.dbt.create_table_as": {"unique_id": "macro.dbt.create_table_as", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "name": "create_table_as", "macro_sql": "{% macro create_table_as(temporary, relation, compiled_code, language='sql') -%}\n {# backward compatibility for create_table_as that does not support language #}\n {% if language == \"sql\" %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code)}}\n {% else %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code, language) }}\n {% endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.459863, "supported_languages": null}, "macro.dbt.default__create_table_as": {"unique_id": "macro.dbt.default__create_table_as", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "name": "default__create_table_as", "macro_sql": "{% macro default__create_table_as(temporary, relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary: -%}temporary{%- endif %} table\n {{ relation.include(database=(not temporary), schema=(not temporary)) }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4608908, "supported_languages": null}, "macro.dbt.materialization_view_default": {"unique_id": "macro.dbt.materialization_view_default", "package_name": "dbt", "path": "macros/materializations/models/view/view.sql", "original_file_path": "macros/materializations/models/view/view.sql", "name": "materialization_view_default", "macro_sql": "{%- materialization view, default -%}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='view') -%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n This relation (probably) doesn't exist yet. 
If it does exist, it's a leftover from\n     a previous run, and we're going to try to drop it immediately. At the end of this\n     materialization, we're going to rename the \"existing_relation\" to this identifier,\n     and then we're going to drop it. In order to make sure we run the correct one of:\n       - drop view ...\n       - drop table ...\n\n     We need to set the type of this relation to be the type of the existing_relation, if it exists,\n     or else \"view\" as a sane default if it does not. Note that if the existing_relation does not\n     exist, then there is nothing to move out of the way and subsequently drop. In that case,\n     this relation will be effectively unused.\n  */\n  {%- set backup_relation_type = 'view' if existing_relation is none else existing_relation.type -%}\n  {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n  -- as above, the backup_relation should not already exist\n  {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n  -- grab the current table's grants config for comparison later on\n  {% set grant_config = config.get('grants') %}\n\n  {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n  -- drop the temp relations if they exist already in the database\n  {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n  {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n  -- `BEGIN` happens here:\n  {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n  -- build model\n  {% call statement('main') -%}\n    {{ get_create_view_as_sql(intermediate_relation, sql) }}\n  {%- endcall %}\n\n  -- cleanup\n  -- move the existing view out of the way\n  {% if existing_relation is not none %}\n    {{ adapter.rename_relation(existing_relation, backup_relation) }}\n  {% endif %}\n  {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n  {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n  {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n  {% do persist_docs(target_relation, model) %}\n\n  {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n  {{ adapter.commit() }}\n\n  {{ drop_relation_if_exists(backup_relation) }}\n\n  {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n  {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.run_hooks", "macro.dbt.drop_relation_if_exists", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4670231, "supported_languages": ["sql"]}, "macro.dbt.handle_existing_table": {"unique_id": "macro.dbt.handle_existing_table", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "name": "handle_existing_table", "macro_sql": "{% macro handle_existing_table(full_refresh, old_relation) %}\n    {{ adapter.dispatch('handle_existing_table', 'dbt')(full_refresh, old_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__handle_existing_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], 
"created_at": 1667573258.468151, "supported_languages": null}, "macro.dbt.default__handle_existing_table": {"unique_id": "macro.dbt.default__handle_existing_table", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "name": "default__handle_existing_table", "macro_sql": "{% macro default__handle_existing_table(full_refresh, old_relation) %}\n {{ log(\"Dropping relation \" ~ old_relation ~ \" because it is of type \" ~ old_relation.type) }}\n {{ adapter.drop_relation(old_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.468713, "supported_languages": null}, "macro.dbt.create_or_replace_view": {"unique_id": "macro.dbt.create_or_replace_view", "package_name": "dbt", "path": "macros/materializations/models/view/create_or_replace_view.sql", "original_file_path": "macros/materializations/models/view/create_or_replace_view.sql", "name": "create_or_replace_view", "macro_sql": "{% macro create_or_replace_view() %}\n {%- set identifier = model['alias'] -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database,\n type='view') -%}\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks) }}\n\n -- If there's a table with the same name and we weren't told to full refresh,\n -- that's an error. If we were told to full refresh, drop it. 
This behavior differs\n  -- for Snowflake and BigQuery, so multiple dispatch is used.\n  {%- if old_relation is not none and old_relation.is_table -%}\n    {{ handle_existing_table(should_full_refresh(), old_relation) }}\n  {%- endif -%}\n\n  -- build model\n  {% call statement('main') -%}\n    {{ get_create_view_as_sql(target_relation, sql) }}\n  {%- endcall %}\n\n  {% set should_revoke = should_revoke(exists_as_view, full_refresh_mode=True) %}\n  {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n  {{ run_hooks(post_hooks) }}\n\n  {{ return({'relations': [target_relation]}) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.run_hooks", "macro.dbt.handle_existing_table", "macro.dbt.should_full_refresh", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.472539, "supported_languages": null}, "macro.dbt.get_create_view_as_sql": {"unique_id": "macro.dbt.get_create_view_as_sql", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "name": "get_create_view_as_sql", "macro_sql": "{% macro get_create_view_as_sql(relation, sql) -%}\n  {{ adapter.dispatch('get_create_view_as_sql', 'dbt')(relation, sql) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_create_view_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.473769, "supported_languages": null}, "macro.dbt.default__get_create_view_as_sql": {"unique_id": "macro.dbt.default__get_create_view_as_sql", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "name": "default__get_create_view_as_sql", "macro_sql": "{% macro default__get_create_view_as_sql(relation, sql) -%}\n  {{ return(create_view_as(relation, sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.474185, "supported_languages": null}, "macro.dbt.create_view_as": {"unique_id": "macro.dbt.create_view_as", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "name": "create_view_as", "macro_sql": "{% macro create_view_as(relation, sql) -%}\n  {{ adapter.dispatch('create_view_as', 'dbt')(relation, sql) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.474647, "supported_languages": null}, "macro.dbt.default__create_view_as": {"unique_id": "macro.dbt.default__create_view_as", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "name": "default__create_view_as", "macro_sql": "{% macro default__create_view_as(relation, sql) 
-%}\n  {%- set sql_header = config.get('sql_header', none) -%}\n\n  {{ sql_header if sql_header is not none }}\n  create view {{ relation }} as (\n    {{ sql }}\n  );\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.475304, "supported_languages": null}, "macro.dbt.materialization_seed_default": {"unique_id": "macro.dbt.materialization_seed_default", "package_name": "dbt", "path": "macros/materializations/seeds/seed.sql", "original_file_path": "macros/materializations/seeds/seed.sql", "name": "materialization_seed_default", "macro_sql": "{% materialization seed, default %}\n\n  {%- set identifier = model['alias'] -%}\n  {%- set full_refresh_mode = (should_full_refresh()) -%}\n\n  {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n\n  {%- set exists_as_table = (old_relation is not none and old_relation.is_table) -%}\n  {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n  {%- set grant_config = config.get('grants') -%}\n  {%- set agate_table = load_agate_table() -%}\n  -- grab the current table's grants config for comparison later on\n\n  {%- do store_result('agate_table', response='OK', agate_table=agate_table) -%}\n\n  {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n  -- `BEGIN` happens here:\n  {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n  -- build model\n  {% set create_table_sql = \"\" %}\n  {% if exists_as_view %}\n    {{ exceptions.raise_compiler_error(\"Cannot seed to '{}', it is a view\".format(old_relation)) }}\n  {% elif exists_as_table %}\n    {% set create_table_sql = reset_csv_table(model, full_refresh_mode, old_relation, agate_table) %}\n  {% else %}\n    {% set create_table_sql = create_csv_table(model, agate_table) %}\n  {% endif %}\n\n  {% set code = 'CREATE' if full_refresh_mode else 'INSERT' %}\n  {% set rows_affected = (agate_table.rows | length) %}\n  {% set sql = load_csv_rows(model, agate_table) %}\n\n  {% call noop_statement('main', code ~ ' ' ~ rows_affected, code, rows_affected) %}\n    {{ get_csv_sql(create_table_sql, sql) }};\n  {% endcall %}\n\n  {% set target_relation = this.incorporate(type='table') %}\n\n  {% set should_revoke = should_revoke(old_relation, full_refresh_mode) %}\n  {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n  {% do persist_docs(target_relation, model) %}\n\n  {% if full_refresh_mode or not exists_as_table %}\n    {% do create_indexes(target_relation) %}\n  {% endif %}\n\n  {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n  -- `COMMIT` happens here\n  {{ adapter.commit() }}\n\n  {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n  {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.should_full_refresh", "macro.dbt.run_hooks", "macro.dbt.reset_csv_table", "macro.dbt.create_csv_table", "macro.dbt.load_csv_rows", "macro.dbt.noop_statement", "macro.dbt.get_csv_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.48314, "supported_languages": ["sql"]}, "macro.dbt.create_csv_table": {"unique_id": "macro.dbt.create_csv_table", "package_name": "dbt", "path": 
"macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "create_csv_table", "macro_sql": "{% macro create_csv_table(model, agate_table) -%}\n {{ adapter.dispatch('create_csv_table', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.492415, "supported_languages": null}, "macro.dbt.default__create_csv_table": {"unique_id": "macro.dbt.default__create_csv_table", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__create_csv_table", "macro_sql": "{% macro default__create_csv_table(model, agate_table) %}\n {%- set column_override = model['config'].get('column_types', {}) -%}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n\n {% set sql %}\n create table {{ this.render() }} (\n {%- for col_name in agate_table.column_names -%}\n {%- set inferred_type = adapter.convert_type(agate_table, loop.index0) -%}\n {%- set type = column_override.get(col_name, inferred_type) -%}\n {%- set column_name = (col_name | string) -%}\n {{ adapter.quote_seed_column(column_name, quote_seed_column) }} {{ type }} {%- if not loop.last -%}, {%- endif -%}\n {%- endfor -%}\n )\n {% endset %}\n\n {% call statement('_') -%}\n {{ sql }}\n {%- endcall %}\n\n {{ return(sql) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.494709, "supported_languages": null}, "macro.dbt.reset_csv_table": {"unique_id": "macro.dbt.reset_csv_table", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "reset_csv_table", "macro_sql": "{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}\n {{ adapter.dispatch('reset_csv_table', 'dbt')(model, full_refresh, old_relation, agate_table) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__reset_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.495312, "supported_languages": null}, "macro.dbt.default__reset_csv_table": {"unique_id": "macro.dbt.default__reset_csv_table", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__reset_csv_table", "macro_sql": "{% macro default__reset_csv_table(model, full_refresh, old_relation, agate_table) %}\n {% set sql = \"\" %}\n {% if full_refresh %}\n {{ adapter.drop_relation(old_relation) }}\n {% set sql = create_csv_table(model, agate_table) %}\n {% else %}\n {{ adapter.truncate_relation(old_relation) }}\n {% set sql = \"truncate table \" ~ old_relation %}\n {% endif %}\n\n {{ return(sql) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.496661, 
"supported_languages": null}, "macro.dbt.get_csv_sql": {"unique_id": "macro.dbt.get_csv_sql", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "get_csv_sql", "macro_sql": "{% macro get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ adapter.dispatch('get_csv_sql', 'dbt')(create_or_truncate_sql, insert_sql) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_csv_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4971678, "supported_languages": null}, "macro.dbt.default__get_csv_sql": {"unique_id": "macro.dbt.default__get_csv_sql", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__get_csv_sql", "macro_sql": "{% macro default__get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ create_or_truncate_sql }};\n -- dbt seed --\n {{ insert_sql }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.497521, "supported_languages": null}, "macro.dbt.get_binding_char": {"unique_id": "macro.dbt.get_binding_char", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "get_binding_char", "macro_sql": "{% macro get_binding_char() -%}\n {{ adapter.dispatch('get_binding_char', 'dbt')() }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4978828, "supported_languages": null}, "macro.dbt.default__get_binding_char": {"unique_id": "macro.dbt.default__get_binding_char", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__get_binding_char", "macro_sql": "{% macro default__get_binding_char() %}\n {{ return('%s') }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.498185, "supported_languages": null}, "macro.dbt.get_batch_size": {"unique_id": "macro.dbt.get_batch_size", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "get_batch_size", "macro_sql": "{% macro get_batch_size() -%}\n {{ return(adapter.dispatch('get_batch_size', 'dbt')()) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_batch_size"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4986072, "supported_languages": null}, "macro.dbt.default__get_batch_size": {"unique_id": "macro.dbt.default__get_batch_size", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__get_batch_size", "macro_sql": 
"{% macro default__get_batch_size() %}\n {{ return(10000) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.498921, "supported_languages": null}, "macro.dbt.get_seed_column_quoted_csv": {"unique_id": "macro.dbt.get_seed_column_quoted_csv", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "get_seed_column_quoted_csv", "macro_sql": "{% macro get_seed_column_quoted_csv(model, column_names) %}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote_seed_column(col, quote_seed_column)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.500187, "supported_languages": null}, "macro.dbt.load_csv_rows": {"unique_id": "macro.dbt.load_csv_rows", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "load_csv_rows", "macro_sql": "{% macro load_csv_rows(model, agate_table) -%}\n {{ adapter.dispatch('load_csv_rows', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__load_csv_rows"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.500671, "supported_languages": null}, "macro.dbt.default__load_csv_rows": {"unique_id": "macro.dbt.default__load_csv_rows", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__load_csv_rows", "macro_sql": "{% macro default__load_csv_rows(model, agate_table) %}\n\n {% set batch_size = get_batch_size() %}\n\n {% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}\n {% set bindings = [] %}\n\n {% set statements = [] %}\n\n {% for chunk in agate_table.rows | batch(batch_size) %}\n {% set bindings = [] %}\n\n {% for row in chunk %}\n {% do bindings.extend(row) %}\n {% endfor %}\n\n {% set sql %}\n insert into {{ this.render() }} ({{ cols_sql }}) values\n {% for row in chunk -%}\n ({%- for column in agate_table.column_names -%}\n {{ get_binding_char() }}\n {%- if not loop.last%},{%- endif %}\n {%- endfor -%})\n {%- if not loop.last%},{%- endif %}\n {%- endfor %}\n {% endset %}\n\n {% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}\n\n {% if loop.index0 == 0 %}\n {% do statements.append(sql) %}\n {% endif %}\n {% endfor %}\n\n {# Return SQL so we can render it out into the compiled files #}\n {{ return(statements[0]) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_batch_size", "macro.dbt.get_seed_column_quoted_csv", "macro.dbt.get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.504315, "supported_languages": null}, "macro.dbt.generate_alias_name": 
{"unique_id": "macro.dbt.generate_alias_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "name": "generate_alias_name", "macro_sql": "{% macro generate_alias_name(custom_alias_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_alias_name', 'dbt')(custom_alias_name, node)) %}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__generate_alias_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.505684, "supported_languages": null}, "macro.dbt.default__generate_alias_name": {"unique_id": "macro.dbt.default__generate_alias_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "name": "default__generate_alias_name", "macro_sql": "{% macro default__generate_alias_name(custom_alias_name=none, node=none) -%}\n\n {%- if custom_alias_name is none -%}\n\n {{ node.name }}\n\n {%- else -%}\n\n {{ custom_alias_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.506274, "supported_languages": null}, "macro.dbt.generate_schema_name": {"unique_id": "macro.dbt.generate_schema_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "name": "generate_schema_name", "macro_sql": "{% macro generate_schema_name(custom_schema_name=none, node=none) -%}\n {{ return(adapter.dispatch('generate_schema_name', 'dbt')(custom_schema_name, node)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__generate_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5077639, "supported_languages": null}, "macro.dbt.default__generate_schema_name": {"unique_id": "macro.dbt.default__generate_schema_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "name": "default__generate_schema_name", "macro_sql": "{% macro default__generate_schema_name(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if custom_schema_name is none -%}\n\n {{ default_schema }}\n\n {%- else -%}\n\n {{ default_schema }}_{{ custom_schema_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5084338, "supported_languages": null}, "macro.dbt.generate_schema_name_for_env": {"unique_id": "macro.dbt.generate_schema_name_for_env", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "name": "generate_schema_name_for_env", "macro_sql": "{% macro generate_schema_name_for_env(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if target.name == 'prod' and custom_schema_name is not none -%}\n\n {{ 
custom_schema_name | trim }}\n\n {%- else -%}\n\n {{ default_schema }}\n\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5091588, "supported_languages": null}, "macro.dbt.generate_database_name": {"unique_id": "macro.dbt.generate_database_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "name": "generate_database_name", "macro_sql": "{% macro generate_database_name(custom_database_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_database_name', 'dbt')(custom_database_name, node)) %}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__generate_database_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.510411, "supported_languages": null}, "macro.dbt.default__generate_database_name": {"unique_id": "macro.dbt.default__generate_database_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "name": "default__generate_database_name", "macro_sql": "{% macro default__generate_database_name(custom_database_name=none, node=none) -%}\n {%- set default_database = target.database -%}\n {%- if custom_database_name is none -%}\n\n {{ default_database }}\n\n {%- else -%}\n\n {{ custom_database_name }}\n\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.51106, "supported_languages": null}, "macro.dbt.default__test_relationships": {"unique_id": "macro.dbt.default__test_relationships", "package_name": "dbt", "path": "macros/generic_test_sql/relationships.sql", "original_file_path": "macros/generic_test_sql/relationships.sql", "name": "default__test_relationships", "macro_sql": "{% macro default__test_relationships(model, column_name, to, field) %}\n\nwith child as (\n select {{ column_name }} as from_field\n from {{ model }}\n where {{ column_name }} is not null\n),\n\nparent as (\n select {{ field }} as to_field\n from {{ to }}\n)\n\nselect\n from_field\n\nfrom child\nleft join parent\n on child.from_field = parent.to_field\n\nwhere parent.to_field is null\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.512234, "supported_languages": null}, "macro.dbt.default__test_not_null": {"unique_id": "macro.dbt.default__test_not_null", "package_name": "dbt", "path": "macros/generic_test_sql/not_null.sql", "original_file_path": "macros/generic_test_sql/not_null.sql", "name": "default__test_not_null", "macro_sql": "{% macro default__test_not_null(model, column_name) %}\n\n{% set column_list = '*' if should_store_failures() else column_name %}\n\nselect {{ column_list }}\nfrom {{ model }}\nwhere {{ column_name }} is null\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.should_store_failures"]}, "description": "", "meta": {}, "docs": {"show": true, 
"node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5134442, "supported_languages": null}, "macro.dbt.default__test_unique": {"unique_id": "macro.dbt.default__test_unique", "package_name": "dbt", "path": "macros/generic_test_sql/unique.sql", "original_file_path": "macros/generic_test_sql/unique.sql", "name": "default__test_unique", "macro_sql": "{% macro default__test_unique(model, column_name) %}\n\nselect\n {{ column_name }} as unique_field,\n count(*) as n_records\n\nfrom {{ model }}\nwhere {{ column_name }} is not null\ngroup by {{ column_name }}\nhaving count(*) > 1\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.514476, "supported_languages": null}, "macro.dbt.default__test_accepted_values": {"unique_id": "macro.dbt.default__test_accepted_values", "package_name": "dbt", "path": "macros/generic_test_sql/accepted_values.sql", "original_file_path": "macros/generic_test_sql/accepted_values.sql", "name": "default__test_accepted_values", "macro_sql": "{% macro default__test_accepted_values(model, column_name, values, quote=True) %}\n\nwith all_values as (\n\n select\n {{ column_name }} as value_field,\n count(*) as n_records\n\n from {{ model }}\n group by {{ column_name }}\n\n)\n\nselect *\nfrom all_values\nwhere value_field not in (\n {% for value in values -%}\n {% if quote -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif -%}\n {%- if not loop.last -%},{%- endif %}\n {%- endfor %}\n)\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5161521, "supported_languages": null}, "macro.dbt.statement": {"unique_id": "macro.dbt.statement", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "name": "statement", "macro_sql": "\n{%- macro statement(name=None, fetch_result=False, auto_begin=True, language='sql') -%}\n {%- if execute: -%}\n {%- set compiled_code = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime {} for node \"{}\"'.format(language, model['unique_id'])) }}\n {{ write(compiled_code) }}\n {%- endif -%}\n {%- if language == 'sql'-%}\n {%- set res, table = adapter.execute(compiled_code, auto_begin=auto_begin, fetch=fetch_result) -%}\n {%- elif language == 'python' -%}\n {%- set res = submit_python_job(model, compiled_code) -%}\n {#-- TODO: What should table be for python models? 
--#}\n {%- set table = None -%}\n {%- else -%}\n {% do exceptions.raise_compiler_error(\"statement macro didn't get supported language\") %}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_result(name, response=res, agate_table=table) }}\n {%- endif -%}\n\n {%- endif -%}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.520122, "supported_languages": null}, "macro.dbt.noop_statement": {"unique_id": "macro.dbt.noop_statement", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "name": "noop_statement", "macro_sql": "{% macro noop_statement(name=None, message=None, code=None, rows_affected=None, res=None) -%}\n {%- set sql = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime SQL for node \"{}\"'.format(model['unique_id'])) }}\n {{ write(sql) }}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_raw_result(name, message=message, code=code, rows_affected=rows_affected, agate_table=res) }}\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.521693, "supported_languages": null}, "macro.dbt.run_query": {"unique_id": "macro.dbt.run_query", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "name": "run_query", "macro_sql": "{% macro run_query(sql) %}\n {% call statement(\"run_query_statement\", fetch_result=true, auto_begin=false) %}\n {{ sql }}\n {% endcall %}\n\n {% do return(load_result(\"run_query_statement\").table) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5224488, "supported_languages": null}, "macro.dbt.convert_datetime": {"unique_id": "macro.dbt.convert_datetime", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "name": "convert_datetime", "macro_sql": "{% macro convert_datetime(date_str, date_fmt) %}\n\n {% set error_msg -%}\n The provided partition date '{{ date_str }}' does not match the expected format '{{ date_fmt }}'\n {%- endset %}\n\n {% set res = try_or_compiler_error(error_msg, modules.datetime.datetime.strptime, date_str.strip(), date_fmt) %}\n {{ return(res) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.526434, "supported_languages": null}, "macro.dbt.dates_in_range": {"unique_id": "macro.dbt.dates_in_range", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "name": "dates_in_range", "macro_sql": "{% macro dates_in_range(start_date_str, end_date_str=none, in_fmt=\"%Y%m%d\", out_fmt=\"%Y%m%d\") %}\n {% set end_date_str = start_date_str if end_date_str is none else end_date_str %}\n\n {% set start_date = convert_datetime(start_date_str, in_fmt) %}\n {% set end_date = convert_datetime(end_date_str, in_fmt) %}\n\n {% set day_count = (end_date - start_date).days %}\n {% if day_count 
< 0 %}\n {% set msg -%}\n Partition start date is after the end date ({{ start_date }}, {{ end_date }})\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg, model) }}\n {% endif %}\n\n {% set date_list = [] %}\n {% for i in range(0, day_count + 1) %}\n {% set the_date = (modules.datetime.timedelta(days=i) + start_date) %}\n {% if not out_fmt %}\n {% set _ = date_list.append(the_date) %}\n {% else %}\n {% set _ = date_list.append(the_date.strftime(out_fmt)) %}\n {% endif %}\n {% endfor %}\n\n {{ return(date_list) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.convert_datetime"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.529401, "supported_languages": null}, "macro.dbt.partition_range": {"unique_id": "macro.dbt.partition_range", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "name": "partition_range", "macro_sql": "{% macro partition_range(raw_partition_date, date_fmt='%Y%m%d') %}\n {% set partition_range = (raw_partition_date | string).split(\",\") %}\n\n {% if (partition_range | length) == 1 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = none %}\n {% elif (partition_range | length) == 2 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = partition_range[1] %}\n {% else %}\n {{ exceptions.raise_compiler_error(\"Invalid partition time. Expected format: {Start Date}[,{End Date}]. Got: \" ~ raw_partition_date) }}\n {% endif %}\n\n {{ return(dates_in_range(start_date, end_date, in_fmt=date_fmt)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.dates_in_range"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.531286, "supported_languages": null}, "macro.dbt.py_current_timestring": {"unique_id": "macro.dbt.py_current_timestring", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "name": "py_current_timestring", "macro_sql": "{% macro py_current_timestring() %}\n {% set dt = modules.datetime.datetime.now() %}\n {% do return(dt.strftime(\"%Y%m%d%H%M%S%f\")) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.531877, "supported_languages": null}, "macro.dbt.except": {"unique_id": "macro.dbt.except", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "name": "except", "macro_sql": "{% macro except() %}\n {{ return(adapter.dispatch('except', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__except"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5329752, "supported_languages": null}, "macro.dbt.default__except": {"unique_id": "macro.dbt.default__except", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "name": "default__except", "macro_sql": "{% macro default__except() %}\n\n except\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true,
"node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.533185, "supported_languages": null}, "macro.dbt.replace": {"unique_id": "macro.dbt.replace", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "name": "replace", "macro_sql": "{% macro replace(field, old_chars, new_chars) -%}\n {{ return(adapter.dispatch('replace', 'dbt') (field, old_chars, new_chars)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__replace"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.534307, "supported_languages": null}, "macro.dbt.default__replace": {"unique_id": "macro.dbt.default__replace", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "name": "default__replace", "macro_sql": "{% macro default__replace(field, old_chars, new_chars) %}\n\n replace(\n {{ field }},\n {{ old_chars }},\n {{ new_chars }}\n )\n\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.534719, "supported_languages": null}, "macro.dbt.concat": {"unique_id": "macro.dbt.concat", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "name": "concat", "macro_sql": "{% macro concat(fields) -%}\n {{ return(adapter.dispatch('concat', 'dbt')(fields)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.535649, "supported_languages": null}, "macro.dbt.default__concat": {"unique_id": "macro.dbt.default__concat", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "name": "default__concat", "macro_sql": "{% macro default__concat(fields) -%}\n {{ fields|join(' || ') }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5359669, "supported_languages": null}, "macro.dbt.length": {"unique_id": "macro.dbt.length", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "name": "length", "macro_sql": "{% macro length(expression) -%}\n {{ return(adapter.dispatch('length', 'dbt') (expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__length"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5369081, "supported_languages": null}, "macro.dbt.default__length": {"unique_id": "macro.dbt.default__length", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "name": "default__length", "macro_sql": "{% macro default__length(expression) %}\n\n length(\n {{ expression }}\n )\n\n{%- endmacro -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], 
"created_at": 1667573258.53718, "supported_languages": null}, "macro.dbt.dateadd": {"unique_id": "macro.dbt.dateadd", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "name": "dateadd", "macro_sql": "{% macro dateadd(datepart, interval, from_date_or_timestamp) %}\n {{ return(adapter.dispatch('dateadd', 'dbt')(datepart, interval, from_date_or_timestamp)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__dateadd"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.538305, "supported_languages": null}, "macro.dbt.default__dateadd": {"unique_id": "macro.dbt.default__dateadd", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "name": "default__dateadd", "macro_sql": "{% macro default__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n dateadd(\n {{ datepart }},\n {{ interval }},\n {{ from_date_or_timestamp }}\n )\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5387268, "supported_languages": null}, "macro.dbt.intersect": {"unique_id": "macro.dbt.intersect", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "name": "intersect", "macro_sql": "{% macro intersect() %}\n {{ return(adapter.dispatch('intersect', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__intersect"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.539616, "supported_languages": null}, "macro.dbt.default__intersect": {"unique_id": "macro.dbt.default__intersect", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "name": "default__intersect", "macro_sql": "{% macro default__intersect() %}\n\n intersect\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.539814, "supported_languages": null}, "macro.dbt.escape_single_quotes": {"unique_id": "macro.dbt.escape_single_quotes", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "name": "escape_single_quotes", "macro_sql": "{% macro escape_single_quotes(expression) %}\n {{ return(adapter.dispatch('escape_single_quotes', 'dbt') (expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__escape_single_quotes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.540791, "supported_languages": null}, "macro.dbt.default__escape_single_quotes": {"unique_id": "macro.dbt.default__escape_single_quotes", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "name": "default__escape_single_quotes", "macro_sql": "{% macro default__escape_single_quotes(expression) -%}\n{{ expression | 
replace(\"'\",\"''\") }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.541302, "supported_languages": null}, "macro.dbt.right": {"unique_id": "macro.dbt.right", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "name": "right", "macro_sql": "{% macro right(string_text, length_expression) -%}\n {{ return(adapter.dispatch('right', 'dbt') (string_text, length_expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__right"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.542329, "supported_languages": null}, "macro.dbt.default__right": {"unique_id": "macro.dbt.default__right", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "name": "default__right", "macro_sql": "{% macro default__right(string_text, length_expression) %}\n\n right(\n {{ string_text }},\n {{ length_expression }}\n )\n\n{%- endmacro -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.54267, "supported_languages": null}, "macro.dbt.listagg": {"unique_id": "macro.dbt.listagg", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "name": "listagg", "macro_sql": "{% macro listagg(measure, delimiter_text=\"','\", order_by_clause=none, limit_num=none) -%}\n {{ return(adapter.dispatch('listagg', 'dbt') (measure, delimiter_text, order_by_clause, limit_num)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__listagg"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.544319, "supported_languages": null}, "macro.dbt.default__listagg": {"unique_id": "macro.dbt.default__listagg", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "name": "default__listagg", "macro_sql": "{% macro default__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n array_slice(\n array_agg(\n {{ measure }}\n ){% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n ,0\n ,{{ limit_num }}\n ),\n {{ delimiter_text }}\n )\n {%- else %}\n listagg(\n {{ measure }},\n {{ delimiter_text }}\n )\n {% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n {%- endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.545324, "supported_languages": null}, "macro.dbt.datediff": {"unique_id": "macro.dbt.datediff", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "name": "datediff", "macro_sql": "{% macro datediff(first_date, second_date, datepart) %}\n {{ return(adapter.dispatch('datediff', 'dbt')(first_date, second_date, datepart)) }}\n{% endmacro %}", "resource_type": "macro", "tags": 
[], "depends_on": {"macros": ["macro.dbt_postgres.postgres__datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.546518, "supported_languages": null}, "macro.dbt.default__datediff": {"unique_id": "macro.dbt.default__datediff", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "name": "default__datediff", "macro_sql": "{% macro default__datediff(first_date, second_date, datepart) -%}\n\n datediff(\n {{ datepart }},\n {{ first_date }},\n {{ second_date }}\n )\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5469708, "supported_languages": null}, "macro.dbt.safe_cast": {"unique_id": "macro.dbt.safe_cast", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "name": "safe_cast", "macro_sql": "{% macro safe_cast(field, type) %}\n {{ return(adapter.dispatch('safe_cast', 'dbt') (field, type)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__safe_cast"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.548071, "supported_languages": null}, "macro.dbt.default__safe_cast": {"unique_id": "macro.dbt.default__safe_cast", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "name": "default__safe_cast", "macro_sql": "{% macro default__safe_cast(field, type) %}\n {# most databases don't support this function yet\n so we just need to use cast #}\n cast({{field}} as {{type}})\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.548503, "supported_languages": null}, "macro.dbt.hash": {"unique_id": "macro.dbt.hash", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "name": "hash", "macro_sql": "{% macro hash(field) -%}\n {{ return(adapter.dispatch('hash', 'dbt') (field)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__hash"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5495412, "supported_languages": null}, "macro.dbt.default__hash": {"unique_id": "macro.dbt.default__hash", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "name": "default__hash", "macro_sql": "{% macro default__hash(field) -%}\n md5(cast({{ field }} as {{ api.Column.translate_type('string') }}))\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.549956, "supported_languages": null}, "macro.dbt.cast_bool_to_text": {"unique_id": "macro.dbt.cast_bool_to_text", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "name": "cast_bool_to_text", "macro_sql": "{% macro cast_bool_to_text(field) 
%}\n {{ adapter.dispatch('cast_bool_to_text', 'dbt') (field) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__cast_bool_to_text"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.550951, "supported_languages": null}, "macro.dbt.default__cast_bool_to_text": {"unique_id": "macro.dbt.default__cast_bool_to_text", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "name": "default__cast_bool_to_text", "macro_sql": "{% macro default__cast_bool_to_text(field) %}\n cast({{ field }} as {{ api.Column.translate_type('string') }})\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.551501, "supported_languages": null}, "macro.dbt.any_value": {"unique_id": "macro.dbt.any_value", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "name": "any_value", "macro_sql": "{% macro any_value(expression) -%}\n {{ return(adapter.dispatch('any_value', 'dbt') (expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__any_value"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5527098, "supported_languages": null}, "macro.dbt.default__any_value": {"unique_id": "macro.dbt.default__any_value", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "name": "default__any_value", "macro_sql": "{% macro default__any_value(expression) -%}\n\n any_value({{ expression }})\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.553046, "supported_languages": null}, "macro.dbt.position": {"unique_id": "macro.dbt.position", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "name": "position", "macro_sql": "{% macro position(substring_text, string_text) -%}\n {{ return(adapter.dispatch('position', 'dbt') (substring_text, string_text)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__position"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.554094, "supported_languages": null}, "macro.dbt.default__position": {"unique_id": "macro.dbt.default__position", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "name": "default__position", "macro_sql": "{% macro default__position(substring_text, string_text) %}\n\n position(\n {{ substring_text }} in {{ string_text }}\n )\n\n{%- endmacro -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.55445, "supported_languages": null}, "macro.dbt.string_literal": {"unique_id": "macro.dbt.string_literal", "package_name": "dbt", "path": 
"macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "name": "string_literal", "macro_sql": "{%- macro string_literal(value) -%}\n {{ return(adapter.dispatch('string_literal', 'dbt') (value)) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__string_literal"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.555383, "supported_languages": null}, "macro.dbt.default__string_literal": {"unique_id": "macro.dbt.default__string_literal", "package_name": "dbt", "path": "macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "name": "default__string_literal", "macro_sql": "{% macro default__string_literal(value) -%}\n '{{ value }}'\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.555779, "supported_languages": null}, "macro.dbt.type_string": {"unique_id": "macro.dbt.type_string", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_string", "macro_sql": "\n\n{%- macro type_string() -%}\n {{ return(adapter.dispatch('type_string', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_string"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5583549, "supported_languages": null}, "macro.dbt.default__type_string": {"unique_id": "macro.dbt.default__type_string", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_string", "macro_sql": "{% macro default__type_string() %}\n {{ return(api.Column.translate_type(\"string\")) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.558912, "supported_languages": null}, "macro.dbt.type_timestamp": {"unique_id": "macro.dbt.type_timestamp", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_timestamp", "macro_sql": "\n\n{%- macro type_timestamp() -%}\n {{ return(adapter.dispatch('type_timestamp', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.559475, "supported_languages": null}, "macro.dbt.default__type_timestamp": {"unique_id": "macro.dbt.default__type_timestamp", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_timestamp", "macro_sql": "{% macro default__type_timestamp() %}\n {{ return(api.Column.translate_type(\"timestamp\")) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.559979, "supported_languages": null}, "macro.dbt.type_float": 
{"unique_id": "macro.dbt.type_float", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_float", "macro_sql": "\n\n{%- macro type_float() -%}\n {{ return(adapter.dispatch('type_float', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_float"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.56046, "supported_languages": null}, "macro.dbt.default__type_float": {"unique_id": "macro.dbt.default__type_float", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_float", "macro_sql": "{% macro default__type_float() %}\n {{ return(api.Column.translate_type(\"float\")) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.56098, "supported_languages": null}, "macro.dbt.type_numeric": {"unique_id": "macro.dbt.type_numeric", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_numeric", "macro_sql": "\n\n{%- macro type_numeric() -%}\n {{ return(adapter.dispatch('type_numeric', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_numeric"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.561585, "supported_languages": null}, "macro.dbt.default__type_numeric": {"unique_id": "macro.dbt.default__type_numeric", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_numeric", "macro_sql": "{% macro default__type_numeric() %}\n {{ return(api.Column.numeric_type(\"numeric\", 28, 6)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5620632, "supported_languages": null}, "macro.dbt.type_bigint": {"unique_id": "macro.dbt.type_bigint", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_bigint", "macro_sql": "\n\n{%- macro type_bigint() -%}\n {{ return(adapter.dispatch('type_bigint', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_bigint"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.562495, "supported_languages": null}, "macro.dbt.default__type_bigint": {"unique_id": "macro.dbt.default__type_bigint", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_bigint", "macro_sql": "{% macro default__type_bigint() %}\n {{ return(api.Column.translate_type(\"bigint\")) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.562887, 
"supported_languages": null}, "macro.dbt.type_int": {"unique_id": "macro.dbt.type_int", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_int", "macro_sql": "\n\n{%- macro type_int() -%}\n {{ return(adapter.dispatch('type_int', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_int"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.563301, "supported_languages": null}, "macro.dbt.default__type_int": {"unique_id": "macro.dbt.default__type_int", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_int", "macro_sql": "{%- macro default__type_int() -%}\n {{ return(api.Column.translate_type(\"integer\")) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.563683, "supported_languages": null}, "macro.dbt.type_boolean": {"unique_id": "macro.dbt.type_boolean", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_boolean", "macro_sql": "\n\n{%- macro type_boolean() -%}\n {{ return(adapter.dispatch('type_boolean', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_boolean"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.564095, "supported_languages": null}, "macro.dbt.default__type_boolean": {"unique_id": "macro.dbt.default__type_boolean", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_boolean", "macro_sql": "{%- macro default__type_boolean() -%}\n {{ return(api.Column.translate_type(\"boolean\")) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.564472, "supported_languages": null}, "macro.dbt.array_concat": {"unique_id": "macro.dbt.array_concat", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "name": "array_concat", "macro_sql": "{% macro array_concat(array_1, array_2) -%}\n {{ return(adapter.dispatch('array_concat', 'dbt')(array_1, array_2)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__array_concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5655942, "supported_languages": null}, "macro.dbt.default__array_concat": {"unique_id": "macro.dbt.default__array_concat", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "name": "default__array_concat", "macro_sql": "{% macro default__array_concat(array_1, array_2) -%}\n array_cat({{ array_1 }}, {{ array_2 }})\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, 
"node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5660028, "supported_languages": null}, "macro.dbt.bool_or": {"unique_id": "macro.dbt.bool_or", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "name": "bool_or", "macro_sql": "{% macro bool_or(expression) -%}\n {{ return(adapter.dispatch('bool_or', 'dbt') (expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__bool_or"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.567043, "supported_languages": null}, "macro.dbt.default__bool_or": {"unique_id": "macro.dbt.default__bool_or", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "name": "default__bool_or", "macro_sql": "{% macro default__bool_or(expression) -%}\n\n bool_or({{ expression }})\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.567328, "supported_languages": null}, "macro.dbt.last_day": {"unique_id": "macro.dbt.last_day", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "name": "last_day", "macro_sql": "{% macro last_day(date, datepart) %}\n {{ return(adapter.dispatch('last_day', 'dbt') (date, datepart)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.568495, "supported_languages": null}, "macro.dbt.default_last_day": {"unique_id": "macro.dbt.default_last_day", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "name": "default_last_day", "macro_sql": "\n\n{%- macro default_last_day(date, datepart) -%}\n cast(\n {{dbt.dateadd('day', '-1',\n dbt.dateadd(datepart, '1', dbt.date_trunc(datepart, date))\n )}}\n as date)\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.56929, "supported_languages": null}, "macro.dbt.default__last_day": {"unique_id": "macro.dbt.default__last_day", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "name": "default__last_day", "macro_sql": "{% macro default__last_day(date, datepart) -%}\n {{dbt.default_last_day(date, datepart)}}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.569667, "supported_languages": null}, "macro.dbt.split_part": {"unique_id": "macro.dbt.split_part", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "name": "split_part", "macro_sql": "{% macro split_part(string_text, delimiter_text, part_number) %}\n {{ return(adapter.dispatch('split_part', 'dbt') (string_text, delimiter_text, 
part_number)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__split_part"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.571143, "supported_languages": null}, "macro.dbt.default__split_part": {"unique_id": "macro.dbt.default__split_part", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "name": "default__split_part", "macro_sql": "{% macro default__split_part(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n {{ part_number }}\n )\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.571575, "supported_languages": null}, "macro.dbt._split_part_negative": {"unique_id": "macro.dbt._split_part_negative", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "name": "_split_part_negative", "macro_sql": "{% macro _split_part_negative(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n length({{ string_text }})\n - length(\n replace({{ string_text }}, {{ delimiter_text }}, '')\n ) + 2 {{ part_number }}\n )\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.57214, "supported_languages": null}, "macro.dbt.date_trunc": {"unique_id": "macro.dbt.date_trunc", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "name": "date_trunc", "macro_sql": "{% macro date_trunc(datepart, date) -%}\n {{ return(adapter.dispatch('date_trunc', 'dbt') (datepart, date)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5731661, "supported_languages": null}, "macro.dbt.default__date_trunc": {"unique_id": "macro.dbt.default__date_trunc", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "name": "default__date_trunc", "macro_sql": "{% macro default__date_trunc(datepart, date) -%}\n date_trunc('{{datepart}}', {{date}})\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.573505, "supported_languages": null}, "macro.dbt.array_construct": {"unique_id": "macro.dbt.array_construct", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "name": "array_construct", "macro_sql": "{% macro array_construct(inputs=[], data_type=api.Column.translate_type('integer')) -%}\n {{ return(adapter.dispatch('array_construct', 'dbt')(inputs, data_type)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__array_construct"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": 
null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5747578, "supported_languages": null}, "macro.dbt.default__array_construct": {"unique_id": "macro.dbt.default__array_construct", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "name": "default__array_construct", "macro_sql": "{% macro default__array_construct(inputs, data_type) -%}\n {% if inputs|length > 0 %}\n array[ {{ inputs|join(' , ') }} ]\n {% else %}\n array[]::{{data_type}}[]\n {% endif %}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.57538, "supported_languages": null}, "macro.dbt.array_append": {"unique_id": "macro.dbt.array_append", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "name": "array_append", "macro_sql": "{% macro array_append(array, new_element) -%}\n {{ return(adapter.dispatch('array_append', 'dbt')(array, new_element)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__array_append"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.576419, "supported_languages": null}, "macro.dbt.default__array_append": {"unique_id": "macro.dbt.default__array_append", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "name": "default__array_append", "macro_sql": "{% macro default__array_append(array, new_element) -%}\n array_append({{ array }}, {{ new_element }})\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.576768, "supported_languages": null}, "macro.dbt.create_schema": {"unique_id": "macro.dbt.create_schema", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "name": "create_schema", "macro_sql": "{% macro create_schema(relation) -%}\n {{ adapter.dispatch('create_schema', 'dbt')(relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.577954, "supported_languages": null}, "macro.dbt.default__create_schema": {"unique_id": "macro.dbt.default__create_schema", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "name": "default__create_schema", "macro_sql": "{% macro default__create_schema(relation) -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier() }}\n {% endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.578434, "supported_languages": null}, "macro.dbt.drop_schema": {"unique_id": "macro.dbt.drop_schema", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": 
"macros/adapters/schema.sql", "name": "drop_schema", "macro_sql": "{% macro drop_schema(relation) -%}\n {{ adapter.dispatch('drop_schema', 'dbt')(relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__drop_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.578851, "supported_languages": null}, "macro.dbt.default__drop_schema": {"unique_id": "macro.dbt.default__drop_schema", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "name": "default__drop_schema", "macro_sql": "{% macro default__drop_schema(relation) -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier() }} cascade\n {% endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5793228, "supported_languages": null}, "macro.dbt.current_timestamp": {"unique_id": "macro.dbt.current_timestamp", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "current_timestamp", "macro_sql": "{%- macro current_timestamp() -%}\n {{ adapter.dispatch('current_timestamp', 'dbt')() }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.580731, "supported_languages": null}, "macro.dbt.default__current_timestamp": {"unique_id": "macro.dbt.default__current_timestamp", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "default__current_timestamp", "macro_sql": "{% macro default__current_timestamp() -%}\n {{ exceptions.raise_not_implemented(\n 'current_timestamp macro not implemented for adapter ' + adapter.type()) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.581128, "supported_languages": null}, "macro.dbt.snapshot_get_time": {"unique_id": "macro.dbt.snapshot_get_time", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "snapshot_get_time", "macro_sql": "\n\n{%- macro snapshot_get_time() -%}\n {{ adapter.dispatch('snapshot_get_time', 'dbt')() }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_get_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.581523, "supported_languages": null}, "macro.dbt.default__snapshot_get_time": {"unique_id": "macro.dbt.default__snapshot_get_time", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "default__snapshot_get_time", "macro_sql": "{% macro default__snapshot_get_time() %}\n {{ current_timestamp() }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": 
["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.581808, "supported_languages": null}, "macro.dbt.current_timestamp_backcompat": {"unique_id": "macro.dbt.current_timestamp_backcompat", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "current_timestamp_backcompat", "macro_sql": "{% macro current_timestamp_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5822232, "supported_languages": null}, "macro.dbt.default__current_timestamp_backcompat": {"unique_id": "macro.dbt.default__current_timestamp_backcompat", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "default__current_timestamp_backcompat", "macro_sql": "{% macro default__current_timestamp_backcompat() %}\n current_timestamp::timestamp\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5825438, "supported_languages": null}, "macro.dbt.current_timestamp_in_utc_backcompat": {"unique_id": "macro.dbt.current_timestamp_in_utc_backcompat", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "current_timestamp_in_utc_backcompat", "macro_sql": "{% macro current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_in_utc_backcompat', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.582957, "supported_languages": null}, "macro.dbt.default__current_timestamp_in_utc_backcompat": {"unique_id": "macro.dbt.default__current_timestamp_in_utc_backcompat", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "default__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro default__current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.current_timestamp_backcompat", "macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.583432, "supported_languages": null}, "macro.dbt.get_create_index_sql": {"unique_id": "macro.dbt.get_create_index_sql", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "name": "get_create_index_sql", "macro_sql": "{% macro get_create_index_sql(relation, index_dict) -%}\n {{ return(adapter.dispatch('get_create_index_sql', 'dbt')(relation, index_dict)) }}\n{% endmacro %}", "resource_type": 
"macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_create_index_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.585173, "supported_languages": null}, "macro.dbt.default__get_create_index_sql": {"unique_id": "macro.dbt.default__get_create_index_sql", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "name": "default__get_create_index_sql", "macro_sql": "{% macro default__get_create_index_sql(relation, index_dict) -%}\n {% do return(None) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5855622, "supported_languages": null}, "macro.dbt.create_indexes": {"unique_id": "macro.dbt.create_indexes", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "name": "create_indexes", "macro_sql": "{% macro create_indexes(relation) -%}\n {{ adapter.dispatch('create_indexes', 'dbt')(relation) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5859761, "supported_languages": null}, "macro.dbt.default__create_indexes": {"unique_id": "macro.dbt.default__create_indexes", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "name": "default__create_indexes", "macro_sql": "{% macro default__create_indexes(relation) -%}\n {%- set _indexes = config.get('indexes', default=[]) -%}\n\n {% for _index_dict in _indexes %}\n {% set create_index_sql = get_create_index_sql(relation, _index_dict) %}\n {% if create_index_sql %}\n {% do run_query(create_index_sql) %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_create_index_sql", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.587023, "supported_languages": null}, "macro.dbt.make_intermediate_relation": {"unique_id": "macro.dbt.make_intermediate_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "make_intermediate_relation", "macro_sql": "{% macro make_intermediate_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_intermediate_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_intermediate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5934248, "supported_languages": null}, "macro.dbt.default__make_intermediate_relation": {"unique_id": "macro.dbt.default__make_intermediate_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__make_intermediate_relation", "macro_sql": "{% macro default__make_intermediate_relation(base_relation, suffix) %}\n {{ 
return(default__make_temp_relation(base_relation, suffix)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5938601, "supported_languages": null}, "macro.dbt.make_temp_relation": {"unique_id": "macro.dbt.make_temp_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "make_temp_relation", "macro_sql": "{% macro make_temp_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_temp_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5944152, "supported_languages": null}, "macro.dbt.default__make_temp_relation": {"unique_id": "macro.dbt.default__make_temp_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__make_temp_relation", "macro_sql": "{% macro default__make_temp_relation(base_relation, suffix) %}\n {%- set temp_identifier = base_relation.identifier ~ suffix -%}\n {%- set temp_relation = base_relation.incorporate(\n path={\"identifier\": temp_identifier}) -%}\n\n {{ return(temp_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.595192, "supported_languages": null}, "macro.dbt.make_backup_relation": {"unique_id": "macro.dbt.make_backup_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "make_backup_relation", "macro_sql": "{% macro make_backup_relation(base_relation, backup_relation_type, suffix='__dbt_backup') %}\n {{ return(adapter.dispatch('make_backup_relation', 'dbt')(base_relation, backup_relation_type, suffix)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_backup_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.595815, "supported_languages": null}, "macro.dbt.default__make_backup_relation": {"unique_id": "macro.dbt.default__make_backup_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__make_backup_relation", "macro_sql": "{% macro default__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {%- set backup_identifier = base_relation.identifier ~ suffix -%}\n {%- set backup_relation = base_relation.incorporate(\n path={\"identifier\": backup_identifier},\n type=backup_relation_type\n ) -%}\n {{ return(backup_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5966232, "supported_languages": null}, "macro.dbt.drop_relation": {"unique_id": "macro.dbt.drop_relation", "package_name": 
"dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "drop_relation", "macro_sql": "{% macro drop_relation(relation) -%}\n {{ return(adapter.dispatch('drop_relation', 'dbt')(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__drop_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.597093, "supported_languages": null}, "macro.dbt.default__drop_relation": {"unique_id": "macro.dbt.default__drop_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__drop_relation", "macro_sql": "{% macro default__drop_relation(relation) -%}\n {% call statement('drop_relation', auto_begin=False) -%}\n drop {{ relation.type }} if exists {{ relation }} cascade\n {%- endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.597645, "supported_languages": null}, "macro.dbt.truncate_relation": {"unique_id": "macro.dbt.truncate_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "truncate_relation", "macro_sql": "{% macro truncate_relation(relation) -%}\n {{ return(adapter.dispatch('truncate_relation', 'dbt')(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__truncate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.598109, "supported_languages": null}, "macro.dbt.default__truncate_relation": {"unique_id": "macro.dbt.default__truncate_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__truncate_relation", "macro_sql": "{% macro default__truncate_relation(relation) -%}\n {% call statement('truncate_relation') -%}\n truncate table {{ relation }}\n {%- endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.598536, "supported_languages": null}, "macro.dbt.rename_relation": {"unique_id": "macro.dbt.rename_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "rename_relation", "macro_sql": "{% macro rename_relation(from_relation, to_relation) -%}\n {{ return(adapter.dispatch('rename_relation', 'dbt')(from_relation, to_relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__rename_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5990558, "supported_languages": null}, "macro.dbt.default__rename_relation": {"unique_id": "macro.dbt.default__rename_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__rename_relation", "macro_sql": "{% macro 
default__rename_relation(from_relation, to_relation) -%}\n {% set target_name = adapter.quote_as_configured(to_relation.identifier, 'identifier') %}\n {% call statement('rename_relation') -%}\n alter table {{ from_relation }} rename to {{ target_name }}\n {%- endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.60004, "supported_languages": null}, "macro.dbt.get_or_create_relation": {"unique_id": "macro.dbt.get_or_create_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "get_or_create_relation", "macro_sql": "{% macro get_or_create_relation(database, schema, identifier, type) -%}\n {{ return(adapter.dispatch('get_or_create_relation', 'dbt')(database, schema, identifier, type)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_or_create_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.600681, "supported_languages": null}, "macro.dbt.default__get_or_create_relation": {"unique_id": "macro.dbt.default__get_or_create_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__get_or_create_relation", "macro_sql": "{% macro default__get_or_create_relation(database, schema, identifier, type) %}\n {%- set target_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n\n {% if target_relation %}\n {% do return([true, target_relation]) %}\n {% endif %}\n\n {%- set new_relation = api.Relation.create(\n database=database,\n schema=schema,\n identifier=identifier,\n type=type\n ) -%}\n {% do return([false, new_relation]) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.602067, "supported_languages": null}, "macro.dbt.load_cached_relation": {"unique_id": "macro.dbt.load_cached_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "load_cached_relation", "macro_sql": "{% macro load_cached_relation(relation) %}\n {% do return(adapter.get_relation(\n database=relation.database,\n schema=relation.schema,\n identifier=relation.identifier\n )) -%}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.602644, "supported_languages": null}, "macro.dbt.load_relation": {"unique_id": "macro.dbt.load_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "load_relation", "macro_sql": "{% macro load_relation(relation) %}\n {{ return(load_cached_relation(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.load_cached_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.603012, "supported_languages": null}, 
"macro.dbt.drop_relation_if_exists": {"unique_id": "macro.dbt.drop_relation_if_exists", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "drop_relation_if_exists", "macro_sql": "{% macro drop_relation_if_exists(relation) %}\n {% if relation is not none %}\n {{ adapter.drop_relation(relation) }}\n {% endif %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.603508, "supported_languages": null}, "macro.dbt.collect_freshness": {"unique_id": "macro.dbt.collect_freshness", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "name": "collect_freshness", "macro_sql": "{% macro collect_freshness(source, loaded_at_field, filter) %}\n {{ return(adapter.dispatch('collect_freshness', 'dbt')(source, loaded_at_field, filter))}}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__collect_freshness"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6050408, "supported_languages": null}, "macro.dbt.default__collect_freshness": {"unique_id": "macro.dbt.default__collect_freshness", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "name": "default__collect_freshness", "macro_sql": "{% macro default__collect_freshness(source, loaded_at_field, filter) %}\n {% call statement('collect_freshness', fetch_result=True, auto_begin=False) -%}\n select\n max({{ loaded_at_field }}) as max_loaded_at,\n {{ current_timestamp() }} as snapshotted_at\n from {{ source }}\n {% if filter %}\n where {{ filter }}\n {% endif %}\n {% endcall %}\n {{ return(load_result('collect_freshness').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.606111, "supported_languages": null}, "macro.dbt.copy_grants": {"unique_id": "macro.dbt.copy_grants", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "copy_grants", "macro_sql": "{% macro copy_grants() %}\n {{ return(adapter.dispatch('copy_grants', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.609764, "supported_languages": null}, "macro.dbt.default__copy_grants": {"unique_id": "macro.dbt.default__copy_grants", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__copy_grants", "macro_sql": "{% macro default__copy_grants() %}\n {{ return(True) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.610076, "supported_languages": null}, 
"macro.dbt.support_multiple_grantees_per_dcl_statement": {"unique_id": "macro.dbt.support_multiple_grantees_per_dcl_statement", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "support_multiple_grantees_per_dcl_statement", "macro_sql": "{% macro support_multiple_grantees_per_dcl_statement() %}\n {{ return(adapter.dispatch('support_multiple_grantees_per_dcl_statement', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.610507, "supported_languages": null}, "macro.dbt.default__support_multiple_grantees_per_dcl_statement": {"unique_id": "macro.dbt.default__support_multiple_grantees_per_dcl_statement", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__support_multiple_grantees_per_dcl_statement", "macro_sql": "\n\n{%- macro default__support_multiple_grantees_per_dcl_statement() -%}\n {{ return(True) }}\n{%- endmacro -%}\n\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6107981, "supported_languages": null}, "macro.dbt.should_revoke": {"unique_id": "macro.dbt.should_revoke", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "should_revoke", "macro_sql": "{% macro should_revoke(existing_relation, full_refresh_mode=True) %}\n\n {% if not existing_relation %}\n {#-- The table doesn't already exist, so no grants to copy over --#}\n {{ return(False) }}\n {% elif full_refresh_mode %}\n {#-- The object is being REPLACED -- whether grants are copied over depends on the value of user config --#}\n {{ return(copy_grants()) }}\n {% else %}\n {#-- The table is being merged/upserted/inserted -- grants will be carried over --#}\n {{ return(True) }}\n {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.611671, "supported_languages": null}, "macro.dbt.get_show_grant_sql": {"unique_id": "macro.dbt.get_show_grant_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "get_show_grant_sql", "macro_sql": "{% macro get_show_grant_sql(relation) %}\n {{ return(adapter.dispatch(\"get_show_grant_sql\", \"dbt\")(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_show_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.612134, "supported_languages": null}, "macro.dbt.default__get_show_grant_sql": {"unique_id": "macro.dbt.default__get_show_grant_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__get_show_grant_sql", "macro_sql": "{% macro default__get_show_grant_sql(relation) %}\n show grants on {{ 
relation }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.612397, "supported_languages": null}, "macro.dbt.get_grant_sql": {"unique_id": "macro.dbt.get_grant_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "get_grant_sql", "macro_sql": "{% macro get_grant_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_grant_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6130052, "supported_languages": null}, "macro.dbt.default__get_grant_sql": {"unique_id": "macro.dbt.default__get_grant_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__get_grant_sql", "macro_sql": "\n\n{%- macro default__get_grant_sql(relation, privilege, grantees) -%}\n grant {{ privilege }} on {{ relation }} to {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.61358, "supported_languages": null}, "macro.dbt.get_revoke_sql": {"unique_id": "macro.dbt.get_revoke_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "get_revoke_sql", "macro_sql": "{% macro get_revoke_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_revoke_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_revoke_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6141498, "supported_languages": null}, "macro.dbt.default__get_revoke_sql": {"unique_id": "macro.dbt.default__get_revoke_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__get_revoke_sql", "macro_sql": "\n\n{%- macro default__get_revoke_sql(relation, privilege, grantees) -%}\n revoke {{ privilege }} on {{ relation }} from {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.61463, "supported_languages": null}, "macro.dbt.get_dcl_statement_list": {"unique_id": "macro.dbt.get_dcl_statement_list", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "get_dcl_statement_list", "macro_sql": "{% macro get_dcl_statement_list(relation, grant_config, get_dcl_macro) %}\n {{ return(adapter.dispatch('get_dcl_statement_list', 'dbt')(relation, grant_config, get_dcl_macro)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_dcl_statement_list"]}, 
"description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6152372, "supported_languages": null}, "macro.dbt.default__get_dcl_statement_list": {"unique_id": "macro.dbt.default__get_dcl_statement_list", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__get_dcl_statement_list", "macro_sql": "\n\n{%- macro default__get_dcl_statement_list(relation, grant_config, get_dcl_macro) -%}\n {#\n -- Unpack grant_config into specific privileges and the set of users who need them granted/revoked.\n -- Depending on whether this database supports multiple grantees per statement, pass in the list of\n -- all grantees per privilege, or (if not) template one statement per privilege-grantee pair.\n -- `get_dcl_macro` will be either `get_grant_sql` or `get_revoke_sql`\n #}\n {%- set dcl_statements = [] -%}\n {%- for privilege, grantees in grant_config.items() %}\n {%- if support_multiple_grantees_per_dcl_statement() and grantees -%}\n {%- set dcl = get_dcl_macro(relation, privilege, grantees) -%}\n {%- do dcl_statements.append(dcl) -%}\n {%- else -%}\n {%- for grantee in grantees -%}\n {% set dcl = get_dcl_macro(relation, privilege, [grantee]) %}\n {%- do dcl_statements.append(dcl) -%}\n {% endfor -%}\n {%- endif -%}\n {%- endfor -%}\n {{ return(dcl_statements) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.617053, "supported_languages": null}, "macro.dbt.call_dcl_statements": {"unique_id": "macro.dbt.call_dcl_statements", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "call_dcl_statements", "macro_sql": "{% macro call_dcl_statements(dcl_statement_list) %}\n {{ return(adapter.dispatch(\"call_dcl_statements\", \"dbt\")(dcl_statement_list)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6175182, "supported_languages": null}, "macro.dbt.default__call_dcl_statements": {"unique_id": "macro.dbt.default__call_dcl_statements", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__call_dcl_statements", "macro_sql": "{% macro default__call_dcl_statements(dcl_statement_list) %}\n {#\n -- By default, supply all grant + revoke statements in a single semicolon-separated block,\n -- so that they're all processed together.\n\n -- Some databases do not support this. 
Those adapters will need to override this macro\n -- to run each statement individually.\n #}\n {% call statement('grants') %}\n {% for dcl_statement in dcl_statement_list %}\n {{ dcl_statement }};\n {% endfor %}\n {% endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.61847, "supported_languages": null}, "macro.dbt.apply_grants": {"unique_id": "macro.dbt.apply_grants", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "apply_grants", "macro_sql": "{% macro apply_grants(relation, grant_config, should_revoke) %}\n {{ return(adapter.dispatch(\"apply_grants\", \"dbt\")(relation, grant_config, should_revoke)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.619071, "supported_languages": null}, "macro.dbt.default__apply_grants": {"unique_id": "macro.dbt.default__apply_grants", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__apply_grants", "macro_sql": "{% macro default__apply_grants(relation, grant_config, should_revoke=True) %}\n {#-- If grant_config is {} or None, this is a no-op --#}\n {% if grant_config %}\n {% if should_revoke %}\n {#-- We think previous grants may have carried over --#}\n {#-- Show current grants and calculate diffs --#}\n {% set current_grants_table = run_query(get_show_grant_sql(relation)) %}\n {% set current_grants_dict = adapter.standardize_grants_dict(current_grants_table) %}\n {% set needs_granting = diff_of_two_dicts(grant_config, current_grants_dict) %}\n {% set needs_revoking = diff_of_two_dicts(current_grants_dict, grant_config) %}\n {% if not (needs_granting or needs_revoking) %}\n {{ log('On ' ~ relation ~': All grants are in place, no revocation or granting needed.')}}\n {% endif %}\n {% else %}\n {#-- We don't think there's any chance of previous grants having carried over. --#}\n {#-- Jump straight to granting what the user has configured. 
--#}\n {% set needs_revoking = {} %}\n {% set needs_granting = grant_config %}\n {% endif %}\n {% if needs_granting or needs_revoking %}\n {% set revoke_statement_list = get_dcl_statement_list(relation, needs_revoking, get_revoke_sql) %}\n {% set grant_statement_list = get_dcl_statement_list(relation, needs_granting, get_grant_sql) %}\n {% set dcl_statement_list = revoke_statement_list + grant_statement_list %}\n {% if dcl_statement_list %}\n {{ call_dcl_statements(dcl_statement_list) }}\n {% endif %}\n {% endif %}\n {% endif %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.get_show_grant_sql", "macro.dbt.get_dcl_statement_list", "macro.dbt.call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6220548, "supported_languages": null}, "macro.dbt.alter_column_comment": {"unique_id": "macro.dbt.alter_column_comment", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "alter_column_comment", "macro_sql": "{% macro alter_column_comment(relation, column_dict) -%}\n {{ return(adapter.dispatch('alter_column_comment', 'dbt')(relation, column_dict)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.623905, "supported_languages": null}, "macro.dbt.default__alter_column_comment": {"unique_id": "macro.dbt.default__alter_column_comment", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "default__alter_column_comment", "macro_sql": "{% macro default__alter_column_comment(relation, column_dict) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_column_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.62433, "supported_languages": null}, "macro.dbt.alter_relation_comment": {"unique_id": "macro.dbt.alter_relation_comment", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "alter_relation_comment", "macro_sql": "{% macro alter_relation_comment(relation, relation_comment) -%}\n {{ return(adapter.dispatch('alter_relation_comment', 'dbt')(relation, relation_comment)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_relation_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6248372, "supported_languages": null}, "macro.dbt.default__alter_relation_comment": {"unique_id": "macro.dbt.default__alter_relation_comment", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "default__alter_relation_comment", "macro_sql": "{% macro default__alter_relation_comment(relation, relation_comment) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_relation_comment macro not implemented for adapter 
'+adapter.type()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.62526, "supported_languages": null}, "macro.dbt.persist_docs": {"unique_id": "macro.dbt.persist_docs", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "persist_docs", "macro_sql": "{% macro persist_docs(relation, model, for_relation=true, for_columns=true) -%}\n {{ return(adapter.dispatch('persist_docs', 'dbt')(relation, model, for_relation, for_columns)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.625922, "supported_languages": null}, "macro.dbt.default__persist_docs": {"unique_id": "macro.dbt.default__persist_docs", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "default__persist_docs", "macro_sql": "{% macro default__persist_docs(relation, model, for_relation, for_columns) -%}\n {% if for_relation and config.persist_relation_docs() and model.description %}\n {% do run_query(alter_relation_comment(relation, model.description)) %}\n {% endif %}\n\n {% if for_columns and config.persist_column_docs() and model.columns %}\n {% do run_query(alter_column_comment(relation, model.columns)) %}\n {% endif %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.alter_relation_comment", "macro.dbt.alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.627101, "supported_languages": null}, "macro.dbt.get_catalog": {"unique_id": "macro.dbt.get_catalog", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "get_catalog", "macro_sql": "{% macro get_catalog(information_schema, schemas) -%}\n {{ return(adapter.dispatch('get_catalog', 'dbt')(information_schema, schemas)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_catalog"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.630411, "supported_languages": null}, "macro.dbt.default__get_catalog": {"unique_id": "macro.dbt.default__get_catalog", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "default__get_catalog", "macro_sql": "{% macro default__get_catalog(information_schema, schemas) -%}\n\n {% set typename = adapter.type() %}\n {% set msg -%}\n get_catalog not implemented for {{ typename }}\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.631063, "supported_languages": null}, "macro.dbt.information_schema_name": {"unique_id": "macro.dbt.information_schema_name", "package_name": "dbt", "path": "macros/adapters/metadata.sql", 
"original_file_path": "macros/adapters/metadata.sql", "name": "information_schema_name", "macro_sql": "{% macro information_schema_name(database) %}\n {{ return(adapter.dispatch('information_schema_name', 'dbt')(database)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__information_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.631524, "supported_languages": null}, "macro.dbt.default__information_schema_name": {"unique_id": "macro.dbt.default__information_schema_name", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "default__information_schema_name", "macro_sql": "{% macro default__information_schema_name(database) -%}\n {%- if database -%}\n {{ database }}.INFORMATION_SCHEMA\n {%- else -%}\n INFORMATION_SCHEMA\n {%- endif -%}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6319208, "supported_languages": null}, "macro.dbt.list_schemas": {"unique_id": "macro.dbt.list_schemas", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "list_schemas", "macro_sql": "{% macro list_schemas(database) -%}\n {{ return(adapter.dispatch('list_schemas', 'dbt')(database)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.632376, "supported_languages": null}, "macro.dbt.default__list_schemas": {"unique_id": "macro.dbt.default__list_schemas", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "default__list_schemas", "macro_sql": "{% macro default__list_schemas(database) -%}\n {% set sql %}\n select distinct schema_name\n from {{ information_schema_name(database) }}.SCHEMATA\n where catalog_name ilike '{{ database }}'\n {% endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.information_schema_name", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.633168, "supported_languages": null}, "macro.dbt.check_schema_exists": {"unique_id": "macro.dbt.check_schema_exists", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "check_schema_exists", "macro_sql": "{% macro check_schema_exists(information_schema, schema) -%}\n {{ return(adapter.dispatch('check_schema_exists', 'dbt')(information_schema, schema)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__check_schema_exists"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.633694, "supported_languages": null}, "macro.dbt.default__check_schema_exists": {"unique_id": "macro.dbt.default__check_schema_exists", "package_name": "dbt", "path": "macros/adapters/metadata.sql", 
"original_file_path": "macros/adapters/metadata.sql", "name": "default__check_schema_exists", "macro_sql": "{% macro default__check_schema_exists(information_schema, schema) -%}\n {% set sql -%}\n select count(*)\n from {{ information_schema.replace(information_schema_view='SCHEMATA') }}\n where catalog_name='{{ information_schema.database }}'\n and schema_name='{{ schema }}'\n {%- endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.replace", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.63451, "supported_languages": null}, "macro.dbt.list_relations_without_caching": {"unique_id": "macro.dbt.list_relations_without_caching", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "list_relations_without_caching", "macro_sql": "{% macro list_relations_without_caching(schema_relation) %}\n {{ return(adapter.dispatch('list_relations_without_caching', 'dbt')(schema_relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_relations_without_caching"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.634974, "supported_languages": null}, "macro.dbt.default__list_relations_without_caching": {"unique_id": "macro.dbt.default__list_relations_without_caching", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "default__list_relations_without_caching", "macro_sql": "{% macro default__list_relations_without_caching(schema_relation) %}\n {{ exceptions.raise_not_implemented(\n 'list_relations_without_caching macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.635378, "supported_languages": null}, "macro.dbt.get_columns_in_relation": {"unique_id": "macro.dbt.get_columns_in_relation", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "get_columns_in_relation", "macro_sql": "{% macro get_columns_in_relation(relation) -%}\n {{ return(adapter.dispatch('get_columns_in_relation', 'dbt')(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.63885, "supported_languages": null}, "macro.dbt.default__get_columns_in_relation": {"unique_id": "macro.dbt.default__get_columns_in_relation", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "default__get_columns_in_relation", "macro_sql": "{% macro default__get_columns_in_relation(relation) -%}\n {{ exceptions.raise_not_implemented(\n 'get_columns_in_relation macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, 
"patch_path": null, "arguments": [], "created_at": 1667573258.639395, "supported_languages": null}, "macro.dbt.sql_convert_columns_in_relation": {"unique_id": "macro.dbt.sql_convert_columns_in_relation", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "sql_convert_columns_in_relation", "macro_sql": "{% macro sql_convert_columns_in_relation(table) -%}\n {% set columns = [] %}\n {% for row in table %}\n {% do columns.append(api.Column(*row)) %}\n {% endfor %}\n {{ return(columns) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6401641, "supported_languages": null}, "macro.dbt.get_columns_in_query": {"unique_id": "macro.dbt.get_columns_in_query", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "get_columns_in_query", "macro_sql": "{% macro get_columns_in_query(select_sql) -%}\n {{ return(adapter.dispatch('get_columns_in_query', 'dbt')(select_sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.640618, "supported_languages": null}, "macro.dbt.default__get_columns_in_query": {"unique_id": "macro.dbt.default__get_columns_in_query", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "default__get_columns_in_query", "macro_sql": "{% macro default__get_columns_in_query(select_sql) %}\n {% call statement('get_columns_in_query', fetch_result=True, auto_begin=False) -%}\n select * from (\n {{ select_sql }}\n ) as __dbt_sbq\n where false\n limit 0\n {% endcall %}\n\n {{ return(load_result('get_columns_in_query').table.columns | map(attribute='name') | list) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.64155, "supported_languages": null}, "macro.dbt.alter_column_type": {"unique_id": "macro.dbt.alter_column_type", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "alter_column_type", "macro_sql": "{% macro alter_column_type(relation, column_name, new_column_type) -%}\n {{ return(adapter.dispatch('alter_column_type', 'dbt')(relation, column_name, new_column_type)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.642115, "supported_languages": null}, "macro.dbt.default__alter_column_type": {"unique_id": "macro.dbt.default__alter_column_type", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "default__alter_column_type", "macro_sql": "{% macro default__alter_column_type(relation, column_name, new_column_type) -%}\n {#\n 1. Create a new column (w/ temp name and correct type)\n 2. Copy data over to it\n 3. Drop the existing column (cascade!)\n 4. 
Rename the new column to existing column\n #}\n {%- set tmp_column = column_name + \"__dbt_alter\" -%}\n\n {% call statement('alter_column_type') %}\n alter table {{ relation }} add column {{ adapter.quote(tmp_column) }} {{ new_column_type }};\n update {{ relation }} set {{ adapter.quote(tmp_column) }} = {{ adapter.quote(column_name) }};\n alter table {{ relation }} drop column {{ adapter.quote(column_name) }} cascade;\n alter table {{ relation }} rename column {{ adapter.quote(tmp_column) }} to {{ adapter.quote(column_name) }}\n {% endcall %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.643635, "supported_languages": null}, "macro.dbt.alter_relation_add_remove_columns": {"unique_id": "macro.dbt.alter_relation_add_remove_columns", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "alter_relation_add_remove_columns", "macro_sql": "{% macro alter_relation_add_remove_columns(relation, add_columns = none, remove_columns = none) -%}\n {{ return(adapter.dispatch('alter_relation_add_remove_columns', 'dbt')(relation, add_columns, remove_columns)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__alter_relation_add_remove_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.644268, "supported_languages": null}, "macro.dbt.default__alter_relation_add_remove_columns": {"unique_id": "macro.dbt.default__alter_relation_add_remove_columns", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "default__alter_relation_add_remove_columns", "macro_sql": "{% macro default__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}\n\n {% if add_columns is none %}\n {% set add_columns = [] %}\n {% endif %}\n {% if remove_columns is none %}\n {% set remove_columns = [] %}\n {% endif %}\n\n {% set sql -%}\n\n alter {{ relation.type }} {{ relation }}\n\n {% for column in add_columns %}\n add column {{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}\n {% endfor %}{{ ',' if add_columns and remove_columns }}\n\n {% for column in remove_columns %}\n drop column {{ column.name }}{{ ',' if not loop.last }}\n {% endfor %}\n\n {%- endset -%}\n\n {% do run_query(sql) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.646218, "supported_languages": null}, "macro.dbt.build_ref_function": {"unique_id": "macro.dbt.build_ref_function", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "build_ref_function", "macro_sql": "{% macro build_ref_function(model) %}\n\n {%- set ref_dict = {} -%}\n {%- for _ref in model.refs -%}\n {%- set resolved = ref(*_ref) -%}\n {%- do ref_dict.update({_ref | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef ref(*args,dbt_load_df_function):\n refs = {{ ref_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(refs[key])\n\n{% 
endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.649544, "supported_languages": null}, "macro.dbt.build_source_function": {"unique_id": "macro.dbt.build_source_function", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "build_source_function", "macro_sql": "{% macro build_source_function(model) %}\n\n {%- set source_dict = {} -%}\n {%- for _source in model.sources -%}\n {%- set resolved = source(*_source) -%}\n {%- do source_dict.update({_source | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef source(*args, dbt_load_df_function):\n sources = {{ source_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(sources[key])\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6507342, "supported_languages": null}, "macro.dbt.build_config_dict": {"unique_id": "macro.dbt.build_config_dict", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "build_config_dict", "macro_sql": "{% macro build_config_dict(model) %}\n {%- set config_dict = {} -%}\n {%- for key in model.config.config_keys_used -%}\n {# weird type testing with enum, would be much easier to write this logic in Python! #}\n {%- if key == 'language' -%}\n {%- set value = 'python' -%}\n {%- endif -%}\n {%- set value = model.config[key] -%}\n {%- do config_dict.update({key: value}) -%}\n {%- endfor -%}\nconfig_dict = {{ config_dict }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.652128, "supported_languages": null}, "macro.dbt.py_script_postfix": {"unique_id": "macro.dbt.py_script_postfix", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "py_script_postfix", "macro_sql": "{% macro py_script_postfix(model) %}\n# This part is user provided model code\n# you will need to copy the next section to run the code\n# COMMAND ----------\n# this part is dbt logic for get ref work, do not modify\n\n{{ build_ref_function(model ) }}\n{{ build_source_function(model ) }}\n{{ build_config_dict(model) }}\n\nclass config:\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def get(key, default=None):\n return config_dict.get(key, default)\n\nclass this:\n \"\"\"dbt.this() or dbt.this.identifier\"\"\"\n database = '{{ this.database }}'\n schema = '{{ this.schema }}'\n identifier = '{{ this.identifier }}'\n def __repr__(self):\n return '{{ this }}'\n\n\nclass dbtObj:\n def __init__(self, load_df_function) -> None:\n self.source = lambda *args: source(*args, dbt_load_df_function=load_df_function)\n self.ref = lambda *args: ref(*args, dbt_load_df_function=load_df_function)\n self.config = config\n self.this = this()\n self.is_incremental = {{ is_incremental() }}\n\n# COMMAND ----------\n{{py_script_comment()}}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.build_ref_function", 
"macro.dbt.build_source_function", "macro.dbt.build_config_dict", "macro.dbt.is_incremental", "macro.dbt.py_script_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.653216, "supported_languages": null}, "macro.dbt.py_script_comment": {"unique_id": "macro.dbt.py_script_comment", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "py_script_comment", "macro_sql": "{%macro py_script_comment()%}\n{%endmacro%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.653416, "supported_languages": null}, "macro.dbt.test_unique": {"unique_id": "macro.dbt.test_unique", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "name": "test_unique", "macro_sql": "{% test unique(model, column_name) %}\n {% set macro = adapter.dispatch('test_unique', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__test_unique"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.655508, "supported_languages": null}, "macro.dbt.test_not_null": {"unique_id": "macro.dbt.test_not_null", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "name": "test_not_null", "macro_sql": "{% test not_null(model, column_name) %}\n {% set macro = adapter.dispatch('test_not_null', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__test_not_null"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.656121, "supported_languages": null}, "macro.dbt.test_accepted_values": {"unique_id": "macro.dbt.test_accepted_values", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "name": "test_accepted_values", "macro_sql": "{% test accepted_values(model, column_name, values, quote=True) %}\n {% set macro = adapter.dispatch('test_accepted_values', 'dbt') %}\n {{ macro(model, column_name, values, quote) }}\n{% endtest %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__test_accepted_values"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6568232, "supported_languages": null}, "macro.dbt.test_relationships": {"unique_id": "macro.dbt.test_relationships", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "name": "test_relationships", "macro_sql": "{% test relationships(model, column_name, to, field) %}\n {% set macro = adapter.dispatch('test_relationships', 'dbt') %}\n {{ macro(model, column_name, to, field) }}\n{% endtest %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__test_relationships"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.657504, "supported_languages": null}}, "docs": 
{"dbt.__overview__": {"unique_id": "dbt.__overview__", "package_name": "dbt", "path": "overview.md", "original_file_path": "docs/overview.md", "name": "__overview__", "block_contents": "### Welcome!\n\nWelcome to the auto-generated documentation for your dbt project!\n\n### Navigation\n\nYou can use the `Project` and `Database` navigation tabs on the left side of the window to explore the models\nin your project.\n\n#### Project Tab\nThe `Project` tab mirrors the directory structure of your dbt project. In this tab, you can see all of the\nmodels defined in your dbt project, as well as models imported from dbt packages.\n\n#### Database Tab\nThe `Database` tab also exposes your models, but in a format that looks more like a database explorer. This view\nshows relations (tables and views) grouped into database schemas. Note that ephemeral models are _not_ shown\nin this interface, as they do not exist in the database.\n\n### Graph Exploration\nYou can click the blue icon on the bottom-right corner of the page to view the lineage graph of your models.\n\nOn model pages, you'll see the immediate parents and children of the model you're exploring. By clicking the `Expand`\nbutton at the top-right of this lineage pane, you'll be able to see all of the models that are used to build,\nor are built from, the model you're exploring.\n\nOnce expanded, you'll be able to use the `--select` and `--exclude` model selection syntax to filter the\nmodels in the graph. For more information on model selection, check out the [dbt docs](https://docs.getdbt.com/docs/model-selection-syntax).\n\nNote that you can also right-click on models to interactively filter and explore the graph.\n\n---\n\n### More information\n\n- [What is dbt](https://docs.getdbt.com/docs/introduction)?\n- Read the [dbt viewpoint](https://docs.getdbt.com/docs/viewpoint)\n- [Installation](https://docs.getdbt.com/docs/installation)\n- Join the [dbt Community](https://www.getdbt.com/community/) for questions and discussion"}}, "exposures": {}, "metrics": {"metric.test.my_metric": {"fqn": ["test", "my_metric"], "unique_id": "metric.test.my_metric", "package_name": "test", "path": "metric.yml", "original_file_path": "models/metric.yml", "name": "my_metric", "description": "", "label": "Count records", "calculation_method": "count", "timestamp": "updated_at", "expression": "*", "filters": [], "time_grains": ["day"], "dimensions": [], "window": null, "model": "ref('my_model')", "model_unique_id": null, "resource_type": "metric", "meta": {}, "tags": [], "config": {"enabled": true}, "unrendered_config": {}, "sources": [], "depends_on": {"macros": [], "nodes": ["model.test.my_model"]}, "refs": [["my_model"]], "metrics": [], "created_at": 1667573259.027725}}, "selectors": {}, "disabled": {}, "parent_map": {"model.test.my_model": [], "metric.test.my_metric": ["model.test.my_model"]}, "child_map": {"model.test.my_model": ["metric.test.my_metric"], "metric.test.my_metric": []}} diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 32c9dcfbfa1..fabd960dd94 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -241,7 +241,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "build_path": None, "created_at": ANY, "name": "model", - "root_path": project.project_root, "relation_name": relation_name_node_format.format( model_database, my_schema_name, "model" ), @@ -321,7 +320,6 @@ def 
expected_seeded_manifest(project, model_database=None, quote_model=False): "build_path": None, "created_at": ANY, "name": "second_model", - "root_path": project.project_root, "relation_name": relation_name_node_format.format( project.database, alternate_schema, "second_model" ), @@ -510,7 +508,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, "tags": [], @@ -571,7 +568,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): project.database, alternate_schema, "snapshot_seed" ), "resource_type": "snapshot", - "root_path": project.project_root, "schema": alternate_schema, "sources": [], "tags": [], @@ -608,7 +604,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, "tags": [], @@ -659,7 +654,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "refs": [["model"]], "relation_name": None, "resource_type": "test", - "root_path": project.project_root, "schema": test_audit_schema, "database": project.database, "tags": [], @@ -725,7 +719,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): project.database, my_schema_name, "seed" ), "resource_type": "source", - "root_path": project.project_root, "schema": my_schema_name, "source_description": "My source", "source_name": "my_source", @@ -759,7 +752,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "path": "schema.yml", "refs": [["model"], ["second_model"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [], "type": "notebook", "unique_id": "exposure.test.notebook_exposure", @@ -788,7 +780,6 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "path": "schema.yml", "refs": [["model"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [["my_source", "my_table"]], "type": "dashboard", "unique_id": "exposure.test.simple_exposure", @@ -892,7 +883,6 @@ def expected_references_manifest(project): "refs": [], "relation_name": None, "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "database": project.database, "tags": [], @@ -948,7 +938,6 @@ def expected_references_manifest(project): model_database, my_schema_name ), "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "database": project.database, "tags": [], @@ -1002,7 +991,6 @@ def expected_references_manifest(project): "refs": [["ephemeral_summary"]], "relation_name": '"{0}"."{1}".view_summary'.format(model_database, my_schema_name), "resource_type": "model", - "root_path": project.project_root, "schema": my_schema_name, "sources": [], "tags": [], @@ -1125,7 +1113,6 @@ def expected_references_manifest(project): model_database, alternate_schema ), "resource_type": "snapshot", - "root_path": project.project_root, "schema": alternate_schema, "sources": [], "tags": [], @@ -1176,7 +1163,6 @@ def expected_references_manifest(project): "patch_path": None, "relation_name": '{0}."{1}"."seed"'.format(project.database, my_schema_name), "resource_type": "source", - "root_path": project.project_root, "schema": my_schema_name, "source_description": 
"My source", "source_name": "my_source", @@ -1207,7 +1193,6 @@ def expected_references_manifest(project): "path": "schema.yml", "refs": [["view_summary"]], "resource_type": "exposure", - "root_path": project.project_root, "sources": [], "type": "notebook", "unique_id": "exposure.test.notebook_exposure", @@ -1225,7 +1210,6 @@ def expected_references_manifest(project): "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, "unique_id": "test.column_info", }, "test.ephemeral_summary": { @@ -1234,7 +1218,6 @@ def expected_references_manifest(project): "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, "unique_id": "test.ephemeral_summary", }, "test.source_info": { @@ -1243,7 +1226,6 @@ def expected_references_manifest(project): "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, "unique_id": "test.source_info", }, "test.summary_count": { @@ -1252,7 +1234,6 @@ def expected_references_manifest(project): "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, "unique_id": "test.summary_count", }, "test.summary_first_name": { @@ -1261,7 +1242,6 @@ def expected_references_manifest(project): "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, "unique_id": "test.summary_first_name", }, "test.table_info": { @@ -1270,7 +1250,6 @@ def expected_references_manifest(project): "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, "unique_id": "test.table_info", }, "test.view_summary": { @@ -1281,7 +1260,6 @@ def expected_references_manifest(project): "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, "unique_id": "test.view_summary", }, "test.macro_info": { @@ -1290,7 +1268,6 @@ def expected_references_manifest(project): "original_file_path": os.path.join("macros", "macro.md"), "package_name": "test", "path": "macro.md", - "root_path": project.project_root, "unique_id": "test.macro_info", }, "test.notebook_info": { @@ -1299,7 +1276,6 @@ def expected_references_manifest(project): "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "root_path": project.project_root, "unique_id": "test.notebook_info", }, "test.macro_arg_info": { @@ -1308,7 +1284,6 @@ def expected_references_manifest(project): "original_file_path": os.path.join("macros", "macro.md"), "package_name": "test", "path": "macro.md", - "root_path": project.project_root, "unique_id": "test.macro_arg_info", }, }, @@ -1349,7 +1324,6 @@ def expected_references_manifest(project): "resource_type": "macro", "unique_id": "macro.test.test_nothing", "tags": [], - "root_path": project.project_root, "supported_languages": None, "arguments": [ { diff --git a/tests/functional/artifacts/test_previous_version_state.py b/tests/functional/artifacts/test_previous_version_state.py index c835e5a001c..a7a7ed5417c 100644 --- a/tests/functional/artifacts/test_previous_version_state.py +++ b/tests/functional/artifacts/test_previous_version_state.py @@ -42,7 +42,7 @@ class TestPreviousVersionState: - CURRENT_EXPECTED_MANIFEST_VERSION = 7 + CURRENT_EXPECTED_MANIFEST_VERSION = 8 @pytest.fixture(scope="class") def models(self): From 930bd3541e19f160ed79f3cc6c26ea7085ced760 Mon Sep 17 00:00:00 2001 From: Chenyu Li Date: Mon, 7 Nov 2022 
08:44:29 -0800 Subject: [PATCH 023/156] properly track hook running (#6059) --- .../unreleased/Fixes-20221107-095314.yaml | 7 +++++++ core/dbt/contracts/results.py | 4 +++- core/dbt/task/run.py | 21 ++++++++++++------- core/dbt/task/runnable.py | 17 +++++++-------- 4 files changed, 31 insertions(+), 18 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221107-095314.yaml diff --git a/.changes/unreleased/Fixes-20221107-095314.yaml b/.changes/unreleased/Fixes-20221107-095314.yaml new file mode 100644 index 00000000000..f3763b7d039 --- /dev/null +++ b/.changes/unreleased/Fixes-20221107-095314.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Allow hooks to fail without halting execution flow +time: 2022-11-07T09:53:14.340257-06:00 +custom: + Author: ChenyuLInx + Issue: "5625" + PR: "6059" diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index a3b7ce2b506..91eb22a2f85 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -220,7 +220,9 @@ def from_execution_results( generated_at: datetime, args: Dict, ): - processed_results = [process_run_result(result) for result in results] + processed_results = [ + process_run_result(result) for result in results if isinstance(result, RunResult) + ] meta = RunResultsMetadata( dbt_schema_version=str(cls.dbt_schema_version), generated_at=generated_at, diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 93cb2c1569a..4f6bf037d6c 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -20,7 +20,7 @@ from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.model_config import Hook from dbt.contracts.graph.parsed import ParsedHookNode -from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus +from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus, BaseResult from dbt.exceptions import ( CompilationException, InternalException, @@ -394,12 +394,22 @@ def safe_run_hooks( ) -> None: try: self.run_hooks(adapter, hook_type, extra_context) - except RuntimeException: + except RuntimeException as exc: fire_event(DatabaseErrorRunningHook(hook_type=hook_type.value)) - raise + self.node_results.append( + BaseResult( + status=RunStatus.Error, + thread_id="main", + timing=[], + message=f"{hook_type.value} failed, error:\n {exc.msg}", + adapter_response=exc.msg, + execution_time=0, + failures=1, + ) + ) def print_results_line(self, results, execution_time): - nodes = [r.node for r in results] + self.ran_hooks + nodes = [r.node for r in results if hasattr(r, "node")] + self.ran_hooks stat_line = get_counts(nodes) execution = "" @@ -444,9 +454,6 @@ def after_run(self, adapter, results): with adapter.connection_named("master"): self.safe_run_hooks(adapter, RunHookType.End, extras) - def after_hooks(self, adapter, results, elapsed): - self.print_results_line(results, elapsed) - def get_node_selector(self) -> ResourceTypeSelector: if self.manifest is None or self.graph is None: raise InternalException("manifest and graph must be set to get perform node selection") diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index b4ee8152994..0e3e8328b11 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -413,9 +413,6 @@ def populate_adapter_cache(self, adapter, required_schemas: Set[BaseRelation] = {"adapter_cache_construction_elapsed": cache_populate_time} ) - def before_hooks(self, adapter): - pass - def before_run(self, adapter, selected_uids: AbstractSet[str]): with adapter.connection_named("master"): 
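The safe_run_hooks rewrite in this patch swaps re-raising for result capture: a failed hook is appended to node_results as an error and the run continues. The same capture-and-continue pattern, reduced to a self-contained sketch (HookResult and HookRunner are illustrative stand-ins, not dbt's actual BaseResult or task API):

    from dataclasses import dataclass, field
    from typing import Callable, List

    @dataclass
    class HookResult:
        # hypothetical stand-in for dbt's BaseResult
        status: str
        message: str

    @dataclass
    class HookRunner:
        results: List[HookResult] = field(default_factory=list)

        def safe_run(self, hook: Callable[[], None], name: str) -> None:
            try:
                hook()
                self.results.append(HookResult("success", f"{name} ran"))
            except RuntimeError as exc:
                # record the failure instead of halting the whole run
                self.results.append(HookResult("error", f"{name} failed: {exc}"))

    def failing_hook() -> None:
        raise RuntimeError("boom")

    runner = HookRunner()
    runner.safe_run(lambda: None, "on-run-start")
    runner.safe_run(failing_hook, "on-run-end")
    assert [r.status for r in runner.results] == ["success", "error"]

This is also why print_results_line now filters on hasattr(r, "node"): captured hook failures are plain results with no node attached.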
self.populate_adapter_cache(adapter) @@ -423,24 +420,24 @@ def before_run(self, adapter, selected_uids: AbstractSet[str]): def after_run(self, adapter, results): pass - def after_hooks(self, adapter, results, elapsed): + def print_results_line(self, node_results, elapsed): pass def execute_with_hooks(self, selected_uids: AbstractSet[str]): adapter = get_adapter(self.config) + started = time.time() try: - self.before_hooks(adapter) - started = time.time() self.before_run(adapter, selected_uids) res = self.execute_nodes() self.after_run(adapter, res) - elapsed = time.time() - started - self.after_hooks(adapter, res, elapsed) - finally: adapter.cleanup_connections() + elapsed = time.time() - started + self.print_results_line(self.node_results, elapsed) + result = self.get_result( + results=self.node_results, elapsed_time=elapsed, generated_at=datetime.utcnow() + ) - result = self.get_result(results=res, elapsed_time=elapsed, generated_at=datetime.utcnow()) return result def write_result(self, result): From 4e786184d220888e344e6be53255f09d4f361cd1 Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Tue, 8 Nov 2022 08:56:10 -0600 Subject: [PATCH 024/156] Convert threading tests to pytest (#6226) --- .../Under the Hood-20221108-074550.yaml | 7 +++ .../031_thread_count_tests/models/.gitkeep | 0 .../models/do_nothing_1.sql | 1 - .../models/do_nothing_10.sql | 1 - .../models/do_nothing_11.sql | 1 - .../models/do_nothing_12.sql | 1 - .../models/do_nothing_13.sql | 1 - .../models/do_nothing_14.sql | 1 - .../models/do_nothing_15.sql | 1 - .../models/do_nothing_16.sql | 1 - .../models/do_nothing_17.sql | 1 - .../models/do_nothing_18.sql | 1 - .../models/do_nothing_19.sql | 1 - .../models/do_nothing_2.sql | 1 - .../models/do_nothing_20.sql | 1 - .../models/do_nothing_3.sql | 1 - .../models/do_nothing_4.sql | 1 - .../models/do_nothing_5.sql | 1 - .../models/do_nothing_6.sql | 1 - .../models/do_nothing_7.sql | 1 - .../models/do_nothing_8.sql | 1 - .../models/do_nothing_9.sql | 1 - .../test_thread_count.py | 28 ----------- .../functional/threading/test_thread_count.py | 46 +++++++++++++++++++ 24 files changed, 53 insertions(+), 48 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221108-074550.yaml delete mode 100644 test/integration/031_thread_count_tests/models/.gitkeep delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_1.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_10.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_11.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_12.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_13.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_14.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_15.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_16.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_17.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_18.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_19.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_2.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_20.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_3.sql delete mode 100644 
test/integration/031_thread_count_tests/models/do_nothing_4.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_5.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_6.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_7.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_8.sql delete mode 100644 test/integration/031_thread_count_tests/models/do_nothing_9.sql delete mode 100644 test/integration/031_thread_count_tests/test_thread_count.py create mode 100644 tests/functional/threading/test_thread_count.py diff --git a/.changes/unreleased/Under the Hood-20221108-074550.yaml b/.changes/unreleased/Under the Hood-20221108-074550.yaml new file mode 100644 index 00000000000..351887f767a --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221108-074550.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Convert threading tests to pytest +time: 2022-11-08T07:45:50.589147-06:00 +custom: + Author: stu-k + Issue: "5942" + PR: "6226" diff --git a/test/integration/031_thread_count_tests/models/.gitkeep b/test/integration/031_thread_count_tests/models/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/031_thread_count_tests/models/do_nothing_1.sql b/test/integration/031_thread_count_tests/models/do_nothing_1.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_1.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_10.sql b/test/integration/031_thread_count_tests/models/do_nothing_10.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_10.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_11.sql b/test/integration/031_thread_count_tests/models/do_nothing_11.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_11.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_12.sql b/test/integration/031_thread_count_tests/models/do_nothing_12.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_12.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_13.sql b/test/integration/031_thread_count_tests/models/do_nothing_13.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_13.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_14.sql b/test/integration/031_thread_count_tests/models/do_nothing_14.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_14.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_15.sql 
b/test/integration/031_thread_count_tests/models/do_nothing_15.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_15.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_16.sql b/test/integration/031_thread_count_tests/models/do_nothing_16.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_16.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_17.sql b/test/integration/031_thread_count_tests/models/do_nothing_17.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_17.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_18.sql b/test/integration/031_thread_count_tests/models/do_nothing_18.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_18.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_19.sql b/test/integration/031_thread_count_tests/models/do_nothing_19.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_19.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_2.sql b/test/integration/031_thread_count_tests/models/do_nothing_2.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_2.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_20.sql b/test/integration/031_thread_count_tests/models/do_nothing_20.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_20.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_3.sql b/test/integration/031_thread_count_tests/models/do_nothing_3.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_3.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_4.sql b/test/integration/031_thread_count_tests/models/do_nothing_4.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_4.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_5.sql b/test/integration/031_thread_count_tests/models/do_nothing_5.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_5.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select 
pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_6.sql b/test/integration/031_thread_count_tests/models/do_nothing_6.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_6.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_7.sql b/test/integration/031_thread_count_tests/models/do_nothing_7.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_7.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_8.sql b/test/integration/031_thread_count_tests/models/do_nothing_8.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_8.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/models/do_nothing_9.sql b/test/integration/031_thread_count_tests/models/do_nothing_9.sql deleted file mode 100644 index 341741be33b..00000000000 --- a/test/integration/031_thread_count_tests/models/do_nothing_9.sql +++ /dev/null @@ -1 +0,0 @@ -with x as (select pg_sleep(1)) select 1 \ No newline at end of file diff --git a/test/integration/031_thread_count_tests/test_thread_count.py b/test/integration/031_thread_count_tests/test_thread_count.py deleted file mode 100644 index 042e2cd8a94..00000000000 --- a/test/integration/031_thread_count_tests/test_thread_count.py +++ /dev/null @@ -1,28 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestThreadCount(DBTIntegrationTest): - - @property - def project_config(self): - return {'config-version': 2} - - @property - def profile_config(self): - return { - 'threads': 2, - } - - @property - def schema(self): - return "thread_tests_031" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_threading_8x(self): - results = self.run_dbt(args=['run', '--threads', '16']) - self.assertTrue(len(results), 20) diff --git a/tests/functional/threading/test_thread_count.py b/tests/functional/threading/test_thread_count.py new file mode 100644 index 00000000000..c31f5ed6312 --- /dev/null +++ b/tests/functional/threading/test_thread_count.py @@ -0,0 +1,46 @@ +import pytest +from dbt.tests.util import run_dbt + + +models__do_nothing__sql = """ +with x as (select pg_sleep(1)) select 1 +""" + + +class TestThreadCount: + @pytest.fixture(scope="class") + def models(self): + return { + "do_nothing_1.sql": models__do_nothing__sql, + "do_nothing_2.sql": models__do_nothing__sql, + "do_nothing_3.sql": models__do_nothing__sql, + "do_nothing_4.sql": models__do_nothing__sql, + "do_nothing_5.sql": models__do_nothing__sql, + "do_nothing_6.sql": models__do_nothing__sql, + "do_nothing_7.sql": models__do_nothing__sql, + "do_nothing_8.sql": models__do_nothing__sql, + "do_nothing_9.sql": models__do_nothing__sql, + "do_nothing_10.sql": models__do_nothing__sql, + "do_nothing_11.sql": models__do_nothing__sql, + "do_nothing_12.sql": models__do_nothing__sql, + "do_nothing_13.sql": models__do_nothing__sql, + "do_nothing_14.sql": models__do_nothing__sql, + "do_nothing_15.sql": models__do_nothing__sql, + "do_nothing_16.sql": 
models__do_nothing__sql, + "do_nothing_17.sql": models__do_nothing__sql, + "do_nothing_18.sql": models__do_nothing__sql, + "do_nothing_19.sql": models__do_nothing__sql, + "do_nothing_20.sql": models__do_nothing__sql, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return {"config-version": 2} + + @pytest.fixture(scope="class") + def profiles_config_update(self): + return {"threads": 2} + + def test_threading_8x(self, project): + results = run_dbt(args=["run", "--threads", "16"]) + assert len(results) == 20 From d5e9ce1797c70c0dffc3c1a5c5563c7263ea2a86 Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Tue, 8 Nov 2022 15:25:57 -0600 Subject: [PATCH 025/156] Convert color tests to pytest (#6230) --- .../Under the Hood-20221108-133104.yaml | 7 +++ .../models/do_nothing_then_fail.sql | 1 - .../test_no_use_colors.py | 29 ------------- .../061_use_colors_tests/test_use_colors.py | 29 ------------- tests/functional/colors/test_colors.py | 43 +++++++++++++++++++ 5 files changed, 50 insertions(+), 59 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221108-133104.yaml delete mode 100644 test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql delete mode 100644 test/integration/061_use_colors_tests/test_no_use_colors.py delete mode 100644 test/integration/061_use_colors_tests/test_use_colors.py create mode 100644 tests/functional/colors/test_colors.py diff --git a/.changes/unreleased/Under the Hood-20221108-133104.yaml b/.changes/unreleased/Under the Hood-20221108-133104.yaml new file mode 100644 index 00000000000..4aea5ee8cd9 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221108-133104.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Convert use color tests to pytest +time: 2022-11-08T13:31:04.788547-06:00 +custom: + Author: stu-k + Issue: "5771" + PR: "6230" diff --git a/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql b/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql deleted file mode 100644 index 30f1a53ec18..00000000000 --- a/test/integration/061_use_colors_tests/models/do_nothing_then_fail.sql +++ /dev/null @@ -1 +0,0 @@ -select 1, diff --git a/test/integration/061_use_colors_tests/test_no_use_colors.py b/test/integration/061_use_colors_tests/test_no_use_colors.py deleted file mode 100644 index a923c8d855e..00000000000 --- a/test/integration/061_use_colors_tests/test_no_use_colors.py +++ /dev/null @@ -1,29 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile -import logging -import re -import sys - -class TestNoUseColors(DBTIntegrationTest): - - @property - def project_config(self): - return {'config-version': 2} - - @property - def schema(self): - return "use_colors_tests_061" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_no_use_colors(self): - # pattern to match formatted log output - pattern = re.compile(r'\[31m.*|\[33m.*') - - results, stdout = self.run_dbt_and_capture(args=['--no-use-colors', 'run'], expect_pass=False) - - stdout_contains_formatting_characters = bool(pattern.search(stdout)) - self.assertFalse(stdout_contains_formatting_characters) diff --git a/test/integration/061_use_colors_tests/test_use_colors.py b/test/integration/061_use_colors_tests/test_use_colors.py deleted file mode 100644 index 6b3dac6a1f1..00000000000 --- a/test/integration/061_use_colors_tests/test_use_colors.py +++ /dev/null @@ -1,29 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile -import
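One side note on the models fixture added in PATCH 024 above: the twenty identical do_nothing entries can be generated rather than written out by hand. A behaviorally equivalent sketch, not what the patch ships:

    import pytest
    from dbt.tests.util import run_dbt

    models__do_nothing__sql = """
    with x as (select pg_sleep(1)) select 1
    """

    class TestThreadCount:
        @pytest.fixture(scope="class")
        def models(self):
            # do_nothing_1.sql .. do_nothing_20.sql all share the same body
            return {f"do_nothing_{i}.sql": models__do_nothing__sql for i in range(1, 21)}

Writing the entries out keeps the diff greppable; generating them keeps the fixture short. Either shape works with dbt's class-scoped models fixture.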
logging -import re -import sys - -class TestUseColors(DBTIntegrationTest): - - @property - def project_config(self): - return {'config-version': 2} - - @property - def schema(self): - return "use_colors_tests_061" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_use_colors(self): - # pattern to match formatted log output - pattern = re.compile(r'\[31m.*|\[33m.*') - - results, stdout = self.run_dbt_and_capture(args=['--use-colors', 'run'], expect_pass=False) - - stdout_contains_formatting_characters = bool(pattern.search(stdout)) - self.assertTrue(stdout_contains_formatting_characters) diff --git a/tests/functional/colors/test_colors.py b/tests/functional/colors/test_colors.py new file mode 100644 index 00000000000..7e92e039506 --- /dev/null +++ b/tests/functional/colors/test_colors.py @@ -0,0 +1,43 @@ +import pytest +import re +from dbt.tests.util import run_dbt_and_capture + + +models__do_nothing_then_fail_sql = """ +select 1, + +""" + + +@pytest.fixture(scope="class") +def models(): + return {"do_nothing_then_fail.sql": models__do_nothing_then_fail_sql} + + +@pytest.fixture(scope="class") +def project_config_update(): + return {'config-version': 2} + + +class TestColors: + def test_use_colors(self, project): + self.assert_colors_used( + "--use-colors", + expect_colors=True, + ) + + def test_no_use_colors(self, project): + self.assert_colors_used( + "--no-use-colors", + expect_colors=False, + ) + + def assert_colors_used(self, flag, expect_colors): + _, stdout = run_dbt_and_capture(args=[flag, "run"], expect_pass=False) + # pattern to match formatted log output + pattern = re.compile(r"\[31m.*|\[33m.*") + stdout_contains_formatting_characters = bool(pattern.search(stdout)) + if expect_colors: + assert stdout_contains_formatting_characters + else: + assert not stdout_contains_formatting_characters From f02243506df2a9b6851abe6bcfe10af8808d41cc Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Tue, 8 Nov 2022 15:30:29 -0600 Subject: [PATCH 026/156] Convert postgres index tests (#6228) --- .../Under the Hood-20221108-115633.yaml | 7 + .../models-invalid/invalid_columns_type.sql | 10 -- .../models-invalid/invalid_type.sql | 10 -- .../models-invalid/invalid_unique_config.sql | 10 -- .../models-invalid/missing_columns.sql | 10 -- .../models/incremental.sql | 18 --- .../065_postgres_index_tests/models/table.sql | 14 -- .../065_postgres_index_tests/seeds/seed.csv | 4 - .../snapshots/colors.sql | 29 ---- .../test_postgres_indexes.py | 134 ---------------- tests/functional/postgres/fixtures.py | 134 ++++++++++++++++ .../postgres/test_postgres_indexes.py | 149 ++++++++++++++++++ 12 files changed, 290 insertions(+), 239 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221108-115633.yaml delete mode 100644 test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql delete mode 100644 test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql delete mode 100644 test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql delete mode 100644 test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql delete mode 100644 test/integration/065_postgres_index_tests/models/incremental.sql delete mode 100644 test/integration/065_postgres_index_tests/models/table.sql delete mode 100644 test/integration/065_postgres_index_tests/seeds/seed.csv delete mode 100644 test/integration/065_postgres_index_tests/snapshots/colors.sql delete mode 100644 
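On the regex in PATCH 025's new test_colors.py: \[31m.*|\[33m.* matches only the bracketed tail of the red and yellow SGR codes, which is all these assertions need. A stricter matcher would anchor on the escape byte itself; a minimal sketch, independent of the patch:

    import re

    # a full ANSI SGR escape sequence, e.g. "\x1b[31m" (red) or "\x1b[0m" (reset)
    ANSI_SGR = re.compile(r"\x1b\[[0-9;]*m")

    def has_color_codes(text: str) -> bool:
        return bool(ANSI_SGR.search(text))

    assert has_color_codes("\x1b[31mError\x1b[0m")
    assert not has_color_codes("plain output")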
test/integration/065_postgres_index_tests/test_postgres_indexes.py create mode 100644 tests/functional/postgres/fixtures.py create mode 100644 tests/functional/postgres/test_postgres_indexes.py diff --git a/.changes/unreleased/Under the Hood-20221108-115633.yaml b/.changes/unreleased/Under the Hood-20221108-115633.yaml new file mode 100644 index 00000000000..2ba10536728 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221108-115633.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Convert postgres index tests to pytest +time: 2022-11-08T11:56:33.743042-06:00 +custom: + Author: stu-k + Issue: "5770" + PR: "6228" diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql deleted file mode 100644 index 10f41526abd..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/invalid_columns_type.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': 'column_a, column_b'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql deleted file mode 100644 index 824ca36595f..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/invalid_type.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': ['column_a'], 'type': 'non_existent_type'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql b/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql deleted file mode 100644 index ca0113272ea..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/invalid_unique_config.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'columns': ['column_a'], 'unique': 'yes'}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql b/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql deleted file mode 100644 index 9b47943e6cf..00000000000 --- a/test/integration/065_postgres_index_tests/models-invalid/missing_columns.sql +++ /dev/null @@ -1,10 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - {'unique': True}, - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/models/incremental.sql b/test/integration/065_postgres_index_tests/models/incremental.sql deleted file mode 100644 index 7cd24bdcf8c..00000000000 --- a/test/integration/065_postgres_index_tests/models/incremental.sql +++ /dev/null @@ -1,18 +0,0 @@ -{{ - config( - materialized = "incremental", - indexes=[ - {'columns': ['column_a'], 'type': 'hash'}, - {'columns': ['column_a', 'column_b'], 'unique': True}, - ] - ) -}} - -select * -from ( - select 1 as column_a, 2 as column_b -) t - -{% if is_incremental() %} - where column_a > (select max(column_a) from {{this}}) -{% endif %} diff --git a/test/integration/065_postgres_index_tests/models/table.sql b/test/integration/065_postgres_index_tests/models/table.sql deleted file mode 100644 index 39fccc14b15..00000000000 --- a/test/integration/065_postgres_index_tests/models/table.sql +++ /dev/null @@ -1,14 +0,0 @@ -{{ - config( - materialized = "table", - indexes=[ - 
{'columns': ['column_a']}, - {'columns': ['column_b']}, - {'columns': ['column_a', 'column_b']}, - {'columns': ['column_b', 'column_a'], 'type': 'btree', 'unique': True}, - {'columns': ['column_a'], 'type': 'hash'} - ] - ) -}} - -select 1 as column_a, 2 as column_b diff --git a/test/integration/065_postgres_index_tests/seeds/seed.csv b/test/integration/065_postgres_index_tests/seeds/seed.csv deleted file mode 100644 index e744edef675..00000000000 --- a/test/integration/065_postgres_index_tests/seeds/seed.csv +++ /dev/null @@ -1,4 +0,0 @@ -country_code,country_name -US,United States -CA,Canada -GB,United Kingdom diff --git a/test/integration/065_postgres_index_tests/snapshots/colors.sql b/test/integration/065_postgres_index_tests/snapshots/colors.sql deleted file mode 100644 index f3a901d615f..00000000000 --- a/test/integration/065_postgres_index_tests/snapshots/colors.sql +++ /dev/null @@ -1,29 +0,0 @@ -{% snapshot colors %} - - {{ - config( - target_database=database, - target_schema=schema, - unique_key='id', - strategy='check', - check_cols=['color'], - indexes=[ - {'columns': ['id'], 'type': 'hash'}, - {'columns': ['id', 'color'], 'unique': True}, - ] - ) - }} - - {% if var('version') == 1 %} - - select 1 as id, 'red' as color union all - select 2 as id, 'green' as color - - {% else %} - - select 1 as id, 'blue' as color union all - select 2 as id, 'green' as color - - {% endif %} - -{% endsnapshot %} diff --git a/test/integration/065_postgres_index_tests/test_postgres_indexes.py b/test/integration/065_postgres_index_tests/test_postgres_indexes.py deleted file mode 100644 index 56dc557d5ac..00000000000 --- a/test/integration/065_postgres_index_tests/test_postgres_indexes.py +++ /dev/null @@ -1,134 +0,0 @@ -import re - -from test.integration.base import DBTIntegrationTest, use_profile - - -INDEX_DEFINITION_PATTERN = re.compile(r'using\s+(\w+)\s+\((.+)\)\Z') - -class TestPostgresIndex(DBTIntegrationTest): - @property - def schema(self): - return "postgres_index_065" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'quote_columns': False, - 'indexes': [ - {'columns': ['country_code'], 'unique': False, 'type': 'hash'}, - {'columns': ['country_code', 'country_name'], 'unique': True}, - ], - }, - 'vars': { - 'version': 1 - }, - } - - @use_profile('postgres') - def test__postgres__table(self): - results = self.run_dbt(['run', '--models', 'table']) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('table') - self.assertCountEqual( - indexes, - [ - {'columns': 'column_a', 'unique': False, 'type': 'btree'}, - {'columns': 'column_b', 'unique': False, 'type': 'btree'}, - {'columns': 'column_a, column_b', 'unique': False, 'type': 'btree'}, - {'columns': 'column_b, column_a', 'unique': True, 'type': 'btree'}, - {'columns': 'column_a', 'unique': False, 'type': 'hash'} - ] - ) - - @use_profile('postgres') - def test__postgres__incremental(self): - for additional_argument in [[], [], ['--full-refresh']]: - results = self.run_dbt(['run', '--models', 'incremental'] + additional_argument) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('incremental') - self.assertCountEqual( - indexes, - [ - {'columns': 'column_a', 'unique': False, 'type': 'hash'}, - {'columns': 'column_a, column_b', 'unique': True, 'type': 'btree'}, - ] - ) - - @use_profile('postgres') - def test__postgres__seed(self): - for additional_argument in [[], [], ['--full-refresh']]: - results = self.run_dbt(["seed"] 
+ additional_argument) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('seed') - self.assertCountEqual( - indexes, - [ - {'columns': 'country_code', 'unique': False, 'type': 'hash'}, - {'columns': 'country_code, country_name', 'unique': True, 'type': 'btree'}, - ] - ) - - @use_profile('postgres') - def test__postgres__snapshot(self): - for version in [1, 2]: - results = self.run_dbt(["snapshot", '--vars', 'version: {}'.format(version)]) - self.assertEqual(len(results), 1) - - indexes = self.get_indexes('colors') - self.assertCountEqual( - indexes, - [ - {'columns': 'id', 'unique': False, 'type': 'hash'}, - {'columns': 'id, color', 'unique': True, 'type': 'btree'}, - ] - ) - - def get_indexes(self, table_name): - sql = """ - SELECT - pg_get_indexdef(idx.indexrelid) as index_definition - FROM pg_index idx - JOIN pg_class tab ON tab.oid = idx.indrelid - WHERE - tab.relname = '{table}' - AND tab.relnamespace = ( - SELECT oid FROM pg_namespace WHERE nspname = '{schema}' - ); - """ - - sql = sql.format(table=table_name, schema=self.unique_schema()) - results = self.run_sql(sql, fetch='all') - return [self.parse_index_definition(row[0]) for row in results] - - def parse_index_definition(self, index_definition): - index_definition = index_definition.lower() - is_unique = 'unique' in index_definition - m = INDEX_DEFINITION_PATTERN.search(index_definition) - return {'columns': m.group(2), 'unique': is_unique, 'type': m.group(1)} - -class TestPostgresInvalidIndex(DBTIntegrationTest): - @property - def schema(self): - return "postgres_index_065" - - @property - def models(self): - return "models-invalid" - - @use_profile('postgres') - def test__postgres__invalid_index_configs(self): - results, output = self.run_dbt_and_capture(expect_pass=False) - self.assertEqual(len(results), 4) - self.assertRegex(output, r'columns.*is not of type \'array\'') - self.assertRegex(output, r'unique.*is not of type \'boolean\'') - self.assertRegex(output, r'\'columns\' is a required property') - self.assertRegex(output, r'Database Error in model invalid_type') diff --git a/tests/functional/postgres/fixtures.py b/tests/functional/postgres/fixtures.py new file mode 100644 index 00000000000..93b26b4f31b --- /dev/null +++ b/tests/functional/postgres/fixtures.py @@ -0,0 +1,134 @@ +models__incremental_sql = """ +{{ + config( + materialized = "incremental", + indexes=[ + {'columns': ['column_a'], 'type': 'hash'}, + {'columns': ['column_a', 'column_b'], 'unique': True}, + ] + ) +}} + +select * +from ( + select 1 as column_a, 2 as column_b +) t + +{% if is_incremental() %} + where column_a > (select max(column_a) from {{this}}) +{% endif %} + +""" + +models__table_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': ['column_a']}, + {'columns': ['column_b']}, + {'columns': ['column_a', 'column_b']}, + {'columns': ['column_b', 'column_a'], 'type': 'btree', 'unique': True}, + {'columns': ['column_a'], 'type': 'hash'} + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__invalid_columns_type_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': 'column_a, column_b'}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__invalid_type_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': ['column_a'], 'type': 'non_existent_type'}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__invalid_unique_config_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'columns': 
['column_a'], 'unique': 'yes'}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +models_invalid__missing_columns_sql = """ +{{ + config( + materialized = "table", + indexes=[ + {'unique': True}, + ] + ) +}} + +select 1 as column_a, 2 as column_b + +""" + +snapshots__colors_sql = """ +{% snapshot colors %} + + {{ + config( + target_database=database, + target_schema=schema, + unique_key='id', + strategy='check', + check_cols=['color'], + indexes=[ + {'columns': ['id'], 'type': 'hash'}, + {'columns': ['id', 'color'], 'unique': True}, + ] + ) + }} + + {% if var('version') == 1 %} + + select 1 as id, 'red' as color union all + select 2 as id, 'green' as color + + {% else %} + + select 1 as id, 'blue' as color union all + select 2 as id, 'green' as color + + {% endif %} + +{% endsnapshot %} + +""" + +seeds__seed_csv = """country_code,country_name +US,United States +CA,Canada +GB,United Kingdom +""" diff --git a/tests/functional/postgres/test_postgres_indexes.py b/tests/functional/postgres/test_postgres_indexes.py new file mode 100644 index 00000000000..64d61d2df87 --- /dev/null +++ b/tests/functional/postgres/test_postgres_indexes.py @@ -0,0 +1,149 @@ +import pytest +import re +from dbt.tests.util import ( + run_dbt, + run_dbt_and_capture, +) +from tests.functional.postgres.fixtures import ( + models__incremental_sql, + models__table_sql, + models_invalid__missing_columns_sql, + models_invalid__invalid_columns_type_sql, + models_invalid__invalid_type_sql, + models_invalid__invalid_unique_config_sql, + seeds__seed_csv, + snapshots__colors_sql, +) + + +INDEX_DEFINITION_PATTERN = re.compile(r"using\s+(\w+)\s+\((.+)\)\Z") + + +class TestPostgresIndex: + @pytest.fixture(scope="class") + def models(self): + return { + "table.sql": models__table_sql, + "incremental.sql": models__incremental_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": seeds__seed_csv} + + @pytest.fixture(scope="class") + def snapshots(self): + return {"colors.sql": snapshots__colors_sql} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "seeds": { + "quote_columns": False, + "indexes": [ + {"columns": ["country_code"], "unique": False, "type": "hash"}, + {"columns": ["country_code", "country_name"], "unique": True}, + ], + }, + "vars": { + "version": 1, + }, + } + + def test_table(self, project, unique_schema): + results = run_dbt(["run", "--models", "table"]) + assert len(results) == 1 + + indexes = self.get_indexes("table", project, unique_schema) + expected = [ + {"columns": "column_a", "unique": False, "type": "btree"}, + {"columns": "column_b", "unique": False, "type": "btree"}, + {"columns": "column_a, column_b", "unique": False, "type": "btree"}, + {"columns": "column_b, column_a", "unique": True, "type": "btree"}, + {"columns": "column_a", "unique": False, "type": "hash"}, + ] + assert len(indexes) == len(expected) + + def test_incremental(self, project, unique_schema): + for additional_argument in [[], [], ["--full-refresh"]]: + results = run_dbt(["run", "--models", "incremental"] + additional_argument) + assert len(results) == 1 + + indexes = self.get_indexes('incremental', project, unique_schema) + expected = [ + {"columns": "column_a", "unique": False, "type": "hash"}, + {"columns": "column_a, column_b", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def test_seed(self, project, unique_schema): + for additional_argument in [[], [], ['--full-refresh']]: + results = run_dbt(["seed"] + 
additional_argument) + assert len(results) == 1 + + indexes = self.get_indexes('seed', project, unique_schema) + expected = [ + {"columns": "country_code", "unique": False, "type": "hash"}, + {"columns": "country_code, country_name", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def test_snapshot(self, project, unique_schema): + for version in [1, 2]: + results = run_dbt(["snapshot", "--vars", f"version: {version}"]) + assert len(results) == 1 + + indexes = self.get_indexes('colors', project, unique_schema) + expected = [ + {"columns": "id", "unique": False, "type": "hash"}, + {"columns": "id, color", "unique": True, "type": "btree"}, + ] + assert len(indexes) == len(expected) + + def get_indexes(self, table_name, project, unique_schema): + sql = f""" + SELECT + pg_get_indexdef(idx.indexrelid) as index_definition + FROM pg_index idx + JOIN pg_class tab ON tab.oid = idx.indrelid + WHERE + tab.relname = '{table_name}' + AND tab.relnamespace = ( + SELECT oid FROM pg_namespace WHERE nspname = '{unique_schema}' + ); + """ + results = project.run_sql(sql, fetch="all") + return [self.parse_index_definition(row[0]) for row in results] + + def parse_index_definition(self, index_definition): + index_definition = index_definition.lower() + is_unique = "unique" in index_definition + m = INDEX_DEFINITION_PATTERN.search(index_definition) + return { + "columns": m.group(2), + "unique": is_unique, + "type": m.group(1), + } + + def assertCountEqual(self, a, b): + assert len(a) == len(b) + + +class TestPostgresInvalidIndex(): + @pytest.fixture(scope="class") + def models(self): + return { + "invalid_unique_config.sql": models_invalid__invalid_unique_config_sql, + "invalid_type.sql": models_invalid__invalid_type_sql, + "invalid_columns_type.sql": models_invalid__invalid_columns_type_sql, + "missing_columns.sql": models_invalid__missing_columns_sql, + } + + def test_invalid_index_configs(self, project): + results, output = run_dbt_and_capture(expect_pass=False) + assert len(results) == 4 + assert re.search(r"columns.*is not of type 'array'", output) + assert re.search(r"unique.*is not of type 'boolean'", output) + assert re.search(r"'columns' is a required property", output) + assert re.search(r"Database Error in model invalid_type", output) From 73116fb816498c4c45a01a2498199465202ec01b Mon Sep 17 00:00:00 2001 From: Joe Berni <99652623+josephberni@users.noreply.github.com> Date: Wed, 9 Nov 2022 16:58:01 +0000 Subject: [PATCH 027/156] feature/favor-state-node (#5859) --- .../unreleased/Features-20220408-165459.yaml | 8 ++ core/dbt/contracts/graph/manifest.py | 6 +- core/dbt/flags.py | 1 + core/dbt/main.py | 16 +++ .../062_defer_state_tests/test_defer_state.py | 124 ++++++++++++++++++ 5 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Features-20220408-165459.yaml diff --git a/.changes/unreleased/Features-20220408-165459.yaml b/.changes/unreleased/Features-20220408-165459.yaml new file mode 100644 index 00000000000..c29cfc617c7 --- /dev/null +++ b/.changes/unreleased/Features-20220408-165459.yaml @@ -0,0 +1,8 @@ +kind: Features +body: Added favor-state flag to optionally favor state nodes even if unselected node + exists +time: 2022-04-08T16:54:59.696564+01:00 +custom: + Author: daniel-murray josephberni + Issue: "2968" + PR: "5859" diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index 8b4565fc7e9..73034ec80f2 100644 --- a/core/dbt/contracts/graph/manifest.py +++ 
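Before leaving PATCH 026: the get_indexes helper above feeds each pg_get_indexdef row through INDEX_DEFINITION_PATTERN. Worked through on one representative definition string (the definition itself is illustrative, not captured from a real database):

    import re

    INDEX_DEFINITION_PATTERN = re.compile(r"using\s+(\w+)\s+\((.+)\)\Z")

    # pg_get_indexdef output, already lowercased as parse_index_definition does first
    definition = "create unique index t_idx on myschema.t using btree (column_b, column_a)"
    m = INDEX_DEFINITION_PATTERN.search(definition)
    parsed = {"columns": m.group(2), "unique": "unique" in definition, "type": m.group(1)}
    assert parsed == {"columns": "column_b, column_a", "unique": True, "type": "btree"}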
b/core/dbt/contracts/graph/manifest.py @@ -1011,6 +1011,7 @@ def merge_from_artifact( adapter, other: "WritableManifest", selected: AbstractSet[UniqueID], + favor_state: bool = False, ) -> None: """Given the selected unique IDs and a writable manifest, update this manifest by replacing any unselected nodes with their counterpart. @@ -1025,7 +1026,10 @@ def merge_from_artifact( node.resource_type in refables and not node.is_ephemeral and unique_id not in selected - and not adapter.get_relation(current.database, current.schema, current.identifier) + and ( + not adapter.get_relation(current.database, current.schema, current.identifier) + or favor_state + ) ): merged.add(unique_id) self.nodes[unique_id] = node.replace(deferred=True) diff --git a/core/dbt/flags.py b/core/dbt/flags.py index 974aa50620c..367286ccb8c 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -113,6 +113,7 @@ def env_set_path(key: str) -> Optional[Path]: MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING") DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE") +FAVOR_STATE_MODE = env_set_truthy("DBT_FAVOR_STATE_MODE") ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH") ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER") diff --git a/core/dbt/main.py b/core/dbt/main.py index 88196fd98ea..153c120a6e0 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -501,6 +501,20 @@ def _add_defer_argument(*subparsers): ) +def _add_favor_state_argument(*subparsers): + for sub in subparsers: + sub.add_optional_argument_inverse( + "--favor-state", + enable_help=""" + If set, defer to the state variable for resolving unselected nodes, even if the node exists as a database object in the current environment. + """, + disable_help=""" + If defer is set, expect standard defer behaviour.
+ """, + default=flags.FAVOR_STATE_MODE, + ) + + def _build_run_subparser(subparsers, base_subparser): run_sub = subparsers.add_parser( "run", @@ -1173,6 +1187,8 @@ def parse_args(args, cls=DBTArgumentParser): _add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub) # --defer _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub) + # --favor-state + _add_favor_state_argument(run_sub, test_sub, build_sub, snapshot_sub) # --full-refresh _add_table_mutability_arguments(run_sub, compile_sub, build_sub) diff --git a/test/integration/062_defer_state_tests/test_defer_state.py b/test/integration/062_defer_state_tests/test_defer_state.py index 56004a1f28c..058e43ef05f 100644 --- a/test/integration/062_defer_state_tests/test_defer_state.py +++ b/test/integration/062_defer_state_tests/test_defer_state.py @@ -89,6 +89,9 @@ def run_and_snapshot_defer(self): # defer test, it succeeds results = self.run_dbt(['snapshot', '--state', 'state', '--defer']) + # favor_state test, it succeeds + results = self.run_dbt(['snapshot', '--state', 'state', '--defer', '--favor-state']) + def run_and_defer(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -123,6 +126,40 @@ def run_and_defer(self): assert len(results) == 1 + def run_and_defer_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + assert not any(r.node.deferred for r in results) + results = self.run_dbt(['run']) + assert len(results) == 2 + assert not any(r.node.deferred for r in results) + results = self.run_dbt(['test']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + # test tests first, because run will change things + # no state, wrong schema, failure. 
+ self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False) + + # no state, run also fails + self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False) + + # defer test, it succeeds + results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + + # with state it should work though + results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + assert self.other_schema not in results[0].node.compiled_code + assert self.unique_schema() in results[0].node.compiled_code + + with open('target/manifest.json') as fp: + data = json.load(fp) + assert data['nodes']['seed.test.seed']['deferred'] + + assert len(results) == 1 + def run_switchdirs_defer(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -152,6 +189,35 @@ def run_switchdirs_defer(self): expect_pass=False, ) + def run_switchdirs_defer_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + self.use_default_project({'model-paths': ['changed_models']}) + # the sql here is just wrong, so it should fail + self.run_dbt( + ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=False, + ) + # but this should work since we just use the old happy model + self.run_dbt( + ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=True, + ) + + self.use_default_project({'model-paths': ['changed_models_bad']}) + # this should fail because the table model refs a broken ephemeral + # model, which it should see + self.run_dbt( + ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], + expect_pass=False, + ) + def run_defer_iff_not_exists(self): results = self.run_dbt(['seed', '--target', 'otherschema']) assert len(results) == 1 @@ -169,6 +235,23 @@ def run_defer_iff_not_exists(self): assert self.other_schema not in results[0].node.compiled_code assert self.unique_schema() in results[0].node.compiled_code + def run_defer_iff_not_exists_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) + assert len(results) == 2 + + # because the seed exists in other schema, we should defer it + assert self.other_schema not in results[0].node.compiled_code + assert self.unique_schema() in results[0].node.compiled_code + def run_defer_deleted_upstream(self): results = self.run_dbt(['seed']) assert len(results) == 1 @@ -191,6 +274,27 @@ def run_defer_deleted_upstream(self): assert self.other_schema not in results[0].node.compiled_code assert self.unique_schema() in results[0].node.compiled_code + def run_defer_deleted_upstream_favor_state(self): + results = self.run_dbt(['seed']) + assert len(results) == 1 + results = self.run_dbt(['run']) + assert len(results) == 2 + + # copy files over from the happy times when we had a good target + self.copy_state() + + self.use_default_project({'model-paths': 
From 9f280a84694fde5644cad1cc2263a0e8494f9b3d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sun, 13 Nov 2022 09:57:33 -0500
Subject: [PATCH 028/156] Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core (#6144)

* Update colorama requirement in /core

Updates the requirements on [colorama](https://github.com/tartley/colorama) to permit the latest version.
- [Release notes](https://github.com/tartley/colorama/releases)
- [Changelog](https://github.com/tartley/colorama/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/tartley/colorama/compare/0.3.9...0.4.6)

---
updated-dependencies:
- dependency-name: colorama
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]

* Add automated changelog yaml from template for bot PR

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Github Build Bot
---
 .changes/unreleased/Dependency-20221026-000910.yaml | 7 +++++++
 core/setup.py                                       | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 .changes/unreleased/Dependency-20221026-000910.yaml

diff --git a/.changes/unreleased/Dependency-20221026-000910.yaml b/.changes/unreleased/Dependency-20221026-000910.yaml
new file mode 100644
index 00000000000..a5e5756e4cb
--- /dev/null
+++ b/.changes/unreleased/Dependency-20221026-000910.yaml
@@ -0,0 +1,7 @@
+kind: "Dependency"
+body: "Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core"
+time: 2022-10-26T00:09:10.00000Z
+custom:
+  Author: dependabot[bot]
+  Issue: 4904
+  PR: 6144
diff --git a/core/setup.py b/core/setup.py
index eaad87423c2..28aa67fddf3 100644
--- a/core/setup.py
+++ b/core/setup.py
@@ -50,7 +50,7 @@
         "agate>=1.6,<1.6.4",
         "betterproto==1.2.5",
         "click>=7.0,<9",
-        "colorama>=0.3.9,<0.4.6",
+        "colorama>=0.3.9,<0.4.7",
         "hologram>=0.0.14,<=0.0.15",
         "isodate>=0.6,<0.7",
         "logbook>=1.5,<1.6",
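
Since the only functional change in this commit is widening an upper version bound, a quick way to sanity-check what the new range admits is the third-party `packaging` library (illustrative snippet, not part of the patch):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    spec = SpecifierSet(">=0.3.9,<0.4.7")
    assert Version("0.4.6") in spec        # the release this bump unlocks
    assert Version("0.4.7") not in spec    # still excluded by the new cap
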
From 39c5c42215b4d6751e9644324040a1c716742224 Mon Sep 17 00:00:00 2001
From: Michelle Ark
Date: Mon, 14 Nov 2022 10:39:57 -0500
Subject: [PATCH 029/156] converting 044_test_run_operations (#6122)

* converting 044_test_run_operations
---
 .../macros/sad_macros.sql                     |   7 --
 .../044_run_operations_tests/models/model.sql |   1 -
 .../test_run_operations.py                    |  76 -------------
 .../functional/run_operations/fixtures.py     |  18 ++-
 .../run_operations/test_run_operations.py     | 104 ++++++++++++++++++
 5 files changed, 121 insertions(+), 85 deletions(-)
 delete mode 100644 test/integration/044_run_operations_tests/macros/sad_macros.sql
 delete mode 100644 test/integration/044_run_operations_tests/models/model.sql
 delete mode 100644 test/integration/044_run_operations_tests/test_run_operations.py
 rename test/integration/044_run_operations_tests/macros/happy_macros.sql => tests/functional/run_operations/fixtures.py (82%)
 create mode 100644 tests/functional/run_operations/test_run_operations.py

diff --git a/test/integration/044_run_operations_tests/macros/sad_macros.sql b/test/integration/044_run_operations_tests/macros/sad_macros.sql
deleted file mode 100644
index 4f2c80bc40f..00000000000
--- a/test/integration/044_run_operations_tests/macros/sad_macros.sql
+++ /dev/null
@@ -1,7 +0,0 @@
-{% macro syntax_error() %}
-    {% if execute %}
-        {% call statement() %}
-            select NOPE NOT A VALID QUERY
-        {% endcall %}
-    {% endif %}
-{% endmacro %}
diff --git a/test/integration/044_run_operations_tests/models/model.sql b/test/integration/044_run_operations_tests/models/model.sql
deleted file mode 100644
index 43258a71464..00000000000
--- a/test/integration/044_run_operations_tests/models/model.sql
+++ /dev/null
@@ -1 +0,0 @@
-select 1 as id
diff --git a/test/integration/044_run_operations_tests/test_run_operations.py b/test/integration/044_run_operations_tests/test_run_operations.py
deleted file mode 100644
index d0308abe9b9..00000000000
--- a/test/integration/044_run_operations_tests/test_run_operations.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from test.integration.base import DBTIntegrationTest, use_profile
-import yaml
-
-
-class TestOperations(DBTIntegrationTest):
-    @property
-    def schema(self):
-        return "run_operations_044"
-
-    @property
-    def models(self):
-        return "models"
-
-    @property
-    def project_config(self):
-        return {
-            'config-version': 2,
-            "macro-paths": ['macros'],
-        }
-
-    def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs):
-        args = ['run-operation', macro]
-        if kwargs:
-            args.extend(('--args', yaml.safe_dump(kwargs)))
-        if extra_args:
-            args.extend(extra_args)
-        return self.run_dbt(args, expect_pass=expect_pass)
-
-    @use_profile('postgres')
-    def test__postgres_macro_noargs(self):
-        self.run_operation('no_args')
-        self.assertTableDoesExist('no_args')
-
-    @use_profile('postgres')
-    def test__postgres_macro_args(self):
-        self.run_operation('table_name_args', table_name='my_fancy_table')
-        self.assertTableDoesExist('my_fancy_table')
-
-    @use_profile('postgres')
-    def test__postgres_macro_exception(self):
-        self.run_operation('syntax_error', False)
-
-    @use_profile('postgres')
-    def test__postgres_macro_missing(self):
-        self.run_operation('this_macro_does_not_exist', False)
-
-    @use_profile('postgres')
-    def test__postgres_cannot_connect(self):
-        self.run_operation('no_args',
-                           extra_args=['--target', 'noaccess'],
-                           expect_pass=False)
-
-    @use_profile('postgres')
-    def test__postgres_vacuum(self):
-        self.run_dbt(['run'])
-        # this should succeed
-        self.run_operation('vacuum', table_name='model')
-
-    @use_profile('postgres')
-    def test__postgres_vacuum_ref(self):
-        self.run_dbt(['run'])
-        # this should succeed
-        self.run_operation('vacuum_ref', ref_target='model')
-
-    @use_profile('postgres')
-    def test__postgres_select(self):
-        self.run_operation('select_something', name='world')
-
-    @use_profile('postgres')
-    def test__postgres_access_graph(self):
-        self.run_operation('log_graph')
-
-    @use_profile('postgres')
-    def test__postgres_print(self):
-        # Tests that calling the `print()` macro does not cause an exception
-        self.run_operation('print_something')
diff --git a/test/integration/044_run_operations_tests/macros/happy_macros.sql b/tests/functional/run_operations/fixtures.py
similarity index 82%
rename from test/integration/044_run_operations_tests/macros/happy_macros.sql
rename to tests/functional/run_operations/fixtures.py
index c5c6df4dc8a..f6ed82e20ec 100644
--- a/test/integration/044_run_operations_tests/macros/happy_macros.sql
+++ b/tests/functional/run_operations/fixtures.py
@@ -1,3 +1,4 @@
+happy_macros_sql = """
 {% macro no_args() %}
   {% if execute %}
     {% call statement(auto_begin=True) %}
@@ -53,4 +54,19 @@
 {% macro print_something() %}
   {{ print("You're doing awesome!") }}
-{% endmacro %}
\ No newline at end of file
+{% endmacro %}
+"""
+
+sad_macros_sql = """
+{% macro syntax_error() %}
+    {% if execute %}
+        {% call statement() %}
+            select NOPE NOT A VALID QUERY
+        {% endcall %}
+    {% endif %}
+{% endmacro %}
+"""
+
+model_sql = """
+select 1 as id
+"""
diff --git a/tests/functional/run_operations/test_run_operations.py b/tests/functional/run_operations/test_run_operations.py
new file mode 100644
index 00000000000..f91ef2d8359
--- /dev/null
+++ b/tests/functional/run_operations/test_run_operations.py
@@ -0,0 +1,104 @@
+import os
+import pytest
+import yaml
+
+from dbt.tests.util import (
+    check_table_does_exist,
+    run_dbt
+)
+from tests.functional.run_operations.fixtures import (
+    happy_macros_sql,
+    sad_macros_sql,
+    model_sql
+)
+
+
+class TestOperations:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {"model.sql": model_sql}
+
+    @pytest.fixture(scope="class")
+    def macros(self):
+        return {
+            "happy_macros.sql": happy_macros_sql,
+            "sad_macros.sql": sad_macros_sql
+        }
+
+    @pytest.fixture(scope="class")
+    def dbt_profile_data(self, unique_schema):
+        return {
+            "config": {"send_anonymous_usage_stats": False},
+            "test": {
+                "outputs": {
+                    "default": {
+                        "type": "postgres",
+                        "threads": 4,
+                        "host": "localhost",
+                        "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)),
+                        "user": os.getenv("POSTGRES_TEST_USER", "root"),
+                        "pass": os.getenv("POSTGRES_TEST_PASS", "password"),
+                        "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"),
+                        "schema": unique_schema,
+                    },
+                    "noaccess": {
+                        "type": "postgres",
+                        "threads": 4,
+                        "host": "localhost",
+                        "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)),
+                        "user": 'noaccess',
+                        "pass": 'password',
+                        "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"),
+                        'schema': unique_schema
+                    }
+                },
+                "target": "default",
+            },
+        }
+
+    def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs):
+        args = ['run-operation', macro]
+        if kwargs:
+            args.extend(('--args', yaml.safe_dump(kwargs)))
+        if extra_args:
+            args.extend(extra_args)
+        return run_dbt(args, expect_pass=expect_pass)
+
+    def test_macro_noargs(self, project):
+        self.run_operation('no_args')
+        check_table_does_exist(project.adapter, 'no_args')
+
+    def test_macro_args(self, project):
+        self.run_operation('table_name_args', table_name='my_fancy_table')
+        check_table_does_exist(project.adapter, 'my_fancy_table')
+
+    def test_macro_exception(self, project):
+        self.run_operation('syntax_error', False)
+
+    def test_macro_missing(self, project):
+        self.run_operation('this_macro_does_not_exist', False)
+
+    def test_cannot_connect(self, project):
+        self.run_operation('no_args',
+                           extra_args=['--target', 'noaccess'],
+                           expect_pass=False)
+
+    def test_vacuum(self, project):
+        run_dbt(['run'])
+        # this should succeed
+        self.run_operation('vacuum', table_name='model')
+
+    def test_vacuum_ref(self, project):
+        run_dbt(['run'])
+        # this should succeed
+        self.run_operation('vacuum_ref', ref_target='model')
+
+    def test_select(self, project):
+        self.run_operation('select_something', name='world')
+
+    def test_access_graph(self, project):
+        self.run_operation('log_graph')
+
+    def test_print(self, project):
+        # Tests that calling the `print()` macro does not cause an exception
+        self.run_operation('print_something')
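
The `run_operation` helper above is the crux of these tests: it turns Python keyword arguments into the YAML payload that dbt's `--args` flag expects. Concretely (an illustrative trace, not part of the test file):

    import yaml

    args = ['run-operation', 'table_name_args']
    kwargs = {'table_name': 'my_fancy_table'}
    args.extend(('--args', yaml.safe_dump(kwargs)))
    # args is now:
    # ['run-operation', 'table_name_args', '--args', 'table_name: my_fancy_table\n']
    # i.e. the equivalent of the CLI invocation:
    #   dbt run-operation table_name_args --args 'table_name: my_fancy_table'
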
{ + "config": {"send_anonymous_usage_stats": False}, + "test": { + "outputs": { + "default": { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": os.getenv("POSTGRES_TEST_USER", "root"), + "pass": os.getenv("POSTGRES_TEST_PASS", "password"), + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + "schema": unique_schema, + }, + "noaccess": { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": 'noaccess', + "pass": 'password', + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + 'schema': unique_schema + } + }, + "target": "default", + }, + } + + def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs): + args = ['run-operation', macro] + if kwargs: + args.extend(('--args', yaml.safe_dump(kwargs))) + if extra_args: + args.extend(extra_args) + return run_dbt(args, expect_pass=expect_pass) + + def test_macro_noargs(self, project): + self.run_operation('no_args') + check_table_does_exist(project.adapter, 'no_args') + + def test_macro_args(self, project): + self.run_operation('table_name_args', table_name='my_fancy_table') + check_table_does_exist(project.adapter, 'my_fancy_table') + + def test_macro_exception(self, project): + self.run_operation('syntax_error', False) + + def test_macro_missing(self, project): + self.run_operation('this_macro_does_not_exist', False) + + def test_cannot_connect(self, project): + self.run_operation('no_args', + extra_args=['--target', 'noaccess'], + expect_pass=False) + + def test_vacuum(self, project): + run_dbt(['run']) + # this should succeed + self.run_operation('vacuum', table_name='model') + + def test_vacuum_ref(self, project): + run_dbt(['run']) + # this should succeed + self.run_operation('vacuum_ref', ref_target='model') + + def test_select(self, project): + self.run_operation('select_something', name='world') + + def test_access_graph(self, project): + self.run_operation('log_graph') + + def test_print(self, project): + # Tests that calling the `print()` macro does not cause an exception + self.run_operation('print_something') From 66ac107409749ff1cf5dafeab371dd1baf916b9f Mon Sep 17 00:00:00 2001 From: Matthew McKnight <91097623+McKnight-42@users.noreply.github.com> Date: Mon, 14 Nov 2022 14:22:48 -0600 Subject: [PATCH 030/156] [CT-1262] Convert dbt_debug (#6125) * init pr for dbt_debug test conversion * removal of old test * minor test format change * add new Base class and Test classes * reformatting test, new method for capsys and error messgae to check, todo fix badproject * refomatting tests, ready for review * checking yaml file, and small reformat * modifying since update wasn't working in ci/cd --- .../049_dbt_debug_tests/models/model.sql | 1 - .../049_dbt_debug_tests/test_debug.py | 158 ------------------ .../tests/adapter/dbt_debug/test_dbt_debug.py | 107 ++++++++++++ 3 files changed, 107 insertions(+), 159 deletions(-) delete mode 100644 test/integration/049_dbt_debug_tests/models/model.sql delete mode 100644 test/integration/049_dbt_debug_tests/test_debug.py create mode 100644 tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py diff --git a/test/integration/049_dbt_debug_tests/models/model.sql b/test/integration/049_dbt_debug_tests/models/model.sql deleted file mode 100644 index 2c2d9c8de90..00000000000 --- a/test/integration/049_dbt_debug_tests/models/model.sql +++ /dev/null @@ -1 +0,0 @@ -seled 1 as id diff --git 
a/test/integration/049_dbt_debug_tests/test_debug.py b/test/integration/049_dbt_debug_tests/test_debug.py deleted file mode 100644 index 8a5fbd774f3..00000000000 --- a/test/integration/049_dbt_debug_tests/test_debug.py +++ /dev/null @@ -1,158 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os -import re -import yaml - -import pytest - - -class TestDebug(DBTIntegrationTest): - @property - def schema(self): - return 'dbt_debug_049' - - @staticmethod - def dir(value): - return os.path.normpath(value) - - @property - def models(self): - return self.dir('models') - - def postgres_profile(self): - profile = super(TestDebug, self).postgres_profile() - profile['test']['outputs'].update({ - 'nopass': { - 'type': 'postgres', - 'threads': 4, - 'host': self.database_host, - 'port': 5432, - 'user': 'root', - # 'pass': 'password', - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - 'wronguser': { - 'type': 'postgres', - 'threads': 4, - 'host': self.database_host, - 'port': 5432, - 'user': 'notmyuser', - 'pass': 'notmypassword', - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - 'none_target': None - }) - return profile - - @pytest.fixture(autouse=True) - def capsys(self, capsys): - self.capsys = capsys - - def assertGotValue(self, linepat, result): - found = False - output = self.capsys.readouterr().out - for line in output.split('\n'): - if linepat.match(line): - found = True - self.assertIn(result, line, 'result "{}" not found in "{}" line'.format(result, linepat)) - self.assertTrue(found, 'linepat {} not found in stdout: {}'.format(linepat, output)) - - @use_profile('postgres') - def test_postgres_ok(self): - self.run_dbt(['debug']) - self.assertNotIn('ERROR', self.capsys.readouterr().out) - - @use_profile('postgres') - def test_postgres_nopass(self): - self.run_dbt(['debug', '--target', 'nopass'], expect_pass=False) - self.assertGotValue(re.compile(r'\s+profiles\.yml file'), 'ERROR invalid') - - @use_profile('postgres') - def test_postgres_wronguser(self): - self.run_dbt(['debug', '--target', 'wronguser'], expect_pass=False) - self.assertGotValue(re.compile(r'\s+Connection test'), 'ERROR') - - @use_profile('postgres') - def test_postgres_empty_target(self): - self.run_dbt(['debug', '--target', 'none_target'], expect_pass=False) - self.assertGotValue(re.compile(r"\s+output 'none_target'"), 'misconfigured') - - -class TestDebugProfileVariable(TestDebug): - @property - def project_config(self): - return { - 'config-version': 2, - 'profile': '{{ "te" ~ "st" }}' - } - - -class TestDebugInvalidProject(DBTIntegrationTest): - @property - def schema(self): - return 'dbt_debug_049' - - @staticmethod - def dir(value): - return os.path.normpath(value) - - @property - def models(self): - return self.dir('models') - - @pytest.fixture(autouse=True) - def capsys(self, capsys): - self.capsys = capsys - - @use_profile('postgres') - def test_postgres_empty_project(self): - with open('dbt_project.yml', 'w') as f: - pass - self.run_dbt(['debug', '--profile', 'test'], expect_pass=False) - splitout = self.capsys.readouterr().out.split('\n') - for line in splitout: - if line.strip().startswith('dbt_project.yml file'): - self.assertIn('ERROR invalid', line) - elif line.strip().startswith('profiles.yml file'): - self.assertNotIn('ERROR invalid', line) - - @use_profile('postgres') - def test_postgres_badproject(self): - # load a special project that is an error - self.use_default_project(overrides={ - 'invalid-key': 'not a valid key so this is bad project', - }) - 
diff --git a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py
new file mode 100644
index 00000000000..b7b0ff9ac17
--- /dev/null
+++ b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py
@@ -0,0 +1,107 @@
+import pytest
+import os
+import re
+import yaml
+from dbt.tests.util import run_dbt
+
+MODELS__MODEL_SQL = """
+seled 1 as id
+"""
+
+
+class BaseDebug:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {"model.sql": MODELS__MODEL_SQL}
+
+    @pytest.fixture(autouse=True)
+    def capsys(self, capsys):
+        self.capsys = capsys
+
+    def assertGotValue(self, linepat, result):
+        found = False
+        output = self.capsys.readouterr().out
+        for line in output.split('\n'):
+            if linepat.match(line):
+                found = True
+                assert result in line
+        # fail loudly if no line matched the pattern at all
+        assert found, f"linepat {linepat} not found in stdout: {output}"
+
+    def check_project(self, splitout, msg="ERROR invalid"):
+        for line in splitout:
+            if line.strip().startswith("dbt_project.yml file"):
+                assert msg in line
+            elif line.strip().startswith("profiles.yml file"):
+                assert "ERROR invalid" not in line
+
+
+class BaseDebugProfileVariable(BaseDebug):
+    @pytest.fixture(scope="class")
+    def project_config_update(self):
+        return {
+            "config-version": 2,
+            "profile": '{{ "te" ~ "st" }}'
+        }
+
+
+class TestDebugPostgres(BaseDebug):
+    def test_ok(self, project):
+        run_dbt(["debug"])
+        assert "ERROR" not in self.capsys.readouterr().out
+
+    def test_nopass(self, project):
+        run_dbt(["debug", "--target", "nopass"], expect_pass=False)
+        self.assertGotValue(re.compile(r"\s+profiles\.yml file"), "ERROR invalid")
+
+    def test_wronguser(self, project):
+        run_dbt(["debug", "--target", "wronguser"], expect_pass=False)
+        self.assertGotValue(re.compile(r"\s+Connection test"), "ERROR")
+
+    def test_empty_target(self, project):
+        run_dbt(["debug", "--target", "none_target"], expect_pass=False)
+        self.assertGotValue(re.compile(r"\s+output 'none_target'"), "misconfigured")
+
+
+class TestDebugProfileVariablePostgres(BaseDebugProfileVariable):
+    pass
+
+
+class TestDebugInvalidProjectPostgres(BaseDebug):
+
+    def test_empty_project(self, project):
+        with open("dbt_project.yml", "w") as f:  # noqa: F841
+            pass
+
+        run_dbt(["debug", "--profile", "test"], expect_pass=False)
+        splitout = self.capsys.readouterr().out.split("\n")
+        self.check_project(splitout)
+
+    def test_badproject(self, project):
+        update_project = {"invalid-key": "not a valid key so this is bad project"}
+
+        with open("dbt_project.yml", "w") as f:
+            yaml.safe_dump(update_project, f)
+
+        run_dbt(["debug", "--profile", "test"], expect_pass=False)
+        splitout = self.capsys.readouterr().out.split("\n")
+        self.check_project(splitout)
+
+    def test_not_found_project(self, project):
+        run_dbt(["debug", "--project-dir", "nopass"], expect_pass=False)
+        splitout = self.capsys.readouterr().out.split("\n")
+        self.check_project(splitout, msg="ERROR not found")
+
+    def test_invalid_project_outside_current_dir(self, project):
+        # create a dbt_project.yml
+        project_config = {
+            "invalid-key": "not a valid key in this project"
+        }
+        os.makedirs("custom", exist_ok=True)
+        with open("custom/dbt_project.yml", "w") as f:
+            yaml.safe_dump(project_config, f, default_flow_style=True)
+        run_dbt(["debug", "--project-dir", "custom"], expect_pass=False)
+        splitout = self.capsys.readouterr().out.split("\n")
+        self.check_project(splitout)
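
One conversion detail worth flagging: the old DBTIntegrationTest assertion helpers are replaced by pytest's built-in stdout capture, threaded onto the test class through an autouse fixture. A minimal standalone sketch of that pattern (illustrative, independent of dbt; the test body and printed string are invented):

    import re
    import pytest

    class TestCapsysPattern:
        @pytest.fixture(autouse=True)
        def capsys(self, capsys):
            # shadow pytest's built-in capsys fixture and stash it on self
            # so plain helper methods can read captured output too
            self.capsys = capsys

        def test_captures_stdout(self):
            print("profiles.yml file [OK found and valid]")
            out = self.capsys.readouterr().out
            assert re.search(r"profiles\.yml file", out)
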
From eae98677b9f06ef733fcf12419051b33e36d6811 Mon Sep 17 00:00:00 2001
From: Michelle Ark
Date: Tue, 15 Nov 2022 10:30:00 -0500
Subject: [PATCH 031/156] s/gitlab/github for flake8 precommit repo (#6252)

---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a0290fdf762..6877497ae37 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,7 +30,7 @@ repos:
     args:
     - "--check"
     - "--diff"
-- repo: https://gitlab.com/pycqa/flake8
+- repo: https://github.com/pycqa/flake8
   rev: 4.0.1
   hooks:
   - id: flake8

From 9297e4d55cbcdb815d7eb74250f5475ea174675c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 15 Nov 2022 22:02:37 -0500
Subject: [PATCH 032/156] Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core (#5917)

* Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core

Updates the requirements on [pathspec](https://github.com/cpburnz/python-pathspec) to permit the latest version.
- [Release notes](https://github.com/cpburnz/python-pathspec/releases)
- [Changelog](https://github.com/cpburnz/python-pathspec/blob/master/CHANGES.rst)
- [Commits](https://github.com/cpburnz/python-pathspec/compare/v0.9.0...v0.10.1)

---
updated-dependencies:
- dependency-name: pathspec
  dependency-type: direct:production
...
Signed-off-by: dependabot[bot]

* Add automated changelog yaml from template for bot PR

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Github Build Bot
---
 .changes/unreleased/Dependency-20220923-000646.yaml | 7 +++++++
 core/setup.py                                       | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 .changes/unreleased/Dependency-20220923-000646.yaml

diff --git a/.changes/unreleased/Dependency-20220923-000646.yaml b/.changes/unreleased/Dependency-20220923-000646.yaml
new file mode 100644
index 00000000000..a8d3c0a64ad
--- /dev/null
+++ b/.changes/unreleased/Dependency-20220923-000646.yaml
@@ -0,0 +1,7 @@
+kind: "Dependency"
+body: "Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core"
+time: 2022-09-23T00:06:46.00000Z
+custom:
+  Author: dependabot[bot]
+  Issue: 4904
+  PR: 5917
diff --git a/core/setup.py b/core/setup.py
index 28aa67fddf3..61d712ed79a 100644
--- a/core/setup.py
+++ b/core/setup.py
@@ -63,7 +63,7 @@
         "dbt-extractor~=0.4.1",
         "typing-extensions>=3.7.4",
         "werkzeug>=1,<3",
-        "pathspec~=0.9.0",
+        "pathspec>=0.9,<0.11",
         # the following are all to match snowflake-connector-python
         "requests<3.0.0",
         "idna>=2.5,<4",

From a235abd176cb46977c6c86b85215a2547e287a13 Mon Sep 17 00:00:00 2001
From: FishtownBuildBot <77737458+FishtownBuildBot@users.noreply.github.com>
Date: Wed, 16 Nov 2022 11:00:33 -0500
Subject: [PATCH 033/156] Add new index.html and changelog yaml files from dbt-docs (#6265)

---
 .changes/unreleased/Docs-20221116-155743.yaml | 7 +++++++
 core/dbt/include/index.html                   | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 .changes/unreleased/Docs-20221116-155743.yaml

diff --git a/.changes/unreleased/Docs-20221116-155743.yaml b/.changes/unreleased/Docs-20221116-155743.yaml
new file mode 100644
index 00000000000..0b5ce05ee69
--- /dev/null
+++ b/.changes/unreleased/Docs-20221116-155743.yaml
@@ -0,0 +1,7 @@
+kind: Docs
+body: Fix rendering of sample code for metrics
+time: 2022-11-16T15:57:43.204201+01:00
+custom:
+  Author: jtcohen6
+  Issue: "323"
+  PR: "346"
diff --git a/core/dbt/include/index.html b/core/dbt/include/index.html
index cd86f14ef7b..65749e446d0 100644
--- a/core/dbt/include/index.html
+++ b/core/dbt/include/index.html
@@ -90,7 +90,7 @@
[Hunk body elided: the remainder of this capture is a long run of minified, vendored JavaScript (jQuery, cytoscape.js, and other libraries bundled into the dbt-docs single-page app at core/dbt/include/index.html), cut off mid-stream. Per the diffstat above, the commit changes a single line inside that bundle; the surrounding minified context carries no reviewable information.]
r=n.eles.edges(),i=n.eles.nodes(),s={isCompound:e.hasCompoundNodes(),layoutNodes:[],idToIndex:{},nodeSize:i.size(),graphSet:[],indexToGraph:[],layoutEdges:[],edgeSize:r.size(),temperature:n.initialTemp,clientWidth:e.width(),clientHeight:e.width(),boundingBox:o.makeBoundingBox(n.boundingBox?n.boundingBox:{x1:0,y1:0,w:e.width(),h:e.height()})},l=n.eles.components(),c={},u=0;u0)for(s.graphSet.push(A),u=0;ur.count?0:r.graph},f=function e(t,n,r,i){var o=i.graphSet[r];if(-1a){var h=u(),g=d();(h-1)*g>=a?u(h-1):(g-1)*h>=a&&d(g-1)}else for(;c*l=a?d(v+1):u(m+1)}var b=o.w/c,y=o.h/l;if(t.condense&&(b=0,y=0),t.avoidOverlap)for(var x=0;x=c&&(T=0,j++)},D={},R=0;R(r=i.sqdistToFiniteLine(e,t,w[k],w[k+1],w[k+2],w[k+3])))return b(n,r),!0}else if("bezier"===a.edgeType||"multibezier"===a.edgeType||"self"===a.edgeType||"compound"===a.edgeType)for(w=a.allpts,k=0;k+5(r=i.sqdistToQuadraticBezier(e,t,w[k],w[k+1],w[k+2],w[k+3],w[k+4],w[k+5])))return b(n,r),!0;v=v||o.source,x=x||o.target;var A=l.getArrowWidth(s,u),E=[{name:"source",x:a.arrowStartX,y:a.arrowStartY,angle:a.srcArrowAngle},{name:"target",x:a.arrowEndX,y:a.arrowEndY,angle:a.tgtArrowAngle},{name:"mid-source",x:a.midX,y:a.midY,angle:a.midsrcArrowAngle},{name:"mid-target",x:a.midX,y:a.midY,angle:a.midtgtArrowAngle}];for(k=0;k0&&(y(v),y(x))}function w(e,t,n){return o.getPrefixedProperty(e,t,n)}function k(n,r){var o,a=n._private,s=m;o=r?r+"-":"";var l=n.pstyle(o+"label").value;if("yes"===n.pstyle("text-events").strValue&&l){var c=a.rstyle,u=n.pstyle("text-border-width").pfValue,d=n.pstyle("text-background-padding").pfValue,f=w(c,"labelWidth",r)+u+2*s+2*d,p=w(c,"labelHeight",r)+u+2*s+2*d,h=w(c,"labelX",r),g=w(c,"labelY",r),v=w(a.rscratch,"labelAngle",r),y=h-f/2,x=h+f/2,k=g-p/2,A=g+p/2;if(v){var E=Math.cos(v),S=Math.sin(v),$=function(e,t){return{x:(e-=h)*E-(t-=g)*S+h,y:e*S+t*E+g}},C=$(y,k),_=$(y,A),O=$(x,k),j=$(x,A),T=[C.x,C.y,O.x,O.y,j.x,j.y,_.x,_.y];if(i.pointInsidePolygonPoints(e,t,T))return b(n),!0}else{var P={w:f,h:p,x1:y,x2:x,y1:k,y2:A};if(i.inBoundingBox(P,e,t))return b(n),!0}}}n&&(u=u.interactive);for(var A=u.length-1;A>=0;A--){var E=u[A];E.isNode()?y(E)||k(E):x(E)||k(E)||k(E,"source")||k(E,"target")}return d},getAllInBox:function(e,t,n,r){var o=this.getCachedZSortedEles().interactive,a=[],s=Math.min(e,n),l=Math.max(e,n),c=Math.min(t,r),u=Math.max(t,r);e=s,n=l,t=c,r=u;for(var d=i.makeBoundingBox({x1:e,y1:t,x2:n,y2:r}),f=0;fb?b+"$-$"+v:v+"$-$"+b,g&&(t="unbundled$-$"+h.id);var y=u[t];null==y&&(y=u[t]=[],d.push(t)),y.push(Bt),g&&(y.hasUnbundled=!0),m&&(y.hasBezier=!0)}else f.push(Bt)}for(var x=0;xGt.id()){var k=Ht;Ht=Gt,Gt=k}Wt=Ht.position(),Yt=Gt.position(),Xt=Ht.outerWidth(),Qt=Ht.outerHeight(),Zt=Gt.outerWidth(),Jt=Gt.outerHeight(),n=l.nodeShapes[this.getNodeShape(Ht)],o=l.nodeShapes[this.getNodeShape(Gt)],s=!1;var A={north:0,west:0,south:0,east:0,northwest:0,southwest:0,northeast:0,southeast:0},E=Wt.x,S=Wt.y,$=Xt,C=Qt,_=Yt.x,O=Yt.y,j=Zt,T=Jt,P=w.length;for(p=0;p=d||w){p={cp:b,segment:x};break}}if(p)break}b=p.cp;var k=(d-g)/(x=p.segment).length,A=x.t1-x.t0,E=u?x.t0+A*k:x.t1-A*k;E=r.bound(0,E,1),t=r.qbezierPtAt(b.p0,b.p1,b.p2,E),c=function(e,t,n,i){var o=r.bound(0,i-.001,1),a=r.bound(0,i+.001,1),s=r.qbezierPtAt(e,t,n,o),l=r.qbezierPtAt(e,t,n,a);return f(s,l)}(b.p0,b.p1,b.p2,E);break;case"straight":case"segments":case"haystack":var 
S,$,C,_,O=0,j=i.allpts.length;for(v=0;v+3=d));v+=2);E=(d-$)/S,E=r.bound(0,E,1),t=r.lineAt(C,_,E),c=f(C,_)}l("labelX",o,t.x),l("labelY",o,t.y),l("labelAutoAngle",o,c)}};c("source"),c("target"),this.applyLabelDimensions(e)}},applyLabelDimensions:function(e){this.applyPrefixedLabelDimensions(e),e.isEdge()&&(this.applyPrefixedLabelDimensions(e,"source"),this.applyPrefixedLabelDimensions(e,"target"))},applyPrefixedLabelDimensions:function(e,t){var n=e._private,r=this.getLabelText(e,t),i=this.calculateLabelDimensions(e,r);o.setPrefixedProperty(n.rstyle,"labelWidth",t,i.width),o.setPrefixedProperty(n.rscratch,"labelWidth",t,i.width),o.setPrefixedProperty(n.rstyle,"labelHeight",t,i.height),o.setPrefixedProperty(n.rscratch,"labelHeight",t,i.height)},getLabelText:function(e,t){var n=e._private,r=t?t+"-":"",i=e.pstyle(r+"label").strValue,a=e.pstyle("text-transform").value,s=function(e,r){return r?(o.setPrefixedProperty(n.rscratch,e,t,r),r):o.getPrefixedProperty(n.rscratch,e,t)};"none"==a||("uppercase"==a?i=i.toUpperCase():"lowercase"==a&&(i=i.toLowerCase()));var l=e.pstyle("text-wrap").value;if("wrap"===l){var c=s("labelKey");if(c&&s("labelWrapKey")===c)return s("labelWrapCachedText");for(var u=i.split("\n"),d=e.pstyle("text-max-width").pfValue,f=[],p=0;pd){for(var g=h.split(/\s+/),m="",v=0;vd);k++)x+=i[k],k===i.length-1&&(w=!0);return w||(x+="…"),x}return i},calculateLabelDimensions:function(e,t,n){var r=e._private.labelStyleKey+"$@$"+t;n&&(r+="$@$"+n);var i=this.labelDimCache||(this.labelDimCache={});if(i[r])return i[r];var o=e.pstyle("font-style").strValue,a=1*e.pstyle("font-size").pfValue+"px",s=e.pstyle("font-family").strValue,l=e.pstyle("font-weight").strValue,c=this.labelCalcDiv;c||(c=this.labelCalcDiv=document.createElement("div"),document.body.appendChild(c));var u=c.style;return u.fontFamily=s,u.fontStyle=o,u.fontSize=a,u.fontWeight=l,u.position="absolute",u.left="-9999px",u.top="-9999px",u.zIndex="-1",u.visibility="hidden",u.pointerEvents="none",u.padding="0",u.lineHeight="1","wrap"===e.pstyle("text-wrap").value?u.whiteSpace="pre":u.whiteSpace="normal",c.textContent=t,i[r]={width:Math.ceil(c.clientWidth/1),height:Math.ceil(c.clientHeight/1)},i[r]},calculateLabelAngles:function(e){var t=e._private.rscratch,n=e.isEdge(),r=e.pstyle("text-rotation"),i=r.strValue;"none"===i?t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle=0:n&&"autorotate"===i?(t.labelAngle=Math.atan(t.midDispY/t.midDispX),t.sourceLabelAngle=t.sourceLabelAutoAngle,t.targetLabelAngle=t.targetLabelAutoAngle):t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle="autorotate"===i?0:r.pfValue}};e.exports=a},function(e,t,n){"use strict";var r={getNodeShape:function(e){var t=e.pstyle("shape").value;if(e.isParent())return"rectangle"===t||"roundrectangle"===t||"cutrectangle"===t||"barrel"===t?t:"rectangle";if("polygon"===t){var n=e.pstyle("shape-polygon-points").value;return this.nodeShapes.makePolygon(n).name}return t}};e.exports=r},function(e,t,n){"use strict";var r={registerCalculationListeners:function(){var e=this.cy,t=e.collection(),n=this,r=function(e,n){var r=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];t.merge(e);for(var i=0;i=e.desktopTapThreshold2}var C=n(i);b&&(e.hoverData.tapholdCancelled=!0),s=!0,t(v,["mousemove","vmousemove","tapdrag"],i,{position:{x:p[0],y:p[1]}});var _=function(){e.data.bgActivePosistion=void 0,e.hoverData.selecting||l.emit("boxstart"),m[4]=1,e.hoverData.selecting=!0,e.redrawHint("select",!0),e.redraw()};if(3===e.hoverData.which){if(b){var 
O={originalEvent:i,type:"cxtdrag",position:{x:p[0],y:p[1]}};x?x.emit(O):l.emit(O),e.hoverData.cxtDragged=!0,e.hoverData.cxtOver&&v===e.hoverData.cxtOver||(e.hoverData.cxtOver&&e.hoverData.cxtOver.emit({originalEvent:i,type:"cxtdragout",position:{x:p[0],y:p[1]}}),e.hoverData.cxtOver=v,v&&v.emit({originalEvent:i,type:"cxtdragover",position:{x:p[0],y:p[1]}}))}}else if(e.hoverData.dragging){if(s=!0,l.panningEnabled()&&l.userPanningEnabled()){var T;if(e.hoverData.justStartedPan){var P=e.hoverData.mdownPos;T={x:(p[0]-P[0])*c,y:(p[1]-P[1])*c},e.hoverData.justStartedPan=!1}else T={x:w[0]*c,y:w[1]*c};l.panBy(T),e.hoverData.dragged=!0}p=e.projectIntoViewport(i.clientX,i.clientY)}else if(1!=m[4]||null!=x&&!x.isEdge()){if(x&&x.isEdge()&&x.active()&&x.unactivate(),x&&x.grabbed()||v==y||(y&&t(y,["mouseout","tapdragout"],i,{position:{x:p[0],y:p[1]}}),v&&t(v,["mouseover","tapdragover"],i,{position:{x:p[0],y:p[1]}}),e.hoverData.last=v),x)if(b){if(l.boxSelectionEnabled()&&C)x&&x.grabbed()&&(f(k),x.emit("free")),_();else if(x&&x.grabbed()&&e.nodeIsDraggable(x)){var D=!e.dragData.didDrag;D&&e.redrawHint("eles",!0),e.dragData.didDrag=!0;var R=[];e.hoverData.draggingEles||u(l.collection(k),{inDragLayer:!0});for(var I=0;I0&&e.redrawHint("eles",!0),e.dragData.possibleDragElements=l=[]),t(s,["mouseup","tapend","vmouseup"],r,{position:{x:o[0],y:o[1]}}),e.dragData.didDrag||e.hoverData.dragged||e.hoverData.selecting||e.hoverData.isOverThresholdDrag||t(c,["click","tap","vclick"],r,{position:{x:o[0],y:o[1]}}),s!=c||e.dragData.didDrag||e.hoverData.selecting||null!=s&&s._private.selectable&&(e.hoverData.dragging||("additive"===i.selectionType()||u?s.selected()?s.unselect():s.select():u||(i.$(":selected").unmerge(s).unselect(),s.select())),e.redrawHint("eles",!0)),e.hoverData.selecting){var h=i.collection(e.getAllInBox(a[0],a[1],a[2],a[3]));e.redrawHint("select",!0),h.length>0&&e.redrawHint("eles",!0),i.emit("boxend");var g=function(e){return e.selectable()&&!e.selected()};"additive"===i.selectionType()||u||i.$(":selected").unmerge(h).unselect(),h.emit("box").stdFilter(g).select().emit("boxselect"),e.redraw()}if(e.hoverData.dragging&&(e.hoverData.dragging=!1,e.redrawHint("select",!0),e.redrawHint("eles",!0),e.redraw()),!a[4]){e.redrawHint("drag",!0),e.redrawHint("eles",!0);var m=c&&c.grabbed();f(l),m&&c.emit("free")}}a[4]=0,e.hoverData.down=null,e.hoverData.cxtStarted=!1,e.hoverData.draggingEles=!1,e.hoverData.selecting=!1,e.hoverData.isOverThresholdDrag=!1,e.dragData.didDrag=!1,e.hoverData.dragged=!1,e.hoverData.dragDelta=[],e.hoverData.mdownPos=null,e.hoverData.mdownGPos=null}}),!1),e.registerBinding(e.container,"wheel",(function(t){if(!e.scrollingPage){var 
n,r=e.cy,i=e.projectIntoViewport(t.clientX,t.clientY),o=[i[0]*r.zoom()+r.pan().x,i[1]*r.zoom()+r.pan().y];e.hoverData.draggingEles||e.hoverData.dragging||e.hoverData.cxtStarted||0!==e.selection[4]?t.preventDefault():r.panningEnabled()&&r.userPanningEnabled()&&r.zoomingEnabled()&&r.userZoomingEnabled()&&(t.preventDefault(),e.data.wheelZooming=!0,clearTimeout(e.data.wheelTimeout),e.data.wheelTimeout=setTimeout((function(){e.data.wheelZooming=!1,e.redrawHint("eles",!0),e.redraw()}),150),n=null!=t.deltaY?t.deltaY/-250:null!=t.wheelDeltaY?t.wheelDeltaY/1e3:t.wheelDelta/1e3,n*=e.wheelSensitivity,1===t.deltaMode&&(n*=33),r.zoom({level:r.zoom()*Math.pow(10,n),renderedPosition:{x:o[0],y:o[1]}}))}}),!0),e.registerBinding(window,"scroll",(function(t){e.scrollingPage=!0,clearTimeout(e.scrollingPageTimeout),e.scrollingPageTimeout=setTimeout((function(){e.scrollingPage=!1}),250)}),!0),e.registerBinding(e.container,"mouseout",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseout",position:{x:n[0],y:n[1]}})}),!1),e.registerBinding(e.container,"mouseover",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseover",position:{x:n[0],y:n[1]}})}),!1);var T,P,D,R,I=function(e,t,n,r){return Math.sqrt((n-e)*(n-e)+(r-t)*(r-t))},N=function(e,t,n,r){return(n-e)*(n-e)+(r-t)*(r-t)};if(e.registerBinding(e.container,"touchstart",T=function(n){if(j(n)){e.touchData.capture=!0,e.data.bgActivePosistion=void 0;var r=e.cy,i=e.touchData.now,o=e.touchData.earlier;if(n.touches[0]){var a=e.projectIntoViewport(n.touches[0].clientX,n.touches[0].clientY);i[0]=a[0],i[1]=a[1]}if(n.touches[1]&&(a=e.projectIntoViewport(n.touches[1].clientX,n.touches[1].clientY),i[2]=a[0],i[3]=a[1]),n.touches[2]&&(a=e.projectIntoViewport(n.touches[2].clientX,n.touches[2].clientY),i[4]=a[0],i[5]=a[1]),n.touches[1]){f(e.dragData.touchDragEles);var s=e.findContainerClientCoords();S=s[0],$=s[1],C=s[2],_=s[3],v=n.touches[0].clientX-S,b=n.touches[0].clientY-$,y=n.touches[1].clientX-S,x=n.touches[1].clientY-$,O=0<=v&&v<=C&&0<=y&&y<=C&&0<=b&&b<=_&&0<=x&&x<=_;var c=r.pan(),p=r.zoom();if(w=I(v,b,y,x),k=N(v,b,y,x),E=[((A=[(v+y)/2,(b+x)/2])[0]-c.x)/p,(A[1]-c.y)/p],k<4e4&&!n.touches[2]){var h=e.findNearestElement(i[0],i[1],!0,!0),g=e.findNearestElement(i[2],i[3],!0,!0);return h&&h.isNode()?(h.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=h):g&&g.isNode()?(g.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=g):r.emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxt=!0,e.touchData.cxtDragged=!1,e.data.bgActivePosistion=void 0,void e.redraw()}}if(n.touches[2]);else if(n.touches[1]);else if(n.touches[0]){var m=e.findNearestElements(i[0],i[1],!0,!0),T=m[0];if(null!=T&&(T.activate(),e.touchData.start=T,e.touchData.starts=m,e.nodeIsGrabbable(T))){var P=e.dragData.touchDragEles=[],D=null;e.redrawHint("eles",!0),e.redrawHint("drag",!0),T.selected()?(D=r.$((function(t){return t.selected()&&e.nodeIsGrabbable(t)})),u(D,{addToList:P})):d(T,{addToList:P}),l(T);var 
R=function(e){return{originalEvent:n,type:e,position:{x:i[0],y:i[1]}}};T.emit(R("grabon")),D?D.forEach((function(e){e.emit(R("grab"))})):T.emit(R("grab"))}t(T,["touchstart","tapstart","vmousedown"],n,{position:{x:i[0],y:i[1]}}),null==T&&(e.data.bgActivePosistion={x:a[0],y:a[1]},e.redrawHint("select",!0),e.redraw()),e.touchData.singleTouchMoved=!1,e.touchData.singleTouchStartTime=+new Date,clearTimeout(e.touchData.tapholdTimeout),e.touchData.tapholdTimeout=setTimeout((function(){!1!==e.touchData.singleTouchMoved||e.pinching||e.touchData.selecting||(t(e.touchData.start,["taphold"],n,{position:{x:i[0],y:i[1]}}),e.touchData.start||r.$(":selected").unselect())}),e.tapholdDuration)}if(n.touches.length>=1){for(var M=e.touchData.startPosition=[],z=0;z=e.touchTapThreshold2}if(i&&e.touchData.cxt){n.preventDefault();var D=n.touches[0].clientX-S,R=n.touches[0].clientY-$,M=n.touches[1].clientX-S,z=n.touches[1].clientY-$,L=N(D,R,M,z);if(L/k>=2.25||L>=22500){e.touchData.cxt=!1,e.data.bgActivePosistion=void 0,e.redrawHint("select",!0);var B={originalEvent:n,type:"cxttapend",position:{x:c[0],y:c[1]}};e.touchData.start?(e.touchData.start.unactivate().emit(B),e.touchData.start=null):l.emit(B)}}if(i&&e.touchData.cxt){B={originalEvent:n,type:"cxtdrag",position:{x:c[0],y:c[1]}},e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),e.touchData.start?e.touchData.start.emit(B):l.emit(B),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxtDragged=!0;var F=e.findNearestElement(c[0],c[1],!0,!0);e.touchData.cxtOver&&F===e.touchData.cxtOver||(e.touchData.cxtOver&&e.touchData.cxtOver.emit({originalEvent:n,type:"cxtdragout",position:{x:c[0],y:c[1]}}),e.touchData.cxtOver=F,F&&F.emit({originalEvent:n,type:"cxtdragover",position:{x:c[0],y:c[1]}}))}else if(i&&n.touches[2]&&l.boxSelectionEnabled())n.preventDefault(),e.data.bgActivePosistion=void 0,this.lastThreeTouch=+new Date,e.touchData.selecting||l.emit("boxstart"),e.touchData.selecting=!0,e.redrawHint("select",!0),s&&0!==s.length&&void 0!==s[0]?(s[2]=(c[0]+c[2]+c[4])/3,s[3]=(c[1]+c[3]+c[5])/3):(s[0]=(c[0]+c[2]+c[4])/3,s[1]=(c[1]+c[3]+c[5])/3,s[2]=(c[0]+c[2]+c[4])/3+1,s[3]=(c[1]+c[3]+c[5])/3+1),s[4]=1,e.touchData.selecting=!0,e.redraw();else if(i&&n.touches[1]&&l.zoomingEnabled()&&l.panningEnabled()&&l.userZoomingEnabled()&&l.userPanningEnabled()){if(n.preventDefault(),e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),ee=e.dragData.touchDragEles){e.redrawHint("drag",!0);for(var q=0;q0)return h[0]}return null},p=Object.keys(d),h=0;h0?f:r.roundRectangleIntersectLine(o,a,e,t,n,i,s)},checkPoint:function(e,t,n,i,o,a,s){var l=r.getRoundRectangleRadius(i,o),c=2*l;if(r.pointInsidePolygon(e,t,this.points,a,s,i,o-c,[0,-1],n))return!0;if(r.pointInsidePolygon(e,t,this.points,a,s,i-c,o,[0,-1],n))return!0;var u=i/2+2*n,d=o/2+2*n,f=[a-u,s-d,a-u,s,a+u,s,a+u,s-d];return!!r.pointInsidePolygonPoints(e,t,f)||!!r.checkInEllipse(e,t,c,c,a+i/2-l,s+o/2-l,n)||!!r.checkInEllipse(e,t,c,c,a-i/2+l,s+o/2-l,n)}}},registerNodeShapes:function(){var 
e=this.nodeShapes={},t=this;this.generateEllipse(),this.generatePolygon("triangle",r.generateUnitNgonPointsFitToSquare(3,0)),this.generatePolygon("rectangle",r.generateUnitNgonPointsFitToSquare(4,0)),e.square=e.rectangle,this.generateRoundRectangle(),this.generateCutRectangle(),this.generateBarrel(),this.generateBottomRoundrectangle(),this.generatePolygon("diamond",[0,1,1,0,0,-1,-1,0]),this.generatePolygon("pentagon",r.generateUnitNgonPointsFitToSquare(5,0)),this.generatePolygon("hexagon",r.generateUnitNgonPointsFitToSquare(6,0)),this.generatePolygon("heptagon",r.generateUnitNgonPointsFitToSquare(7,0)),this.generatePolygon("octagon",r.generateUnitNgonPointsFitToSquare(8,0));var n=new Array(20),i=r.generateUnitNgonPoints(5,0),o=r.generateUnitNgonPoints(5,Math.PI/5),a=.5*(3-Math.sqrt(5));a*=1.57;for(var s=0;s0&&t.data.lyrTxrCache.invalidateElements(n)}))}l.CANVAS_LAYERS=3,l.SELECT_BOX=0,l.DRAG=1,l.NODE=2,l.BUFFER_COUNT=3,l.TEXTURE_BUFFER=0,l.MOTIONBLUR_BUFFER_NODE=1,l.MOTIONBLUR_BUFFER_DRAG=2,l.redrawHint=function(e,t){var n=this;switch(e){case"eles":n.data.canvasNeedsRedraw[l.NODE]=t;break;case"drag":n.data.canvasNeedsRedraw[l.DRAG]=t;break;case"select":n.data.canvasNeedsRedraw[l.SELECT_BOX]=t}};var u="undefined"!=typeof Path2D;l.path2dEnabled=function(e){if(void 0===e)return this.pathsEnabled;this.pathsEnabled=!!e},l.usePaths=function(){return u&&this.pathsEnabled},[n(126),n(127),n(128),n(129),n(130),n(131),n(132),n(133),n(134),n(135)].forEach((function(e){r.extend(l,e)})),e.exports=s},function(e,t,n){"use strict";var r=n(2),i=n(1),o=n(9),a=n(19),s={dequeue:"dequeue",downscale:"downscale",highQuality:"highQuality"},l=function(e){this.renderer=e,this.onDequeues=[],this.setupDequeueing()},c=l.prototype;c.reasons=s,c.getTextureQueue=function(e){return this.eleImgCaches=this.eleImgCaches||{},this.eleImgCaches[e]=this.eleImgCaches[e]||[]},c.getRetiredTextureQueue=function(e){var t=this.eleImgCaches.retired=this.eleImgCaches.retired||{};return t[e]=t[e]||[]},c.getElementQueue=function(){return this.eleCacheQueue=this.eleCacheQueue||new o((function(e,t){return t.reqs-e.reqs}))},c.getElementIdToQueue=function(){return this.eleIdToCacheQueue=this.eleIdToCacheQueue||{}},c.getElement=function(e,t,n,i,o){var a=this,l=this.renderer,c=e._private.rscratch,u=l.cy.zoom();if(0===t.w||0===t.h||!e.visible())return null;if(null==i&&(i=Math.ceil(r.log2(u*n))),i<-4)i=-4;else if(u>=3.99||i>2)return null;var d,f=Math.pow(2,i),p=t.h*f,h=t.w*f,g=c.imgCaches=c.imgCaches||{},m=g[i];if(m)return m;if(d=p<=25?25:p<=50?50:50*Math.ceil(p/50),p>1024||h>1024||e.isEdge()||e.isParent())return null;var v=a.getTextureQueue(d),b=v[v.length-2],y=function(){return a.recycleTexture(d,h)||a.addTexture(d,h)};b||(b=v[v.length-1]),b||(b=y()),b.width-b.usedWidthi;$--)C=a.getElement(e,t,n,$,s.downscale);_()}else{var O;if(!A&&!E&&!S)for($=i-1;$>=-4;$--){var j;if(j=g[$]){O=j;break}}if(k(O))return a.queueElement(e,i),O;b.context.translate(b.usedWidth,0),b.context.scale(f,f),l.drawElement(b.context,e,t,w),b.context.scale(1/f,1/f),b.context.translate(-b.usedWidth,0)}return m=g[i]={ele:e,x:b.usedWidth,texture:b,level:i,scale:f,width:h,height:p,scaledLabelShown:w},b.usedWidth+=Math.ceil(h+8),b.eleCaches.push(m),a.checkTextureFullness(b),m},c.invalidateElement=function(e){var t=e._private.rscratch.imgCaches;if(t)for(var n=-4;n<=2;n++){var r=t[n];if(r){var 
o=r.texture;o.invalidatedWidth+=r.width,t[n]=null,i.removeFromArray(o.eleCaches,r),this.removeFromQueue(e),this.checkTextureUtility(o)}}},c.checkTextureUtility=function(e){e.invalidatedWidth>=.5*e.width&&this.retireTexture(e)},c.checkTextureFullness=function(e){var t=this.getTextureQueue(e.height);e.usedWidth/e.width>.8&&e.fullnessChecks>=10?i.removeFromArray(t,e):e.fullnessChecks++},c.retireTexture=function(e){var t=e.height,n=this.getTextureQueue(t);i.removeFromArray(n,e),e.retired=!0;for(var r=e.eleCaches,o=0;o=t)return a.retired=!1,a.usedWidth=0,a.invalidatedWidth=0,a.fullnessChecks=0,i.clearArray(a.eleCaches),a.context.setTransform(1,0,0,1,0,0),a.context.clearRect(0,0,a.width,a.height),i.removeFromArray(r,a),n.push(a),a}},c.queueElement=function(e,t){var n=this.getElementQueue(),r=this.getElementIdToQueue(),i=e.id(),o=r[i];if(o)o.level=Math.max(o.level,t),o.reqs++,n.updateItem(o);else{var a={ele:e,level:t,reqs:1};n.push(a),r[i]=a}},c.dequeue=function(e){for(var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=[],i=0;i<1&&t.size()>0;i++){var o=t.pop(),a=o.ele;if(null==a._private.rscratch.imgCaches[o.level]){n[a.id()]=null,r.push(o);var l=a.boundingBox();this.getElement(a,l,e,o.level,s.dequeue)}}return r},c.removeFromQueue=function(e){var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=n[e.id()];null!=r&&(r.reqs=i.MAX_INT,t.updateItem(r),t.pop(),n[e.id()]=null)},c.onDequeue=function(e){this.onDequeues.push(e)},c.offDequeue=function(e){i.removeFromArray(this.onDequeues,e)},c.setupDequeueing=a.setupDequeueing({deqRedrawThreshold:100,deqCost:.15,deqAvgCost:.1,deqNoDrawCost:.9,deqFastCost:.9,deq:function(e,t,n){return e.dequeue(t,n)},onDeqd:function(e,t){for(var n=0;n=3.99||n>2)return null;o.validateLayersElesOrdering(n,e);var l,c,u=o.layersByLevel,d=Math.pow(2,n),f=u[n]=u[n]||[];if(o.levelIsComplete(n,e))return f;!function(){var t=function(t){if(o.validateLayersElesOrdering(t,e),o.levelIsComplete(t,e))return c=u[t],!0},i=function(e){if(!c)for(var r=n+e;-4<=r&&r<=2&&!t(r);r+=e);};i(1),i(-1);for(var a=f.length-1;a>=0;a--){var s=f[a];s.invalid&&r.removeFromArray(f,s)}}();var p=function(t){var r=(t=t||{}).after;if(function(){if(!l){l=i.makeBoundingBox();for(var t=0;t16e6)return null;var a=o.makeLayer(l,n);if(null!=r){var s=f.indexOf(r)+1;f.splice(s,0,a)}else(void 0===t.insert||t.insert)&&f.unshift(a);return a};if(o.skipping&&!s)return null;for(var h=null,g=e.length/1,m=!s,v=0;v=g||!i.boundingBoxInBoundingBox(h.bb,b.boundingBox()))&&!(h=p({insert:!0,after:h})))return null;c||m?o.queueLayer(h,b):o.drawEleInLayer(h,b,n,t),h.eles.push(b),x[n]=h}}return c||(m?null:f)},c.getEleLevelForLayerLevel=function(e,t){return e},c.drawEleInLayer=function(e,t,n,r){var i=this.renderer,o=e.context,a=t.boundingBox();if(0!==a.w&&0!==a.h&&t.visible()){var s=this.eleTxrCache,l=s.reasons.highQuality;n=this.getEleLevelForLayerLevel(n,r);var c=s.getElement(t,a,null,n,l);c?(f(o,!1),o.drawImage(c.texture.canvas,c.x,0,c.width,c.height,a.x1,a.y1,a.w,a.h),f(o,!0)):i.drawElement(o,t)}},c.levelIsComplete=function(e,t){var n=this.layersByLevel[e];if(!n||0===n.length)return!1;for(var r=0,i=0;i0)return!1;if(o.invalid)return!1;r+=o.eles.length}return r===t.length},c.validateLayersElesOrdering=function(e,t){var n=this.layersByLevel[e];if(n)for(var r=0;r0){e=!0;break}}return e},c.invalidateElements=function(e){var 
t=this;t.lastInvalidationTime=r.performanceNow(),0!==e.length&&t.haveLayers()&&t.updateElementsInLayers(e,(function(e,n,r){t.invalidateLayer(e)}))},c.invalidateLayer=function(e){if(this.lastInvalidationTime=r.performanceNow(),!e.invalid){var t=e.level,n=e.eles,i=this.layersByLevel[t];r.removeFromArray(i,e),e.elesQueue=[],e.invalid=!0,e.replacement&&(e.replacement.invalid=!0);for(var o=0;o0&&void 0!==arguments[0]?arguments[0]:f;e.lineWidth=h,e.lineCap="butt",i.strokeStyle(e,d[0],d[1],d[2],n),i.drawEdgePath(t,e,o.allpts,p)},m=function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:f;i.drawArrowheads(e,t,n)};if(e.lineJoin="round","yes"===t.pstyle("ghost").value){var v=t.pstyle("ghost-offset-x").pfValue,b=t.pstyle("ghost-offset-y").pfValue,y=t.pstyle("ghost-opacity").value,x=f*y;e.translate(v,b),g(x),m(x),e.translate(-v,-b)}g(),m(),function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:c;e.lineWidth=l,"self"!==o.edgeType||a?e.lineCap="round":e.lineCap="butt",i.strokeStyle(e,u[0],u[1],u[2],n),i.drawEdgePath(t,e,o.allpts,"solid")}(),i.drawElementText(e,t,r),n&&e.translate(s.x1,s.y1)}},drawEdgePath:function(e,t,n,r){var i=e._private.rscratch,o=t,a=void 0,s=!1,l=this.usePaths();if(l){var c=n.join("$");i.pathCacheKey&&i.pathCacheKey===c?(a=t=i.pathCache,s=!0):(a=t=new Path2D,i.pathCacheKey=c,i.pathCache=a)}if(o.setLineDash)switch(r){case"dotted":o.setLineDash([1,1]);break;case"dashed":o.setLineDash([6,3]);break;case"solid":o.setLineDash([])}if(!s&&!i.badLine)switch(t.beginPath&&t.beginPath(),t.moveTo(n[0],n[1]),i.edgeType){case"bezier":case"self":case"compound":case"multibezier":if(e.hasClass("horizontal")){var u=n[4],d=n[5],f=(n[0]+n[4])/2;t.lineTo(n[0]+10,n[1]),t.bezierCurveTo(f,n[1],f,n[5],n[4]-10,n[5]),t.lineTo(u,d)}else if(e.hasClass("vertical")){var p=n[4],h=n[5],g=(n[1]+n[5])/2;t.bezierCurveTo(n[0],g,n[4],g,n[4],n[5]-10),t.lineTo(p,h)}else for(var m=2;m+30||j>0&&O>0){var P=f-T;switch(k){case"left":P-=m;break;case"center":P-=m/2}var D=p-v-T,R=m+2*T,I=v+2*T;if(_>0){var N=e.fillStyle,M=t.pstyle("text-background-color").value;e.fillStyle="rgba("+M[0]+","+M[1]+","+M[2]+","+_*o+")","roundrectangle"==t.pstyle("text-background-shape").strValue?(s=P,l=D,c=R,u=I,d=(d=2)||5,(a=e).beginPath(),a.moveTo(s+d,l),a.lineTo(s+c-d,l),a.quadraticCurveTo(s+c,l,s+c,l+d),a.lineTo(s+c,l+u-d),a.quadraticCurveTo(s+c,l+u,s+c-d,l+u),a.lineTo(s+d,l+u),a.quadraticCurveTo(s,l+u,s,l+u-d),a.lineTo(s,l+d),a.quadraticCurveTo(s,l,s+d,l),a.closePath(),a.fill()):e.fillRect(P,D,R,I),e.fillStyle=N}if(j>0&&O>0){var z=e.strokeStyle,L=e.lineWidth,B=t.pstyle("text-border-color").value,F=t.pstyle("text-border-style").value;if(e.strokeStyle="rgba("+B[0]+","+B[1]+","+B[2]+","+O*o+")",e.lineWidth=j,e.setLineDash)switch(F){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"double":e.lineWidth=j/4,e.setLineDash([]);break;case"solid":e.setLineDash([])}if(e.strokeRect(P,D,R,I),"double"===F){var q=j/2;e.strokeRect(P+q,D+q,R-2*q,I-2*q)}e.setLineDash&&e.setLineDash([]),e.lineWidth=L,e.strokeStyle=z}}var V=2*t.pstyle("text-outline-width").pfValue;if(V>0&&(e.lineWidth=V),"wrap"===t.pstyle("text-wrap").value){var U=r.getPrefixedProperty(i,"labelWrapCachedLines",n),H=v/U.length;switch(A){case"top":p-=(U.length-1)*H;break;case"center":case"bottom":p-=(U.length-1)*H}for(var G=0;G0&&e.strokeText(U[G],f,p),e.fillText(U[G],f,p),p+=H}else V>0&&e.strokeText(h,f,p),e.fillText(h,f,p);0!==E&&(e.rotate(-E),e.translate(-$,-C))}}},e.exports=o},function(e,t,n){"use strict";var 
r=n(0),i={drawNode:function(e,t,n,i){var o,a,s=this,l=t._private,c=l.rscratch,u=t.position();if(r.number(u.x)&&r.number(u.y)&&t.visible()){var d=t.effectiveOpacity(),f=s.usePaths(),p=void 0,h=!1,g=t.padding();o=t.width()+2*g,a=t.height()+2*g;var m=void 0;n&&(m=n,e.translate(-m.x1,-m.y1));for(var v=t.pstyle("background-image").value,b=new Array(v.length),y=new Array(v.length),x=0,w=0;w0&&void 0!==arguments[0]?arguments[0]:C;s.fillStyle(e,$[0],$[1],$[2],t)},P=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:j;s.strokeStyle(e,_[0],_[1],_[2],t)},D=t.pstyle("shape").strValue,R=t.pstyle("shape-polygon-points").pfValue;if(f){var I=D+"$"+o+"$"+a+("polygon"===D?"$"+R.join("$"):"");e.translate(u.x,u.y),c.pathCacheKey===I?(p=c.pathCache,h=!0):(p=new Path2D,c.pathCacheKey=I,c.pathCache=p)}var N,M,z,L=function(){if(!h){var n=u;f&&(n={x:0,y:0}),s.nodeShapes[s.getNodeShape(t)].draw(p||e,n.x,n.y,o,a)}f?e.fill(p):e.fill()},B=function(){for(var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,r=l.backgrounding,i=0,o=0;o0&&void 0!==arguments[0]&&arguments[0],r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:d;s.hasPie(t)&&(s.drawPie(e,t,r),n&&(f||s.nodeShapes[s.getNodeShape(t)].draw(e,u.x,u.y,o,a)))},q=function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:d,n=(E>0?E:-E)*t,r=E>0?0:255;0!==E&&(s.fillStyle(e,r,r,r,n),f?e.fill(p):e.fill())},V=function(){if(S>0){if(e.lineWidth=S,e.lineCap="butt",e.setLineDash)switch(O){case"dotted":e.setLineDash([1,1]);break;case"dashed":e.setLineDash([4,2]);break;case"solid":case"double":e.setLineDash([])}if(f?e.stroke(p):e.stroke(),"double"===O){e.lineWidth=S/3;var t=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",f?e.stroke(p):e.stroke(),e.globalCompositeOperation=t}e.setLineDash&&e.setLineDash([])}};if("yes"===t.pstyle("ghost").value){var U=t.pstyle("ghost-offset-x").pfValue,H=t.pstyle("ghost-offset-y").pfValue,G=t.pstyle("ghost-opacity").value,W=G*d;e.translate(U,H),T(G*C),L(),B(W),F(0!==E||0!==S),q(W),P(G*j),V(),e.translate(-U,-H)}T(),L(),B(),F(0!==E||0!==S),q(),P(),V(),f&&e.translate(-u.x,-u.y),s.drawElementText(e,t,i),N=t.pstyle("overlay-padding").pfValue,M=t.pstyle("overlay-opacity").value,z=t.pstyle("overlay-color").value,M>0&&(s.fillStyle(e,z[0],z[1],z[2],M),s.nodeShapes.roundrectangle.draw(e,u.x,u.y,o+2*N,a+2*N),e.fill()),n&&e.translate(m.x1,m.y1)}},hasPie:function(e){return(e=e[0])._private.hasPie},drawPie:function(e,t,n,r){t=t[0],r=r||t.position();var i=t.cy().style(),o=t.pstyle("pie-size"),a=r.x,s=r.y,l=t.width(),c=t.height(),u=Math.min(l,c)/2,d=0;this.usePaths()&&(a=0,s=0),"%"===o.units?u*=o.pfValue:void 0!==o.pfValue&&(u=o.pfValue/2);for(var f=1;f<=i.pieBackgroundN;f++){var p=t.pstyle("pie-"+f+"-background-size").value,h=t.pstyle("pie-"+f+"-background-color").value,g=t.pstyle("pie-"+f+"-background-opacity").value*n,m=p/100;m+d>1&&(m=1-d);var v=1.5*Math.PI+2*Math.PI*d,b=v+2*Math.PI*m;0===p||d>=1||d+m>1||(e.beginPath(),e.moveTo(a,s),e.arc(a,s,u,v,b),e.closePath(),this.fillStyle(e,h[0],h[1],h[2],g),e.fill(),d+=m)}}};e.exports=i},function(e,t,n){"use strict";var r={},i=n(1);r.getPixelRatio=function(){var e=this.data.contexts[0];if(null!=this.forcedPixelRatio)return this.forcedPixelRatio;var t=e.backingStorePixelRatio||e.webkitBackingStorePixelRatio||e.mozBackingStorePixelRatio||e.msBackingStorePixelRatio||e.oBackingStorePixelRatio||e.backingStorePixelRatio||1;return(window.devicePixelRatio||1)/t},r.paintCache=function(e){for(var 
t,n=this.paintCaches=this.paintCaches||[],r=!0,i=0;is.minMbLowQualFrames&&(s.motionBlurPxRatio=s.mbPxRBlurry)),s.clearingMotionBlur&&(s.motionBlurPxRatio=1),s.textureDrawLastFrame&&!f&&(d[s.NODE]=!0,d[s.SELECT_BOX]=!0);var y=c.style()._private.coreStyle,x=c.zoom(),w=void 0!==o?o:x,k=c.pan(),A={x:k.x,y:k.y},E={zoom:x,pan:{x:k.x,y:k.y}},S=s.prevViewport;void 0===S||E.zoom!==S.zoom||E.pan.x!==S.pan.x||E.pan.y!==S.pan.y||m&&!g||(s.motionBlurPxRatio=1),a&&(A=a),w*=l,A.x*=l,A.y*=l;var $=s.getCachedZSortedEles();function C(e,t,n,r,i){var o=e.globalCompositeOperation;e.globalCompositeOperation="destination-out",s.fillStyle(e,255,255,255,s.motionBlurTransparency),e.fillRect(t,n,r,i),e.globalCompositeOperation=o}function _(e,r){var i,l,c,d;s.clearingMotionBlur||e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]&&e!==u.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]?(i=A,l=w,c=s.canvasWidth,d=s.canvasHeight):(i={x:k.x*h,y:k.y*h},l=x*h,c=s.canvasWidth*h,d=s.canvasHeight*h),e.setTransform(1,0,0,1,0,0),"motionBlur"===r?C(e,0,0,c,d):t||void 0!==r&&!r||e.clearRect(0,0,c,d),n||(e.translate(i.x,i.y),e.scale(l,l)),a&&e.translate(a.x,a.y),o&&e.scale(o,o)}if(f||(s.textureDrawLastFrame=!1),f){if(s.textureDrawLastFrame=!0,!s.textureCache){s.textureCache={},s.textureCache.bb=c.mutableElements().boundingBox(),s.textureCache.texture=s.data.bufferCanvases[s.TEXTURE_BUFFER];var O=s.data.bufferContexts[s.TEXTURE_BUFFER];O.setTransform(1,0,0,1,0,0),O.clearRect(0,0,s.canvasWidth*s.textureMult,s.canvasHeight*s.textureMult),s.render({forcedContext:O,drawOnlyNodeLayer:!0,forcedPxRatio:l*s.textureMult}),(E=s.textureCache.viewport={zoom:c.zoom(),pan:c.pan(),width:s.canvasWidth,height:s.canvasHeight}).mpan={x:(0-E.pan.x)/E.zoom,y:(0-E.pan.y)/E.zoom}}d[s.DRAG]=!1,d[s.NODE]=!1;var j=u.contexts[s.NODE],T=s.textureCache.texture;E=s.textureCache.viewport,s.textureCache.bb,j.setTransform(1,0,0,1,0,0),p?C(j,0,0,E.width,E.height):j.clearRect(0,0,E.width,E.height);var P=y["outside-texture-bg-color"].value,D=y["outside-texture-bg-opacity"].value;s.fillStyle(j,P[0],P[1],P[2],D),j.fillRect(0,0,E.width,E.height),x=c.zoom(),_(j,!1),j.clearRect(E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l),j.drawImage(T,E.mpan.x,E.mpan.y,E.width/E.zoom/l,E.height/E.zoom/l)}else s.textureOnViewport&&!t&&(s.textureCache=null);var R=c.extent(),I=s.pinching||s.hoverData.dragging||s.swipePanning||s.data.wheelZooming||s.hoverData.draggingEles,N=s.hideEdgesOnViewport&&I,M=[];if(M[s.NODE]=!d[s.NODE]&&p&&!s.clearedForMotionBlur[s.NODE]||s.clearingMotionBlur,M[s.NODE]&&(s.clearedForMotionBlur[s.NODE]=!0),M[s.DRAG]=!d[s.DRAG]&&p&&!s.clearedForMotionBlur[s.DRAG]||s.clearingMotionBlur,M[s.DRAG]&&(s.clearedForMotionBlur[s.DRAG]=!0),d[s.NODE]||n||r||M[s.NODE]){var z=p&&!M[s.NODE]&&1!==h;_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_NODE]:u.contexts[s.NODE]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.nondrag,l,R):s.drawLayeredElements(j,$.nondrag,l,R),s.debug&&s.drawDebugPoints(j,$.nondrag),n||p||(d[s.NODE]=!1)}if(!r&&(d[s.DRAG]||n||M[s.DRAG])&&(z=p&&!M[s.DRAG]&&1!==h,_(j=t||(z?s.data.bufferContexts[s.MOTIONBLUR_BUFFER_DRAG]:u.contexts[s.DRAG]),p&&!z?"motionBlur":void 0),N?s.drawCachedNodes(j,$.drag,l,R):s.drawCachedElements(j,$.drag,l,R),s.debug&&s.drawDebugPoints(j,$.drag),n||p||(d[s.DRAG]=!1)),s.showFps||!r&&d[s.SELECT_BOX]&&!n){if(_(j=t||u.contexts[s.SELECT_BOX]),1==s.selection[4]&&(s.hoverData.selecting||s.touchData.selecting)){x=s.cy.zoom();var 
L=y["selection-box-border-width"].value/x;j.lineWidth=L,j.fillStyle="rgba("+y["selection-box-color"].value[0]+","+y["selection-box-color"].value[1]+","+y["selection-box-color"].value[2]+","+y["selection-box-opacity"].value+")",j.fillRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]),L>0&&(j.strokeStyle="rgba("+y["selection-box-border-color"].value[0]+","+y["selection-box-border-color"].value[1]+","+y["selection-box-border-color"].value[2]+","+y["selection-box-opacity"].value+")",j.strokeRect(s.selection[0],s.selection[1],s.selection[2]-s.selection[0],s.selection[3]-s.selection[1]))}if(u.bgActivePosistion&&!s.hoverData.selecting){x=s.cy.zoom();var B=u.bgActivePosistion;j.fillStyle="rgba("+y["active-bg-color"].value[0]+","+y["active-bg-color"].value[1]+","+y["active-bg-color"].value[2]+","+y["active-bg-opacity"].value+")",j.beginPath(),j.arc(B.x,B.y,y["active-bg-size"].pfValue/x,0,2*Math.PI),j.fill()}var F=s.lastRedrawTime;if(s.showFps&&F){F=Math.round(F);var q=Math.round(1e3/F);j.setTransform(1,0,0,1,0,0),j.fillStyle="rgba(255, 0, 0, 0.75)",j.strokeStyle="rgba(255, 0, 0, 0.75)",j.lineWidth=1,j.fillText("1 frame = "+F+" ms = "+q+" fps",0,20),j.strokeRect(0,30,250,20),j.fillRect(0,30,250*Math.min(q/60,1),20)}n||(d[s.SELECT_BOX]=!1)}if(p&&1!==h){var V=u.contexts[s.NODE],U=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_NODE],H=u.contexts[s.DRAG],G=s.data.bufferCanvases[s.MOTIONBLUR_BUFFER_DRAG],W=function(e,t,n){e.setTransform(1,0,0,1,0,0),n||!b?e.clearRect(0,0,s.canvasWidth,s.canvasHeight):C(e,0,0,s.canvasWidth,s.canvasHeight);var r=h;e.drawImage(t,0,0,s.canvasWidth*r,s.canvasHeight*r,0,0,s.canvasWidth,s.canvasHeight)};(d[s.NODE]||M[s.NODE])&&(W(V,U,M[s.NODE]),d[s.NODE]=!1),(d[s.DRAG]||M[s.DRAG])&&(W(H,G,M[s.DRAG]),d[s.DRAG]=!1)}s.prevViewport=E,s.clearingMotionBlur&&(s.clearingMotionBlur=!1,s.motionBlurCleared=!0,s.motionBlur=!0),p&&(s.motionBlurTimeout=setTimeout((function(){s.motionBlurTimeout=null,s.clearedForMotionBlur[s.NODE]=!1,s.clearedForMotionBlur[s.DRAG]=!1,s.motionBlur=!1,s.clearingMotionBlur=!f,s.mbFrames=0,d[s.NODE]=!0,d[s.DRAG]=!0,s.redraw()}),100)),t||c.emit("render")},e.exports=r},function(e,t,n){"use strict";for(var r=n(2),i={drawPolygonPath:function(e,t,n,r,i,o){var a=r/2,s=i/2;e.beginPath&&e.beginPath(),e.moveTo(t+a*o[0],n+s*o[1]);for(var l=1;l0&&a>0){p.clearRect(0,0,o,a),p.globalCompositeOperation="source-over";var h=this.getCachedZSortedEles();if(e.full)p.translate(-n.x1*c,-n.y1*c),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(n.x1*c,n.y1*c);else{var g=t.pan(),m={x:g.x*c,y:g.y*c};c*=t.zoom(),p.translate(m.x,m.y),p.scale(c,c),this.drawElements(p,h),p.scale(1/c,1/c),p.translate(-m.x,-m.y)}e.bg&&(p.globalCompositeOperation="destination-over",p.fillStyle=e.bg,p.rect(0,0,o,a),p.fill())}return f},i.png=function(e){return a(e,this.bufferCanvasImage(e),"image/png")},i.jpg=function(e){return a(e,this.bufferCanvasImage(e),"image/jpeg")},e.exports=i},function(e,t,n){"use strict";var r={nodeShapeImpl:function(e,t,n,r,i,o,a){switch(e){case"ellipse":return this.drawEllipsePath(t,n,r,i,o);case"polygon":return this.drawPolygonPath(t,n,r,i,o,a);case"roundrectangle":return this.drawRoundRectanglePath(t,n,r,i,o);case"cutrectangle":return this.drawCutRectanglePath(t,n,r,i,o);case"bottomroundrectangle":return this.drawBottomRoundRectanglePath(t,n,r,i,o);case"barrel":return this.drawBarrelPath(t,n,r,i,o)}}};e.exports=r},function(e,t,n){"use strict";var r=n(0),i=n(1),o=n(18),a=function e(){if(!(this instanceof e))return new 
e;this.length=0},s=a.prototype;s.instanceString=function(){return"stylesheet"},s.selector=function(e){return this[this.length++]={selector:e,properties:[]},this},s.css=function(e,t){var n=this.length-1;if(r.string(e))this[n].properties.push({name:e,value:t});else if(r.plainObject(e))for(var a=e,s=0;s=0&&(e._idleTimeoutId=setTimeout((function(){e._onTimeout&&e._onTimeout()}),t))},n(239),t.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,t.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,n(35))},function(e,t,n){(function(e,t){!function(e,n){"use strict";if(!e.setImmediate){var r,i,o,a,s,l=1,c={},u=!1,d=e.document,f=Object.getPrototypeOf&&Object.getPrototypeOf(e);f=f&&f.setTimeout?f:e,"[object process]"==={}.toString.call(e.process)?r=function(e){t.nextTick((function(){h(e)}))}:!function(){if(e.postMessage&&!e.importScripts){var t=!0,n=e.onmessage;return e.onmessage=function(){t=!1},e.postMessage("","*"),e.onmessage=n,t}}()?e.MessageChannel?((o=new MessageChannel).port1.onmessage=function(e){h(e.data)},r=function(e){o.port2.postMessage(e)}):d&&"onreadystatechange"in d.createElement("script")?(i=d.documentElement,r=function(e){var t=d.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,i.removeChild(t),t=null},i.appendChild(t)}):r=function(e){setTimeout(h,0,e)}:(a="setImmediate$"+Math.random()+"$",s=function(t){t.source===e&&"string"==typeof t.data&&0===t.data.indexOf(a)&&h(+t.data.slice(a.length))},e.addEventListener?e.addEventListener("message",s,!1):e.attachEvent("onmessage",s),r=function(t){e.postMessage(a+t,"*")}),f.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n1)for(var n=1;n=t||n<0||m&&e-c>=o}function w(){var e=p();if(x(e))return k(e);s=setTimeout(w,function(e){var n=t-(e-l);return m?f(n,o-(e-c)):n}(e))}function k(e){return s=void 0,v&&r?b(e):(r=i=void 0,a)}function A(){var e=p(),n=x(e);if(r=arguments,i=this,l=e,n){if(void 0===s)return y(l);if(m)return s=setTimeout(w,t),b(l)}return void 0===s&&(s=setTimeout(w,t)),a}return t=g(t)||0,h(n)&&(u=!!n.leading,o=(m="maxWait"in n)?d(g(n.maxWait)||0,t):o,v="trailing"in n?!!n.trailing:v),A.cancel=function(){void 0!==s&&clearTimeout(s),c=0,r=l=i=s=void 0},A.flush=function(){return void 0===s?a:k(p())},A}}).call(this,n(35))},function(e,t,n){e.exports=n(243)},function(e,t,n){var r,i,o;(function(){var n,a,s,l,c,u,d,f,p,h,g,m,v,b,y;s=Math.floor,h=Math.min,a=function(e,t){return et?1:0},p=function(e,t,n,r,i){var o;if(null==n&&(n=0),null==i&&(i=a),n<0)throw new Error("lo must be non-negative");for(null==r&&(r=e.length);nn;0<=n?t++:t--)c.push(t);return c}.apply(this).reverse()).length;rg;0<=g?++u:--u)m.push(c(e,n));return m},b=function(e,t,n,r){var i,o,s;for(null==r&&(r=a),i=e[n];n>t&&r(i,o=e[s=n-1>>1])<0;)e[n]=o,n=s;return e[n]=i},y=function(e,t,n){var r,i,o,s,l;for(null==n&&(n=a),i=e.length,l=t,o=e[t],r=2*t+1;r'+e.content+"":s+=">"+e.content+"";var l=t(s);return l.data("selector",e.selector),l.data("on-click-function",e.onClickFunction),l.data("show",void 0===e.show||e.show),l}function y(){var e;l("active")&&(e=s.children(),t(e).each((function(){x(t(this))})),i.off("tapstart",n),s.remove(),c(s=void 0,void 0),c("active",!1),c("anyVisibleChild",!1))}function x(e){var n="string"==typeof 
e?t("#"+e):e,r=n.data("cy-context-menus-cxtfcn"),o=n.data("selector"),a=n.data("call-on-click-function"),s=n.data("cy-context-menus-cxtcorefcn");r&&i.off("cxttap",o,r),s&&i.off("cxttap",s),a&&n.off("click",a),n.remove()}"get"!==e&&(c("options",a=function(e,t){var n={};for(var r in e)n[r]=e[r];for(var r in t)n[r]=t[r];return n}(r,e)),l("active")&&y(),c("active",!0),o=u(a.contextMenuClasses),(s=t("
    ")).addClass("cy-context-menus-cxt-menu"),c("cxtMenu",s),t("body").append(s),s=s,g(a.menuItems),i.on("tapstart",n=function(){f(s),c("cxtMenuPosition",void 0),c("currentCyEvent",void 0)}),t(".cy-context-menus-cxt-menu").contextmenu((function(){return!1})));return function(e){return{isActive:function(){return l("active")},appendMenuItem:function(t){return m(t),e},appendMenuItems:function(t){return g(t),e},removeMenuItem:function(t){return x(t),e},setTrailingDivider:function(n,r){return function(e,n){var r=t("#"+e);n?r.addClass("cy-context-menus-divider"):r.removeClass("cy-context-menus-divider")}(n,r),e},insertBeforeMenuItem:function(t,n){return v(t,n),e},moveBeforeOtherMenuItem:function(n,r){return function(e,n){if(e!==n){var r=t("#"+e).detach(),i=t("#"+n);r.insertBefore(i)}}(n,r),e},disableMenuItem:function(n){return t("#"+n).attr("disabled",!0),e},enableMenuItem:function(n){return t("#"+n).attr("disabled",!1),e},hideMenuItem:function(n){return t("#"+n).data("show",!1),f(t("#"+n)),e},showMenuItem:function(n){return t("#"+n).data("show",!0),d(t("#"+n)),e},destroy:function(){return y(),e}}}(this)}))}};e.exports&&(e.exports=o),void 0===(r=function(){return o}.call(t,n,t,e))||(e.exports=r),"undefined"!=typeof cytoscape&&i&&o(cytoscape,i)}()},function(e,t,n){var r;r=function(e){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,function(t){return e[t]}.bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=0)}([function(e,t,n){var r=n(1),i=function(e){e&&e("layout","dagre",r)};"undefined"!=typeof cytoscape&&i(cytoscape),e.exports=i},function(e,t,n){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var i=n(2),o=n(3),a=n(4);function s(e){this.options=o({},i,e)}s.prototype.run=function(){var e=this.options,t=e.cy,n=e.eles,i=function(e,t){return"function"==typeof t?t.apply(e,[e]):t},o=e.boundingBox||{x1:0,y1:0,w:t.width(),h:t.height()};void 0===o.x2&&(o.x2=o.x1+o.w),void 0===o.w&&(o.w=o.x2-o.x1),void 0===o.y2&&(o.y2=o.y1+o.h),void 0===o.h&&(o.h=o.y2-o.y1);var s=new a.graphlib.Graph({multigraph:!0,compound:!0}),l={},c=function(e,t){null!=t&&(l[e]=t)};c("nodesep",e.nodeSep),c("edgesep",e.edgeSep),c("ranksep",e.rankSep),c("rankdir",e.rankDir),c("ranker",e.ranker),s.setGraph(l),s.setDefaultEdgeLabel((function(){return{}})),s.setDefaultNodeLabel((function(){return{}}));for(var u=n.nodes(),d=0;d1?t-1:0),r=1;r-1}},function(e,t,n){var r=n(75);e.exports=function(e,t){var n=this.__data__,i=r(n,e);return i<0?(++this.size,n.push([e,t])):n[i][1]=t,this}},function(e,t,n){var r=n(74);e.exports=function(){this.__data__=new 
r,this.size=0}},function(e,t){e.exports=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}},function(e,t){e.exports=function(e){return this.__data__.get(e)}},function(e,t){e.exports=function(e){return this.__data__.has(e)}},function(e,t,n){var r=n(74),i=n(117),o=n(118);e.exports=function(e,t){var n=this.__data__;if(n instanceof r){var a=n.__data__;if(!i||a.length<199)return a.push([e,t]),this.size=++n.size,this;n=this.__data__=new o(a)}return n.set(e,t),this.size=n.size,this}},function(e,t,n){var r=n(64),i=n(262),o=n(23),a=n(151),s=/^\[object .+?Constructor\]$/,l=Function.prototype,c=Object.prototype,u=l.toString,d=c.hasOwnProperty,f=RegExp("^"+u.call(d).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");e.exports=function(e){return!(!o(e)||i(e))&&(r(e)?f:s).test(a(e))}},function(e,t,n){var r=n(58),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,s=r?r.toStringTag:void 0;e.exports=function(e){var t=o.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(e){}var i=a.call(e);return r&&(t?e[s]=n:delete e[s]),i}},function(e,t){var n=Object.prototype.toString;e.exports=function(e){return n.call(e)}},function(e,t,n){var r,i=n(263),o=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";e.exports=function(e){return!!o&&o in e}},function(e,t,n){var r=n(29)["__core-js_shared__"];e.exports=r},function(e,t){e.exports=function(e,t){return null==e?void 0:e[t]}},function(e,t,n){var r=n(266),i=n(74),o=n(117);e.exports=function(){this.size=0,this.__data__={hash:new r,map:new(o||i),string:new r}}},function(e,t,n){var r=n(267),i=n(268),o=n(269),a=n(270),s=n(271);function l(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t0){if(++t>=800)return arguments[0]}else t=0;return e.apply(void 0,arguments)}}},function(e,t,n){var r=n(173),i=n(340),o=n(344),a=n(174),s=n(345),l=n(129);e.exports=function(e,t,n){var c=-1,u=i,d=e.length,f=!0,p=[],h=p;if(n)f=!1,u=o;else if(d>=200){var g=t?null:s(e);if(g)return l(g);f=!1,u=a,h=new r}else h=t?[]:p;e:for(;++c-1}},function(e,t,n){var r=n(188),i=n(342),o=n(343);e.exports=function(e,t,n){return t==t?o(e,t,n):r(e,i,n)}},function(e,t){e.exports=function(e){return e!=e}},function(e,t){e.exports=function(e,t,n){for(var r=n-1,i=e.length;++r1||1===t.length&&e.hasEdge(t[0],t[0])}))}},function(e,t,n){var r=n(22);e.exports=function(e,t,n){return function(e,t,n){var r={},i=e.nodes();return i.forEach((function(e){r[e]={},r[e][e]={distance:0},i.forEach((function(t){e!==t&&(r[e][t]={distance:Number.POSITIVE_INFINITY})})),n(e).forEach((function(n){var i=n.v===e?n.w:n.v,o=t(n);r[e][i]={distance:o,predecessor:e}}))})),i.forEach((function(e){var t=r[e];i.forEach((function(n){var o=r[n];i.forEach((function(n){var r=o[e],i=t[n],a=o[n],s=r.distance+i.distance;s0;){if(n=l.removeMin(),r.has(s,n))a.setEdge(n,s[n]);else{if(u)throw new Error("Input graph is not connected: "+e);u=!0}e.nodeEdges(n).forEach(c)}return a}},function(e,t,n){"use strict";var r=n(11),i=n(399),o=n(402),a=n(403),s=n(20).normalizeRanks,l=n(405),c=n(20).removeEmptyRanks,u=n(406),d=n(407),f=n(408),p=n(409),h=n(418),g=n(20),m=n(28).Graph;e.exports=function(e,t){var n=t&&t.debugTiming?g.time:g.notime;n("layout",(function(){var t=n(" buildLayoutGraph",(function(){return function(e){var t=new m({multigraph:!0,compound:!0}),n=$(e.graph());return t.setGraph(r.merge({},b,S(n,v),r.pick(n,y))),r.forEach(e.nodes(),(function(n){var 
[dagre's runLayout pipeline, step by step: makeSpaceForEdgeLabels, removeSelfEdges, acyclic, nestingGraph.run, rank, injectEdgeLabelProxies, removeEmptyRanks, nestingGraph.cleanup, normalizeRanks, assignRankMinMax, removeEdgeLabelProxies, normalize.run, parentDummyChains, addBorderSegments, order, insertSelfEdges, adjustCoordinateSystem, position, positionSelfEdges, removeBorderNodes, normalize.undo, fixupEdgeLabelCoords, undoCoordinateSystem, translateGraph, assignNodeIntersects, reversePoints, acyclic.undo; updateInputGraph then copies coordinates back onto the caller's graph. Followed by the layout defaults ({ranksep: 50, edgesep: 20, nodesep: 50, rankdir: "tb"}, edge defaults with minlen/weight/labeloffset/labelpos) and assorted lodash modules (cloneDeep, defaults, find/findIndex, toInteger/toNumber/trim helpers, forIn, last, mapValues, max).]
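The steps above are dagre's layout() pipeline. For orientation, a minimal use of the public dagre API; node ids are made-up dbt-style names, and the option values mirror the bundle's defaults:

var dagre = require("dagre");

var g = new dagre.graphlib.Graph({ multigraph: true, compound: true });
g.setGraph({ rankdir: "LR", nodesep: 50, ranksep: 50, edgesep: 20 });
g.setDefaultEdgeLabel(function () { return {}; });

// Nodes need width/height so the position phase can space them.
g.setNode("model.jaffle_shop.stg_orders", { label: "stg_orders", width: 140, height: 30 });
g.setNode("model.jaffle_shop.orders", { label: "orders", width: 140, height: 30 });
g.setEdge("model.jaffle_shop.stg_orders", "model.jaffle_shop.orders");

dagre.layout(g); // runs the acyclic -> rank -> order -> position steps listed above

g.nodes().forEach(function (id) {
  var n = g.node(id);
  console.log(id, n.x, n.y); // coordinates assigned by translateGraph
});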
[More lodash internals (gt, merge with baseMergeDeep, isPlainObject, toPlainObject, createAssigner, sortBy with compareAscending/compareMultiple) and further dagre modules: the greedy feedback-arc-set heuristic backed by doubly linked-list buckets (used by the acyclic phase), normalize.run's dummy-node chains, the network-simplex ranker (initLowLimValues, initCutValues, calcCutValue, leaveEdge, enterEdge, exchangeEdges), parentDummyChains, the weighted cross-counting accumulator and the barycenter merge/resolveConflicts/sort routines of the ordering phase, and the start of buildLayerGraph.]
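The ranker summarized above is dagre's network simplex. As a simpler stand-in for the same phase (and the feasible starting point that network simplex refines), here is a longest-path ranking sketch: each node gets the smallest rank satisfying every incoming minlen constraint. Plain objects instead of graphlib; acyclic input assumed, as dagre's acyclic phase guarantees.

// Longest-path ranking over edges {v, w, minlen}, meaning rank(w) >= rank(v) + minlen.
function longestPathRanks(edges) {
  var preds = {}, nodes = new Set();
  edges.forEach(function (e) {
    nodes.add(e.v); nodes.add(e.w);
    (preds[e.w] = preds[e.w] || []).push(e);
  });
  var rank = {};
  function visit(v) {               // memoized DFS; input must be a DAG
    if (v in rank) return rank[v];
    var r = 0;                      // sources sit at rank 0
    (preds[v] || []).forEach(function (e) {
      r = Math.max(r, visit(e.v) + e.minlen);
    });
    return (rank[v] = r);
  }
  nodes.forEach(function (v) { visit(v); });
  return rank;
}

console.log(longestPathRanks([
  { v: "a", w: "b", minlen: 1 },
  { v: "b", w: "c", minlen: 1 },
  { v: "a", w: "c", minlen: 1 },
])); // { a: 0, b: 1, c: 2 }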
[Tail of buildLayerGraph and the ordering/positioning modules (addSubgraphConstraints, per-layer y assignment from node heights and ranksep, Brandes-Köpf type-1/type-2 conflict detection for x alignment), a small .menu CSS chunk, then the first dbt-docs UI modules: the modelTreeLine directive (recursive database/project/source tree with per-type icons and $state navigation) with its $templateCache-preloaded model_tree_line.html, an .unselectable CSS rule, and the docsSearch directive (result limiting, context snippets around matches, regex query highlighting through $sce.trustAsHtml, column filtering), ending at the opening of search.html. Template markup in this region was stripped by extraction; only text nodes and {{ bindings }} survive.]
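All of the HTML fragments in this region are pre-registered in AngularJS's $templateCache under fake URLs, so each directive's templateUrl resolves without a network request. A minimal sketch of that pattern with a made-up component; the "dbt" module name is the real one from the bundle:

// Pre-load a template, then reference it by URL from a directive.
angular.module("dbt").run(["$templateCache", function ($templateCache) {
  $templateCache.put(
    "/components/example/example.html",
    "<span class=\"example\">{{ label }}</span>"
  );
}]);

angular.module("dbt").directive("exampleLabel", [function () {
  return {
    scope: { label: "=" },   // isolate scope, same style as modelTreeLine above
    replace: true,
    templateUrl: "/components/example/example.html",
  };
}]);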
[search.html template residue: a header with {{ query }} and "{{ results.length }} search results", per-result {{ result.model.resource_type }} badges, matched-column chips with a "Show {{ columnFilter(result.model.columns).length - max_results_columns }} more" expander, tag chips, and a "Show {{ results.length - max_results }} more" footer; then the tableDetails directive (formats stats as bytes/percentages/row counts, assembles Owner/Type/Package/Language/Relation detail rows plus source meta) and the opening of table_details.html.]
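tableDetails formats raw stats for display; the core of it is a log-1024 byte humanizer. A cleaned-up sketch of that helper (the bundle's version additionally rescales sub-1 inputs by 1e6 before formatting, a quirk dropped here):

// Human-readable byte counts, mirroring the tableDetails helper.
function formatBytes(bytes, decimals) {
  if (bytes === 0) return "0 bytes";
  if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) return "-";
  if (decimals === undefined) decimals = 0;
  var units = ["bytes", "KB", "MB", "GB", "TB", "PB"];
  var i = Math.floor(Math.log(bytes) / Math.log(1024));
  return (bytes / Math.pow(1024, i)).toFixed(decimals) + " " + units[i];
}
console.log(formatBytes(1536, 1)); // "1.5 KB"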
[table_details.html template residue: a Details panel with meta key/value rows ({{ k }}: {{ v }}), a Tags row ({{ tag }}, falling back to "untagged"), detail items ({{ item.name }}: {{ item.value }}), and extended stats ({{ item.label }}: {{ item.value }}); a .details-content/.detail-body CSS chunk; then the columnDetails directive (expandable column rows, U/N/F/A/+ test badges, case-folded column names via project.caseColumn, columns sorted by index) and the opening of column_details.html.]
[column_details.html template residue: a Column/Type/Description/Tests/More table where expanding a row reveals meta Details, a Description block, and Generic Tests; then the codeBlock directive: holds a map of named source versions, highlights the selected one through the code service, defers Prism.highlightAll() with $timeout, and drives a copy-to-clipboard button with a one-second "copied" indicator; plus pre.code CSS and the opening of code_block.html.]
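codeBlock's copy button delegates to the code service's clipboard helper, which appears further down in the bundle: write the text into an off-screen textarea, select it, and call document.execCommand("copy"). A standalone version of that exact technique:

// Legacy clipboard copy via a hidden textarea (the pattern this bundle uses).
function copyToClipboard(text) {
  var ta = document.createElement("textarea");
  ta.value = text;
  ta.setAttribute("readonly", "");
  ta.style.position = "absolute";
  ta.style.left = "-9999px";      // keep it out of the viewport
  document.body.appendChild(ta);
  ta.select();
  document.execCommand("copy");   // deprecated in favor of navigator.clipboard, still widely supported
  document.body.removeChild(ta);
}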
[code_block.html template residue (a "Code" header with version tabs), line-number CSS, the macroArguments directive with its Argument/Type/Description/More table template, and the referenceList directive, which buckets referencing nodes by resource type, maps types to plural headings (model to Models, seed to Seeds, through metric to Metrics, defaulting to Nodes), and builds links with $state.href; ends at the opening of references/index.html.]
[references/index.html template residue ("No resources reference this {{ node.resource_type }}" plus type tabs), then the page controllers: ModelCtrl, SourceCtrl, SeedCtrl, SnapshotCtrl, and the start of TestCtrl. Each resolves its node by unique_id once the project manifest is ready, wires references/parents through the service helpers, and fills the code versions shown in the UI (Source/Compiled, or generated sample SQL for sources and seeds).]
[Remaining controllers: TestCtrl's tail, MacroCtrl (adapter-specific macro implementations with a default_version fallback), AnalysisCtrl, ExposureCtrl and MetricCtrl (extra table fields such as Maturity, Owner, and Metric Type), OperationCtrl, GraphCtrl (maps node types to selector specs like source:<source>.<name> or metric:<name>), and MainController: a "t" keyboard shortcut focusing search, model-tree construction, route-transition hooks, and a weighted full-text scorer over name/tags/description/raw_code/columns; ends at the start of the inline dbt logo SVG data URI.]
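MainController's search scores each candidate by counting query occurrences per field and weighting them (name 10, tags 5, description 3, raw_code 2, columns 1). A readable sketch of that scorer; helper names are illustrative:

// Weighted substring scoring, mirroring the search watcher above.
var WEIGHTS = { name: 10, tags: 5, description: 3, raw_code: 2, columns: 1 };

function countOccurrences(haystack, needle) {
  var count = 0, idx = haystack.indexOf(needle);
  while (idx !== -1) { count++; idx = haystack.indexOf(needle, idx + 1); }
  return count;
}

function scoreModel(model, query) {
  var q = query.toLowerCase(), total = 0;
  Object.keys(WEIGHTS).forEach(function (field) {
    var value = model[field];
    if (value == null) return;
    var hits = 0;
    if (field === "columns") {        // columns is an object keyed by name in the manifest
      Object.values(value).forEach(function (col) {
        if (col.name) hits += countOccurrences(col.name.toLowerCase(), q);
      });
    } else if (field === "tags") {    // tags is a plain array of strings
      value.forEach(function (tag) { hits += countOccurrences(tag.toLowerCase(), q); });
    } else {
      hits = countOccurrences(String(value).toLowerCase(), q);
    }
    total += hits * WEIGHTS[field];
  });
  return total;
}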
44.9186C74.132 47.9295 74.9458 50.859 76.4919 53.3816C79.1772 57.6944 88.2098 73.3996 88.9421 74.9457C89.6745 76.4919 90 77.2242 90 78.4448C89.6745 81.3743 88.3725 84.0597 86.2568 86.0127C84.2224 88.1284 81.5371 89.5118 78.689 89.7559C77.4684 89.7559 76.6546 89.4304 75.1899 88.698C73.7251 87.9656 57.7758 79.1772 53.4629 76.4919C53.1374 76.3291 52.8119 76.085 52.4051 75.9222L31.085 63.3092C31.5732 67.3779 33.3635 71.2839 36.2929 74.132C36.8626 74.7016 37.4322 75.1899 38.0832 75.6781C37.5949 75.9222 37.0253 76.1664 36.5371 76.4919C32.2242 79.1772 16.519 88.2098 14.9729 88.9421C13.4268 89.6745 12.6944 90 11.3924 90C8.46293 89.6745 5.77758 88.3725 3.82459 86.2568C1.70886 84.2224 0.325497 81.5371 0 78.6076C0.0813743 77.387 0.406872 76.1664 1.05787 75.1085C1.79024 73.5624 10.8228 57.8571 13.5081 53.5443C15.0542 51.0217 15.868 48.0922 15.868 45.0814C15.868 42.0705 15.0542 39.141 13.5081 36.6184C10.8228 32.1429 1.70886 16.4376 1.05787 14.8915C0.406872 13.8336 0.0813743 12.613 0 11.3924C0.325497 8.46293 1.62749 5.77758 3.74322 3.74322C5.77758 1.62749 8.46293 0.325497 11.3924 0C12.613 0.0813743 13.8336 0.406872 14.9729 1.05787C16.2749 1.62749 27.7486 8.30018 33.8517 11.8807L35.2351 12.6944C35.7233 13.0199 36.1302 13.264 36.4557 13.4268L37.1067 13.8336L58.8336 26.6908C58.3454 21.8083 55.8228 17.3327 51.9168 14.3219C52.4051 14.0778 52.9747 13.8336 53.4629 13.5081C57.7758 10.8228 73.481 1.70886 75.0271 1.05787C76.085 0.406872 77.3056 0.0813743 78.6076 0C81.4557 0.325497 84.1411 1.62749 86.1754 3.74322ZM46.1392 50.7776L50.7776 46.1392C51.4286 45.4882 51.4286 44.5118 50.7776 43.8608L46.1392 39.2224C45.4882 38.5714 44.5118 38.5714 43.8608 39.2224L39.2224 43.8608C38.5714 44.5118 38.5714 45.4882 39.2224 46.1392L43.8608 50.7776C44.4304 51.3472 45.4882 51.3472 46.1392 50.7776Z' fill='%23FF694A'/%3E %3C/svg%3E"},function(e,t,n){"use strict";n.r(t);var r=n(63),i=n.n(r);n(460),n(461),n(462),n(463),n(465);const o=n(9),a=(n(31),n(21));window.Prism=i.a,o.module("dbt").factory("code",["$sce",function(e){var t={copied:!1,highlight:function(t,n="sql"){if("sql"==n)var r=i.a.highlight(t,i.a.languages.sql,"sql");else if("python"==n)r=i.a.highlight(t,i.a.languages.python,"python");return e.trustAsHtml(r)},copy_to_clipboard:function(e){var t=document.createElement("textarea");t.value=e,t.setAttribute("readonly",""),t.style.position="absolute",t.style.left="-9999px",document.body.appendChild(t),t.select(),document.execCommand("copy"),document.body.removeChild(t)},generateSourceSQL:function(e){var t=["select"],n=a.size(e.columns),r=a.keys(e.columns);a.each(r,(function(e,r){var i=" "+e;r+1!=n&&(i+=","),t.push(i)}));const i=(e.database?e.database+".":"")+e.schema+"."+e.identifier;return t.push("from "+i),t.join("\n")},generateMetricSQL:function(e){if("expression"==e.type)return e.sql;const t=[`select ${e.type}(${e.sql})`,`from {{ ${e.model} }}`];if(e.filters.length>0){const n=e.filters.map(e=>`${e.field} ${e.operator} ${e.value}`).join(" AND ");t.push("where "+n)}return t.join("\n")}};return 
[Prism grammar registrations: Prism.languages.sql (comment/variable/string/backtick-identifier patterns, function and keyword alternations, boolean/number/operator/punctuation rules) and Prism.languages.python (f-string interpolation with format-spec and conversion-option, triple-quoted strings, decorators, keywords, builtins, numeric literals), followed by the start of Prism's line-numbers plugin.]
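Those grammars are consumed through Prism's core API; the bundle's code service picks the grammar by language and trusts the resulting markup. Minimal usage, assuming Prism and the SQL grammar above are loaded:

// Highlight a SQL snippet with the registered grammar.
var html = Prism.highlight(
  "select id from raw.jaffle_shop.orders",
  Prism.languages.sql,
  "sql"
);
// `html` is markup such as <span class="token keyword">select</span> ...;
// the docs app wraps it in $sce.trustAsHtml before binding it into the page.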
[Rest of the line-numbers plugin (resize-aware per-line row sizing via a hidden sizer span) and its CSS, the GHColors Prism theme, then the start of the graph factory: cytoscape pan/zoom option presets for vertical and horizontal modes, layout configs (dagre left-to-right, preset top-down positions around the focused node), and the opening of the cytoscape stylesheet (edge curve/arrow styles, vertical/horizontal node styles, resource-type colors: source #5fb825, exposure #ff694b, metric #ff5688, python #6a5acd).]
[Remainder of the stylesheet (selected/dirty/hidden node states, node_color data mapping) and the graph factory body: builds pristine node/edge collections from the manifest (models, seeds, sources, snapshots, analyses, exposures, metrics, operations, plus non-generic tests), assembles a directed graphlib graph, and exposes showVerticalGraph/showFullGraph/updateGraph/selectNode/markDirty, filtering elements by selector results and honoring docs.show and validated docs.node_color overrides; ends at the start of the isValidColor module.]
Set(["aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","ghostwhite","gold","goldenrod","gray","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightpink","lightsalmon","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumslateblue","mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","rebeccapurple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen"]);function i(e){if(!e)return!1;const t=e.trim().toLowerCase();if(""===t)return!1;const n=t.match(/^#([A-Fa-f0-9]{3}){1,2}$/),i=r.has(t);return Boolean(n)||i}},function(e,t,n){n(31);const r=n(21),i=n(470);angular.module("dbt").factory("selectorService",["$state",function(e){var t={include:"",exclude:"",packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"],depth:1},n={view_node:null,selection:{clean:r.clone(t),dirty:r.clone(t)},options:{packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"]},init:function(e){r.each(e,(function(e,r){n.options[r]=e,t[r]=e,n.selection.clean[r]=e,n.selection.dirty[r]=e}))},resetSelection:function(e){var i={include:e&&r.includes(["model","seed","snapshot"],e.resource_type)?"+"+e.name+"+":e&&"source"==e.resource_type?"+source:"+e.source_name+"."+e.name+"+":e&&"exposure"==e.resource_type?"+exposure:"+e.name:e&&"metric"==e.resource_type?"+metric:"+e.name:e&&r.includes(["analysis","test"],e.resource_type)?"+"+e.name:""},o=r.assign({},t,i);n.selection.clean=r.clone(o),n.selection.dirty=r.clone(o),n.view_node=e},getViewNode:function(){return n.view_node},excludeNode:function(e,t){var r,i=n.selection.dirty.exclude,o=t.parents?"+":"",a=t.children?"+":"",s=i.length>0?" 
":"";"source"==e.resource_type?(o+="source:",r=e.source_name+"."+e.name):["exposure","metric"].indexOf(e.resource_type)>-1?(o+=e.resource_type+":",r=e.name):r=e.name;var l=i+s+o+r+a;return n.selection.dirty.exclude=l,n.updateSelection()},selectSource:function(e,t){var r="source:"+e+(t.children?"+":"");return n.selection.dirty.include=r,n.updateSelection()},clearViewNode:function(){n.view_node=null},isDirty:function(){return!r.isEqual(n.selection.clean,n.selection.dirty)},updateSelection:function(){return n.selection.clean=r.clone(n.selection.dirty),n.selection.clean},selectNodes:function(e,t,n){return i.selectNodes(e,t,n)}};return n}])},function(e,t,n){const r=n(21),i=n(471);function o(e,t){return t||(t=" "),r.filter(r.uniq(e.split(t)),(function(e){return e.length>0}))}function a(e){var t={raw:e,select_at:!1,select_children:!1,children_depth:null,select_parents:!1,parents_depth:null};const n=new RegExp(""+/^/.source+/(?(\@))?/.source+/(?((?(\d*))\+))?/.source+/((?([\w.]+)):)?/.source+/(?(.*?))/.source+/(?(\+(?(\d*))))?/.source+/$/.source).exec(e).groups;t.select_at="@"==n.childs_parents,t.select_parents=!!n.parents,t.select_children=!!n.children,n.parents_depth&&(t.parents_depth=parseInt(n.parents_depth)),n.children_depth&&(t.children_depth=parseInt(n.children_depth));var r=n.method,i=n.value;return r?-1!=r.indexOf(".")&&([r,selector_modifier]=r.split(".",2),i={config:selector_modifier,value:i}):r="implicit",t.selector_type=r,t.selector_value=i,t}function s(e){var t=o(e," ");return r.map(t,(function(e){var t=o(e,",");return t.length>1?{method:"intersect",selectors:r.map(t,a)}:{method:"none",selectors:r.map([e],a)}}))}function l(e,t){var n=s(e),i=null,o=null;return r.each(n,(function(e){var n="intersect"==e.method?r.intersection:r.union;r.each(e.selectors,(function(e){var r=t(e);null===i?(i=r.matched,o=r.selected):(i=n(i,r.matched),o=n(o,r.selected))}))})),{matched:i||[],selected:o||[]}}e.exports={splitSpecs:o,parseSpec:a,parseSpecs:s,buildSpec:function(e,t,n){return{include:s(e),exclude:s(t),hops:n}},applySpec:l,selectNodes:function(e,t,n){n.include,n.exclude;var o,a=r.partial(i.getNodesFromSpec,e,t,n.hops);r.values(t),o=0==n.include.trim().length?{selected:e.nodes(),matched:[]}:l(n.include,a);var s=l(n.exclude,a),c=o.selected,u=o.matched;c=r.difference(c,s.selected),u=r.difference(u,s.matched);var d=[];return r.each(c,(function(e){var i=t[e];i.data.tags||(i.data.tags=[]);var o=r.includes(n.packages,i.data.package_name),a=r.intersection(n.tags,i.data.tags).length>0,s=r.includes(n.tags,null)&&0==i.data.tags.length,l=r.includes(n.resource_types,i.data.resource_type);o&&(a||s)&&l||d.push(i.data.unique_id)})),{selected:r.difference(c,d),matched:r.difference(u,d)}}}},function(e,t,n){const r=n(21),i=n(203);var o="fqn",a="tag",s="source",l="exposure",c="metric",u="path",d="file",f="package",p="config",h="test_name",g="test_type",m={};function v(e,t){if(t===r.last(e))return!0;var n=e.reduce((e,t)=>e.concat(t.split(".")),[]),i=t.split(".");if(n.length-1||!r.hasOwnProperty("test_metadata")&&["data","singular"].indexOf(t)>-1)&&n.push(r)})),n}function $(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("source"==r.resource_type){var i,o,a=r.source_name,s=r.name;-1!=t.indexOf(".")?[i,o]=t.split(".",2):(i=t,o=null),("*"==i||i==a&&"*"===o||i==a&&o===s||i==a&&null===o)&&n.push(e.data)}})),n}m["implicit"]=function(e,t){var n=b(e,t),i=y(e,t),o=[];t.toLowerCase().endsWith(".sql")&&(o=x(e,t));var a=r.uniq([].concat(r.map(n,"unique_id"),r.map(i,"unique_id"),r.map(o,"unique_id")));return 
r.map(a,t=>e[t].data)},m[o]=b,m[a]=w,m[s]=$,m[l]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("exposure"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[c]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("metric"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[u]=y,m[d]=x,m[f]=k,m[p]=A,m[h]=E,m[g]=S,e.exports={isFQNMatch:v,getNodesByFQN:b,getNodesByTag:w,getNodesBySource:$,getNodesByPath:y,getNodesByPackage:k,getNodesByConfig:A,getNodesByTestName:E,getNodesByTestType:S,getNodesFromSpec:function(e,t,n,o){const a=m[o.selector_type];if(!a)return console.log("Node matcher for selector",o.selector_type,"is invalid"),{selected:[],matched:[]};var s=a(t,o.selector_value),l=[],c=[];return r.each(s,(function(t){var a=t.unique_id;c.push(t.unique_id);var s=[],u=[],d=[];if(o.select_at&&(d=r.union(i.selectAt(e,a))),o.select_parents){var f=n||o.parents_depth;s=i.ancestorNodes(e,a,f)}if(o.select_children){f=n||o.children_depth;u=i.descendentNodes(e,a,f)}l=r.union([a],l,u,s,d)})),{selected:l,matched:c}}}},function(e,t,n){const r=n(9);n(473);r.module("dbt").factory("trackingService",["$location","selectorService","$rootScope",function(e,t,n){var r={initialized:!1,snowplow:null,project_id:null,init:function(e){r.initialized||(r.initialized=!0,r.project_id=e.project_id,!0===e.track&&r.turn_on_tracking())},isHosted:function(){return window.location.hostname.indexOf(".getdbt.com")>-1},turn_on_tracking:function(){var e,t,n,i,o,a;e=window,t=document,n="script",e[i="snowplow"]||(e.GlobalSnowplowNamespace=e.GlobalSnowplowNamespace||[],e.GlobalSnowplowNamespace.push(i),e[i]=function(){(e[i].q=e[i].q||[]).push(arguments)},e[i].q=e[i].q||[],o=t.createElement(n),a=t.getElementsByTagName(n)[0],o.async=1,o.src="//d1fc8wv8zag5ca.cloudfront.net/2.9.0/sp.js",a.parentNode.insertBefore(o,a));var s={appId:"dbt-docs",forceSecureTracker:!0,respectDoNotTrack:!0,userFingerprint:!1,contexts:{webPage:!0}};r.isHosted()&&(s.cookieDomain=".getdbt.com"),r.snowplow=window.snowplow,r.snowplow("newTracker","sp","fishtownanalytics.sinter-collect.com",s),r.snowplow("enableActivityTracking",30,30),r.track_pageview()},fuzzUrls:function(){r.isHosted()||(r.snowplow("setCustomUrl","https://fuzzed.getdbt.com/"),r.snowplow("setReferrerUrl","https://fuzzed.getdbt.com/"))},getContext:function(){return[{schema:"iglu:com.dbt/dbt_docs/jsonschema/1-0-0",data:{is_cloud_hosted:r.isHosted(),core_project_id:r.project_id}}]},track_pageview:function(){if(r.snowplow){r.fuzzUrls();r.snowplow("trackPageView",null,r.getContext())}},track_event:function(e,t,n,i){r.snowplow&&(r.fuzzUrls(),r.snowplow("trackStructEvent","dbt-docs",e,t,n,i,r.getContext()))},track_graph_interaction:function(e,t){r.snowplow&&(r.fuzzUrls(),r.track_event("graph","interact",e,t))}};return r}])},function(e,t,n){var r,i,o,a,s;r=n(474),i=n(204).utf8,o=n(475),a=n(204).bin,(s=function(e,t){e.constructor==String?e=t&&"binary"===t.encoding?a.stringToBytes(e):i.stringToBytes(e):o(e)?e=Array.prototype.slice.call(e,0):Array.isArray(e)||e.constructor===Uint8Array||(e=e.toString());for(var n=r.bytesToWords(e),l=8*e.length,c=1732584193,u=-271733879,d=-1732584194,f=271733878,p=0;p<n.length;p++)n[p]=16711935&(n[p]<<8|n[p]>>>24)|4278255360&(n[p]<<24|n[p]>>>8);n[l>>>5]|=128<<l%32;n[14+(l+64>>>9<<4)]=l;var h=s._ff,g=s._gg,m=s._hh,v=s._ii;for(p=0;p<n.length;p+=16){var b=c,y=u,x=d,w=f;c=h(c,u,d,f,n[p+0],7,-680876936),f=h(f,c,u,d,n[p+1],12,-389564586),d=h(d,f,c,u,n[p+2],17,606105819),u=h(u,d,f,c,n[p+3],22,-1044525330),c=h(c,u,d,f,n[p+4],7,-176418897),f=h(f,c,u,d,n[p+5],12,1200080426),d=h(d,f,c,u,n[p+6],17,-1473231341),u=h(u,d,f,c,n[p+7],22,-45705983),c=h(c,u,d,f,n[p+8],7,1770035416),f=h(f,c,u,d,n[p+9],12,-1958414417),d=h(d,f,c,u,n[p+10],17,-42063),u=h(u,d,f,c,n[p+11],22,-1990404162),c=h(c,u,d,f,n[p+12],7,1804603682),f=h(f,c,u,d,n[p+13],12,-40341101),d=h(d,f,c,u,n[p+14],17,-1502002290),u=h(u,d,f,c,n[p+15],22,1236535329),c=g(c,u,d,f,n[p+1],5,-165796510),f=g(f,c,u,d,n[p+6],9,-1069501632),d=g(d,f,c,u,n[p+11],14,643717713),u=g(u,d,f,c,n[p+0],20,-373897302),c=g(c,u,d,f,n[p+5],5,-701558691),f=g(f,c,u,d,n[p+10],9,38016083),d=g(d,f,c,u,n[p+15],14,-660478335),u=g(u,d,f,c,n[p+4],20,-405537848),c=g(c,u,d,f,n[p+9],5,568446438),f=g(f,c,u,d,n[p+14],9,-1019803690),d=g(d,f,c,u,n[p+3],14,-187363961),u=g(u,d,f,c,n[p+8],20,1163531501),c=g(c,u,d,f,n[p+13],5,-1444681467),f=g(f,c,u,d,n[p+2],9,-51403784),d=g(d,f,c,u,n[p+7],14,1735328473),u=g(u,d,f,c,n[p+12],20,-1926607734),c=m(c,u,d,f,n[p+5],4,-378558),f=m(f,c,u,d,n[p+8],11,-2022574463),d=m(d,f,c,u,n[p+11],16,1839030562),u=m(u,d,f,c,n[p+14],23,-35309556),c=m(c,u,d,f,n[p+1],4,-1530992060),f=m(f,c,u,d,n[p+4],11,1272893353),d=m(d,f,c,u,n[p+7],16,-155497632),u=m(u,d,f,c,n[p+10],23,-1094730640),c=m(c,u,d,f,n[p+13],4,681279174),f=m(f,c,u,d,n[p+0],11,-358537222),d=m(d,f,c,u,n[p+3],16,-722521979),u=m(u,d,f,c,n[p+6],23,76029189),c=m(c,u,d,f,n[p+9],4,-640364487),f=m(f,c,u,d,n[p+12],11,-421815835),d=m(d,f,c,u,n[p+15],16,530742520),u=m(u,d,f,c,n[p+2],23,-995338651),c=v(c,u,d,f,n[p+0],6,-198630844),f=v(f,c,u,d,n[p+7],10,1126891415),d=v(d,f,c,u,n[p+14],15,-1416354905),u=v(u,d,f,c,n[p+5],21,-57434055),c=v(c,u,d,f,n[p+12],6,1700485571),f=v(f,c,u,d,n[p+3],10,-1894986606),d=v(d,f,c,u,n[p+10],15,-1051523),u=v(u,d,f,c,n[p+1],21,-2054922799),c=v(c,u,d,f,n[p+8],6,1873313359),f=v(f,c,u,d,n[p+15],10,-30611744),d=v(d,f,c,u,n[p+6],15,-1560198380),u=v(u,d,f,c,n[p+13],21,1309151649),c=v(c,u,d,f,n[p+4],6,-145523070),f=v(f,c,u,d,n[p+11],10,-1120210379),d=v(d,f,c,u,n[p+2],15,718787259),u=v(u,d,f,c,n[p+9],21,-343485551),c=c+b>>>0,u=u+y>>>0,d=d+x>>>0,f=f+w>>>0}return r.endian([c,u,d,f])})._ff=function(e,t,n,r,i,o,a){var s=e+(t&n|~t&r)+(i>>>0)+a;return(s<<o|s>>>32-o)+t},s._gg=function(e,t,n,r,i,o,a){var 
s=e+(t&r|n&~r)+(i>>>0)+a;return(s<<o|s>>>32-o)+t},s._hh=function(e,t,n,r,i,o,a){var s=e+(t^n^r)+(i>>>0)+a;return(s<<o|s>>>32-o)+t},s._ii=function(e,t,n,r,i,o,a){var s=e+(n^(t|~r))+(i>>>0)+a;return(s<<o|s>>>32-o)+t},s._blocksize=16,s._digestsize=16,e.exports=function(e,t){if(null==e)throw new Error("Illegal argument "+e);var n=r.wordsToBytes(s(e,t));return t&&t.asBytes?n:t&&t.asString?a.bytesToString(n):r.bytesToHex(n)}},function(e,t){var n,r;n="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",r={rotl:function(e,t){return e<<t|e>>>32-t},rotr:function(e,t){return e<<32-t|e>>>t},endian:function(e){if(e.constructor==Number)return 16711935&r.rotl(e,8)|4278255360&r.rotl(e,24);for(var t=0;t<e.length;t++)e[t]=r.endian(e[t]);return e},randomBytes:function(e){for(var t=[];e>0;e--)t.push(Math.floor(256*Math.random()));return t},bytesToWords:function(e){for(var t=[],n=0,r=0;n<e.length;n++,r+=8)t[r>>>5]|=e[n]<<24-r%32;return t},wordsToBytes:function(e){for(var t=[],n=0;n<32*e.length;n+=8)t.push(e[n>>>5]>>>24-n%32&255);return t},bytesToHex:function(e){for(var t=[],n=0;n<e.length;n++)t.push((e[n]>>>4).toString(16)),t.push((15&e[n]).toString(16));return t.join("")},hexToBytes:function(e){for(var t=[],n=0;n<e.length;n+=2)t.push(parseInt(e.substr(n,2),16));return t},bytesToBase64:function(e){for(var t=[],r=0;r<e.length;r+=3)for(var i=e[r]<<16|e[r+1]<<8|e[r+2],o=0;o<4;o++)8*r+6*o<=8*e.length?t.push(n.charAt(i>>>6*(3-o)&63)):t.push("=");return t.join("")},base64ToBytes:function(e){e=e.replace(/[^A-Z0-9+\/]/gi,"");for(var t=[],r=0,i=0;r<e.length;i=++r%4)0!=i&&t.push((n.indexOf(e.charAt(r-1))&Math.pow(2,-2*i+8)-1)<<2*i|n.indexOf(e.charAt(r))>>>6-2*i);return t}},e.exports=r},function(e,t){function n(e){return!!e.constructor&&"function"==typeof e.constructor.isBuffer&&e.constructor.isBuffer(e)}
+/*! Runge-Kutta spring physics function generator. Adapted from Framer.js, copyright Koen Bok. MIT License: http://en.wikipedia.org/wiki/MIT_License */var r=function(){function e(e){return-e.tension*e.x-e.friction*e.v}function t(t,n,r){var i={x:t.x+r.dx*n,v:t.v+r.dv*n,tension:t.tension,friction:t.friction};return{dx:i.v,dv:e(i)}}function n(n,r){var i={dx:n.v,dv:e(n)},o=t(n,.5*r,i),a=t(n,.5*r,o),s=t(n,r,a),l=1/6*(i.dx+2*(o.dx+a.dx)+s.dx),c=1/6*(i.dv+2*(o.dv+a.dv)+s.dv);return n.x=n.x+l*r,n.v=n.v+c*r,n}return function e(t,r,i){var o,a={x:-1,v:0,tension:null,friction:null},s=[0],l=0,c=void 0,u=void 0;for(t=parseFloat(t)||500,r=parseFloat(r)||20,i=i||null,a.tension=t,a.friction=r,c=(o=null!==i)?(l=e(t,r))/i*.016:.016;u=n(u||a,c),s.push(1+u.x),l+=16,Math.abs(u.x)>1e-4&&Math.abs(u.v)>1e-4;);return o?function(e){return s[e*(s.length-1)|0]}:l}}();e.exports=r},function(e,t,n){"use strict";var r=n(0);function i(e,t,n,r,i){if(1===r)return n;var o=i(t,n,r);return null==e||((e.roundValue||e.color)&&(o=Math.round(o)),void 0!==e.min&&(o=Math.max(o,e.min)),void 0!==e.max&&(o=Math.min(o,e.max))),o}function o(e,t){return null!=e.pfValue||null!=e.value?null==e.pfValue||null!=t&&"%"===t.type.units?e.value:e.pfValue:e}e.exports=function(e,t,n,a,s){var l=null!=s?s.type:null;n<0?n=0:n>1&&(n=1);var c=o(e,s),u=o(t,s);if(r.number(c)&&r.number(u))return i(l,c,u,n,a);if(r.array(c)&&r.array(u)){for(var d=[],f=0;f0},startBatch:function(){var e=this._private;return null==e.batchCount&&(e.batchCount=0),0===e.batchCount&&(e.batchingStyle=e.batchingNotify=!0,e.batchStyleEles=this.collection(),e.batchNotifyEles=this.collection(),e.batchNotifyTypes=[],e.batchNotifyTypes.ids={}),e.batchCount++,this},endBatch:function(){var e=this._private;return e.batchCount--,0===e.batchCount&&(e.batchingStyle=!1,e.batchStyleEles.updateStyle(),e.batchingNotify=!1,this.notify({type:e.batchNotifyTypes,eles:e.batchNotifyEles})),this},batch:function(e){return this.startBatch(),e(),this.endBatch(),this},batchData:function(e){var t=this;return this.batch((function(){for(var n=Object.keys(e),r=0;r0;)e.removeChild(e.childNodes[0]);this._private.renderer=null},onRender:function(e){return 
this.on("render",e)},offRender:function(e){return this.off("render",e)}};i.invalidateDimensions=i.resize,e.exports=i},function(e,t,n){"use strict";var r=n(0),i=n(7),o={collection:function(e,t){return r.string(e)?this.$(e):r.elementOrCollection(e)?e.collection():r.array(e)?new i(this,e,t):new i(this)},nodes:function(e){var t=this.$((function(e){return e.isNode()}));return e?t.filter(e):t},edges:function(e){var t=this.$((function(e){return e.isEdge()}));return e?t.filter(e):t},$:function(e){var t=this._private.elements;return e?t.filter(e):t.spawnSelf()},mutableElements:function(){return this._private.elements}};o.elements=o.filter=o.$,e.exports=o},function(e,t,n){"use strict";var r=n(0),i=n(18),o={style:function(e){return e&&this.setStyle(e).update(),this._private.style},setStyle:function(e){var t=this._private;return r.stylesheet(e)?t.style=e.generateStyle(this):r.array(e)?t.style=i.fromJson(this,e):r.string(e)?t.style=i.fromString(this,e):t.style=i(this),t.style}};e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(5),a={apply:function(e){var t=this._private,n=t.cy.collection();t.newStyle&&(t.contextStyles={},t.propDiffs={},this.cleanElements(e,!0));for(var r=0;r0;if(c||u){var d=void 0;c&&u||c?d=l.properties:u&&(d=l.mappedProperties);for(var f=0;f0){n=!0;break}t.hasPie=n;var i=e.pstyle("text-transform").strValue,o=e.pstyle("label").strValue,a=e.pstyle("source-label").strValue,s=e.pstyle("target-label").strValue,l=e.pstyle("font-style").strValue,c=e.pstyle("font-size").pfValue+"px",u=e.pstyle("font-family").strValue,d=e.pstyle("font-weight").strValue,f=l+"$"+c+"$"+u+"$"+d+"$"+i+"$"+e.pstyle("text-valign").strValue+"$"+e.pstyle("text-valign").strValue+"$"+e.pstyle("text-outline-width").pfValue+"$"+e.pstyle("text-wrap").strValue+"$"+e.pstyle("text-max-width").pfValue;t.labelStyleKey=f,t.sourceLabelKey=f+"$"+a,t.targetLabelKey=f+"$"+s,t.labelKey=f+"$"+o,t.fontKey=l+"$"+d+"$"+c+"$"+u,t.styleKey=Date.now()}},applyParsedProperty:function(e,t){var n=this,o=t,a=e._private.style,s=void 0,l=n.types,c=n.properties[o.name].type,u=o.bypass,d=a[o.name],f=d&&d.bypass,p=e._private,h=function(){n.checkZOrderTrigger(e,o.name,d?d.value:null,o.value)};if("curve-style"===t.name&&"haystack"===t.value&&e.isEdge()&&(e.isLoop()||e.source().isParent()||e.target().isParent())&&(o=t=this.parse(t.name,"bezier",u)),o.delete)return a[o.name]=void 0,h(),!0;if(o.deleteBypassed)return d?!!d.bypass&&(d.bypassed=void 0,h(),!0):(h(),!0);if(o.deleteBypass)return d?!!d.bypass&&(a[o.name]=d.bypassed,h(),!0):(h(),!0);var g=function(){r.error("Do not assign mappings to elements without corresponding data (e.g. 
ele `"+e.id()+"` for property `"+o.name+"` with data field `"+o.field+"`); try a `["+o.field+"]` selector to limit scope to elements with `"+o.field+"` defined")};switch(o.mapped){case l.mapData:for(var m=o.field.split("."),v=p.data,b=0;b1&&(y=1),c.color){var x=o.valueMin[0],w=o.valueMax[0],k=o.valueMin[1],A=o.valueMax[1],E=o.valueMin[2],S=o.valueMax[2],$=null==o.valueMin[3]?1:o.valueMin[3],C=null==o.valueMax[3]?1:o.valueMax[3],_=[Math.round(x+(w-x)*y),Math.round(k+(A-k)*y),Math.round(E+(S-E)*y),Math.round($+(C-$)*y)];s={bypass:o.bypass,name:o.name,value:_,strValue:"rgb("+_[0]+", "+_[1]+", "+_[2]+")"}}else{if(!c.number)return!1;var O=o.valueMin+(o.valueMax-o.valueMin)*y;s=this.parse(o.name,O,o.bypass,"mapping")}s||(s=this.parse(o.name,d.strValue,o.bypass,"mapping")),s||g(),s.mapping=o,o=s;break;case l.data:var j=o.field.split("."),T=p.data;if(T)for(var P=0;P0&&l>0){for(var u={},d=!1,f=0;f0?e.delayAnimation(c).play().promise().then(t):t()})).then((function(){return e.animation({style:u,duration:l,easing:e.pstyle("transition-timing-function").value,queue:!1}).play().promise()})).then((function(){r.removeBypasses(e,s),e.emitAndNotify("style"),a.transitioning=!1}))}else a.transitioning&&(this.removeBypasses(e,s),e.emitAndNotify("style"),a.transitioning=!1)},checkZOrderTrigger:function(e,t,n,r){var i=this.properties[t];null==i.triggersZOrder||null!=n&&!i.triggersZOrder(n,r)||this._private.cy.notify({type:"zorder",eles:e})}};e.exports=a},function(e,t,n){"use strict";var r=n(0),i=n(1),o={applyBypass:function(e,t,n,o){var a=[];if("*"===t||"**"===t){if(void 0!==n)for(var s=0;sn.length?t.substr(n.length):""}function l(){o=o.length>a.length?o.substr(a.length):""}for(t=t.replace(/[/][*](\s|.)+?[*][/]/g,"");!t.match(/^\s*$/);){var c=t.match(/^\s*((?:.|\s)+?)\s*\{((?:.|\s)+?)\}/);if(!c){r.error("Halting stylesheet parsing: String stylesheet contains more to parse but no selector and block found in: "+t);break}n=c[0];var u=c[1];if("core"!==u&&new i(u)._private.invalid)r.error("Skipping parsing of block: Invalid selector found in string stylesheet: "+u),s();else{var d=c[2],f=!1;o=d;for(var p=[];!o.match(/^\s*$/);){var h=o.match(/^\s*(.+?)\s*:\s*(.+?)\s*;/);if(!h){r.error("Skipping parsing of block: Invalid formatting of style property and value definitions found in:"+d),f=!0;break}a=h[0];var g=h[1],m=h[2];this.properties[g]?this.parse(g,m)?(p.push({name:g,val:m}),l()):(r.error("Skipping property: Invalid property definition in: "+a),l()):(r.error("Skipping property: Invalid property name in: "+a),l())}if(f){s();break}this.selector(u);for(var v=0;v node").css({shape:"rectangle",padding:10,"background-color":"#eee","border-color":"#ccc","border-width":1}).selector("edge").css({width:3,"curve-style":"haystack"}).selector(":parent <-> 
node").css({"curve-style":"bezier","source-endpoint":"outside-to-line","target-endpoint":"outside-to-line"}).selector(":selected").css({"background-color":"#0169D9","line-color":"#0169D9","source-arrow-color":"#0169D9","target-arrow-color":"#0169D9","mid-source-arrow-color":"#0169D9","mid-target-arrow-color":"#0169D9"}).selector("node:parent:selected").css({"background-color":"#CCE1F9","border-color":"#aec8e5"}).selector(":active").css({"overlay-color":"black","overlay-padding":10,"overlay-opacity":.25}).selector("core").css({"selection-box-color":"#ddd","selection-box-opacity":.65,"selection-box-border-color":"#aaa","selection-box-border-width":1,"active-bg-color":"black","active-bg-opacity":.15,"active-bg-size":30,"outside-texture-bg-color":"#000","outside-texture-bg-opacity":.125}),this.defaultLength=this.length},e.exports=o},function(e,t,n){"use strict";var r=n(1),i=n(0),o=n(2),a={parse:function(e,t,n,o){if(i.fn(t))return this.parseImplWarn(e,t,n,o);var a=[e,t,n,"mapping"===o||!0===o||!1===o||null==o?"dontcare":o].join("$"),s=this.propCache=this.propCache||{},l=void 0;return(l=s[a])||(l=s[a]=this.parseImplWarn(e,t,n,o)),(n||"mapping"===o)&&(l=r.copy(l))&&(l.value=r.copy(l.value)),l},parseImplWarn:function(e,t,n,i){var o=this.parseImpl(e,t,n,i);return o||null==t||r.error("The style property `%s: %s` is invalid",e,t),o},parseImpl:function(e,t,n,a){e=r.camel2dash(e);var s=this.properties[e],l=t,c=this.types;if(!s)return null;if(void 0===t)return null;s.alias&&(s=s.pointsTo,e=s.name);var u=i.string(t);u&&(t=t.trim());var d=s.type;if(!d)return null;if(n&&(""===t||null===t))return{name:e,value:t,bypass:!0,deleteBypass:!0};if(i.fn(t))return{name:e,value:t,strValue:"fn",mapped:c.fn,bypass:n};var f=void 0,p=void 0;if(!u||a);else{if(f=new RegExp(c.data.regex).exec(t)){if(n)return!1;var h=c.data;return{name:e,value:f,strValue:""+t,mapped:h,field:f[1],bypass:n}}if(p=new RegExp(c.mapData.regex).exec(t)){if(n)return!1;if(d.multiple)return!1;var g=c.mapData;if(!d.color&&!d.number)return!1;var m=this.parse(e,p[4]);if(!m||m.mapped)return!1;var v=this.parse(e,p[5]);if(!v||v.mapped)return!1;if(m.value===v.value)return!1;if(d.color){var b=m.value,y=v.value;if(!(b[0]!==y[0]||b[1]!==y[1]||b[2]!==y[2]||b[3]!==y[3]&&(null!=b[3]&&1!==b[3]||null!=y[3]&&1!==y[3])))return!1}return{name:e,value:p,strValue:""+t,mapped:g,field:p[1],fieldMin:parseFloat(p[2]),fieldMax:parseFloat(p[3]),valueMin:m.value,valueMax:v.value,bypass:n}}}if(d.multiple&&"multiple"!==a){var x=void 0;if(x=u?t.split(/\s+/):i.array(t)?t:[t],d.evenMultiple&&x.length%2!=0)return null;for(var w=[],k=[],A=[],E=!1,S=0;Sd.max||d.strictMax&&t===d.max))return null;var P={name:e,value:t,strValue:""+t+(_||""),units:_,bypass:n};return d.unitless||"px"!==_&&"em"!==_?P.pfValue=t:P.pfValue="px"!==_&&_?this.getEmSizeInPixels()*t:t,"ms"!==_&&"s"!==_||(P.pfValue="ms"===_?t:1e3*t),"deg"!==_&&"rad"!==_||(P.pfValue="rad"===_?t:o.deg2rad(t)),"%"===_&&(P.pfValue=t/100),P}if(d.propList){var D=[],R=""+t;if("none"===R);else{for(var I=R.split(","),N=0;N0&&s>0&&!isNaN(n.w)&&!isNaN(n.h)&&n.w>0&&n.h>0)return{zoom:l=(l=(l=Math.min((a-2*t)/n.w,(s-2*t)/n.h))>this._private.maxZoom?this._private.maxZoom:l)t.maxZoom?t.maxZoom:s)t.maxZoom||!t.zoomingEnabled?a=!0:(t.zoom=l,o.push("zoom"))}if(i&&(!a||!e.cancelOnFailedZoom)&&t.panningEnabled){var c=e.pan;r.number(c.x)&&(t.pan.x=c.x,s=!1),r.number(c.y)&&(t.pan.y=c.y,s=!1),s||o.push("pan")}return o.length>0&&(o.push("viewport"),this.emit(o.join(" ")),this.notify({type:"viewport"})),this},center:function(e){var 
t=this.getCenterPan(e);return t&&(this._private.pan=t,this.emit("pan viewport"),this.notify({type:"viewport"})),this},getCenterPan:function(e,t){if(this._private.panningEnabled){if(r.string(e)){var n=e;e=this.mutableElements().filter(n)}else r.elementOrCollection(e)||(e=this.mutableElements());if(0!==e.length){var i=e.boundingBox(),o=this.width(),a=this.height();return{x:(o-(t=void 0===t?this._private.zoom:t)*(i.x1+i.x2))/2,y:(a-t*(i.y1+i.y2))/2}}}},reset:function(){return this._private.panningEnabled&&this._private.zoomingEnabled?(this.viewport({pan:{x:0,y:0},zoom:1}),this):this},invalidateSize:function(){this._private.sizeCache=null},size:function(){var e,t,n=this._private,r=n.container;return n.sizeCache=n.sizeCache||(r?(e=i.getComputedStyle(r),t=function(t){return parseFloat(e.getPropertyValue(t))},{width:r.clientWidth-t("padding-left")-t("padding-right"),height:r.clientHeight-t("padding-top")-t("padding-bottom")}):{width:1,height:1})},width:function(){return this.size().width},height:function(){return this.size().height},extent:function(){var e=this._private.pan,t=this._private.zoom,n=this.renderedExtent(),r={x1:(n.x1-e.x)/t,x2:(n.x2-e.x)/t,y1:(n.y1-e.y)/t,y2:(n.y2-e.y)/t};return r.w=r.x2-r.x1,r.h=r.y2-r.y1,r},renderedExtent:function(){var e=this.width(),t=this.height();return{x1:0,y1:0,x2:e,y2:t,w:e,h:t}}};a.centre=a.center,a.autolockNodes=a.autolock,a.autoungrabifyNodes=a.autoungrabify,e.exports=a},function(e,t,n){"use strict";var r=n(1),i=n(4),o=n(7),a=n(12),s=n(95),l=n(0),c=n(11),u={},d={};function f(e,t,n){var s=n,d=function(n){r.error("Can not register `"+t+"` for `"+e+"` since `"+n+"` already exists in the prototype and can not be overridden")};if("core"===e){if(a.prototype[t])return d(t);a.prototype[t]=n}else if("collection"===e){if(o.prototype[t])return d(t);o.prototype[t]=n}else if("layout"===e){for(var f=function(e){this.options=e,n.call(this,e),l.plainObject(this._private)||(this._private={}),this._private.cy=e.cy,this._private.listeners=[],this.createEmitter()},h=f.prototype=Object.create(n.prototype),g=[],m=0;m0;)m();c=n.collection();for(var v=function(e){var t=h[e],n=t.maxDegree(!1),r=t.filter((function(e){return e.degree(!1)===n}));c=c.add(r)},b=0;by.length-1;)y.push([]);y[J].push(X),Z.depth=J,Z.index=y[J].length-1}N()}var K=0;if(t.avoidOverlap)for(var ee=0;eec||0===t)&&(r+=l/u,i++)}return r/=i=Math.max(1,i),0===i&&(r=void 0),ie[e.id()]=r,r},ae=function(e,t){return oe(e)-oe(t)},se=0;se<3;se++){for(var le=0;le0&&y[0].length<=3?u/2:0),f=2*Math.PI/y[i].length*o;return 0===i&&1===y[0].length&&(d=1),{x:de+d*Math.cos(f),y:fe+d*Math.sin(f)}}return{x:de+(o+1-(a+1)/2)*s,y:(i+1)*c}}var p={x:de+(o+1-(a+1)/2)*s,y:(i+1)*c};return p},he={},ge=y.length-1;ge>=0;ge--)for(var me=y[ge],ve=0;ve1&&t.avoidOverlap){p*=1.75;var b=Math.cos(d)-Math.cos(0),y=Math.sin(d)-Math.sin(0),x=Math.sqrt(p*p/(b*b+y*y));f=Math.max(x,f)}return s.layoutPositions(this,t,(function(e,n){var r=t.startAngle+n*d*(a?1:-1),i=f*Math.cos(r),o=f*Math.sin(r);return{x:c+i,y:u+o}})),this},e.exports=s},function(e,t,n){"use strict";var r=n(1),i=n(2),o={fit:!0,padding:30,startAngle:1.5*Math.PI,sweep:void 0,clockwise:!0,equidistant:!1,minNodeSpacing:10,boundingBox:void 0,avoidOverlap:!0,nodeDimensionsIncludeLabels:!1,height:void 0,width:void 0,spacingFactor:void 0,concentric:function(e){return e.degree()},levelWidth:function(e){return e.maxDegree()/4},animate:!1,animationDuration:500,animationEasing:void 0,animateFilter:function(e,t){return!0},ready:void 0,stop:void 0,transform:function(e,t){return t}};function 
a(e){this.options=r.extend({},o,e)}a.prototype.run=function(){for(var e=this.options,t=e,n=void 0!==t.counterclockwise?!t.counterclockwise:t.clockwise,r=e.cy,o=t.eles.nodes().not(":parent"),a=i.makeBoundingBox(t.boundingBox?t.boundingBox:{x1:0,y1:0,w:r.width(),h:r.height()}),s=a.x1+a.w/2,l=a.y1+a.h/2,c=[],u=(t.startAngle,0),d=0;d0&&Math.abs(b[0].value-x.value)>=m&&(b=[],v.push(b)),b.push(x)}var w=u+t.minNodeSpacing;if(!t.avoidOverlap){var k=v.length>0&&v[0].length>1,A=(Math.min(a.w,a.h)/2-w)/(v.length+k?1:0);w=Math.min(w,A)}for(var E=0,S=0;S1&&t.avoidOverlap){var O=Math.cos(_)-Math.cos(0),j=Math.sin(_)-Math.sin(0),T=Math.sqrt(w*w/(O*O+j*j));E=Math.max(T,E)}$.r=E,E+=w}if(t.equidistant){for(var P=0,D=0,R=0;R0)var c=(f=r.nodeOverlap*s)*i/(b=Math.sqrt(i*i+o*o)),d=f*o/b;else{var f,p=u(e,i,o),h=u(t,-1*i,-1*o),g=h.x-p.x,m=h.y-p.y,v=g*g+m*m,b=Math.sqrt(v);c=(f=(e.nodeRepulsion+t.nodeRepulsion)/v)*g/b,d=f*m/b}e.isLocked||(e.offsetX-=c,e.offsetY-=d),t.isLocked||(t.offsetX+=c,t.offsetY+=d)}},l=function(e,t,n,r){if(n>0)var i=e.maxX-t.minX;else i=t.maxX-e.minX;if(r>0)var o=e.maxY-t.minY;else o=t.maxY-e.minY;return i>=0&&o>=0?Math.sqrt(i*i+o*o):0},u=function(e,t,n){var r=e.positionX,i=e.positionY,o=e.height||1,a=e.width||1,s=n/t,l=o/a,c={};return 0===t&&0n?(c.x=r,c.y=i+o/2,c):0t&&-1*l<=s&&s<=l?(c.x=r-a/2,c.y=i-a*n/2/t,c):0=l)?(c.x=r+o*t/2/n,c.y=i+o/2,c):0>n&&(s<=-1*l||s>=l)?(c.x=r-o*t/2/n,c.y=i-o/2,c):c},d=function(e,t){for(var n=0;n1){var h=t.gravity*d/p,g=t.gravity*f/p;u.offsetX+=h,u.offsetY+=g}}}}},p=function(e,t){var n=[],r=0,i=-1;for(n.push.apply(n,e.graphSet[0]),i+=e.graphSet[0].length;r<=i;){var o=n[r++],a=e.idToIndex[o],s=e.layoutNodes[a],l=s.children;if(0n)var i={x:n*e/r,y:n*t/r};else i={x:e,y:t};return i},m=function e(t,n){var r=t.parentId;if(null!=r){var i=n.layoutNodes[n.idToIndex[r]],o=!1;return(null==i.maxX||t.maxX+i.padRight>i.maxX)&&(i.maxX=t.maxX+i.padRight,o=!0),(null==i.minX||t.minX-i.padLefti.maxY)&&(i.maxY=t.maxY+i.padBottom,o=!0),(null==i.minY||t.minY-i.padTopg&&(f+=h+t.componentSpacing,d=0,p=0,h=0)}}}(0,i),r})).then((function(e){d.layoutNodes=e.layoutNodes,o.stop(),b()}));var b=function(){!0===e.animate||!1===e.animate?v({force:!0,next:function(){n.one("layoutstop",e.stop),n.emit({type:"layoutstop",layout:n})}}):e.eles.nodes().layoutPositions(n,e,(function(e){var t=d.layoutNodes[d.idToIndex[e.data("id")]];return{x:t.positionX,y:t.positionY}}))};return this},c.prototype.stop=function(){return this.stopped=!0,this.thread&&this.thread.stop(),this.emit("layoutstop"),this},c.prototype.destroy=function(){return this.thread&&this.thread.stop(),this};var u=function(e,t,n){for(var r=n.eles.edges(),i=n.eles.nodes(),s={isCompound:e.hasCompoundNodes(),layoutNodes:[],idToIndex:{},nodeSize:i.size(),graphSet:[],indexToGraph:[],layoutEdges:[],edgeSize:r.size(),temperature:n.initialTemp,clientWidth:e.width(),clientHeight:e.width(),boundingBox:o.makeBoundingBox(n.boundingBox?n.boundingBox:{x1:0,y1:0,w:e.width(),h:e.height()})},l=n.eles.components(),c={},u=0;u0)for(s.graphSet.push(A),u=0;ur.count?0:r.graph},f=function e(t,n,r,i){var o=i.graphSet[r];if(-1a){var h=u(),g=d();(h-1)*g>=a?u(h-1):(g-1)*h>=a&&d(g-1)}else for(;c*l=a?d(v+1):u(m+1)}var b=o.w/c,y=o.h/l;if(t.condense&&(b=0,y=0),t.avoidOverlap)for(var x=0;x=c&&(T=0,j++)},D={},R=0;R(r=i.sqdistToFiniteLine(e,t,w[k],w[k+1],w[k+2],w[k+3])))return b(n,r),!0}else 
if("bezier"===a.edgeType||"multibezier"===a.edgeType||"self"===a.edgeType||"compound"===a.edgeType)for(w=a.allpts,k=0;k+5(r=i.sqdistToQuadraticBezier(e,t,w[k],w[k+1],w[k+2],w[k+3],w[k+4],w[k+5])))return b(n,r),!0;v=v||o.source,x=x||o.target;var A=l.getArrowWidth(s,u),E=[{name:"source",x:a.arrowStartX,y:a.arrowStartY,angle:a.srcArrowAngle},{name:"target",x:a.arrowEndX,y:a.arrowEndY,angle:a.tgtArrowAngle},{name:"mid-source",x:a.midX,y:a.midY,angle:a.midsrcArrowAngle},{name:"mid-target",x:a.midX,y:a.midY,angle:a.midtgtArrowAngle}];for(k=0;k0&&(y(v),y(x))}function w(e,t,n){return o.getPrefixedProperty(e,t,n)}function k(n,r){var o,a=n._private,s=m;o=r?r+"-":"";var l=n.pstyle(o+"label").value;if("yes"===n.pstyle("text-events").strValue&&l){var c=a.rstyle,u=n.pstyle("text-border-width").pfValue,d=n.pstyle("text-background-padding").pfValue,f=w(c,"labelWidth",r)+u+2*s+2*d,p=w(c,"labelHeight",r)+u+2*s+2*d,h=w(c,"labelX",r),g=w(c,"labelY",r),v=w(a.rscratch,"labelAngle",r),y=h-f/2,x=h+f/2,k=g-p/2,A=g+p/2;if(v){var E=Math.cos(v),S=Math.sin(v),$=function(e,t){return{x:(e-=h)*E-(t-=g)*S+h,y:e*S+t*E+g}},C=$(y,k),_=$(y,A),O=$(x,k),j=$(x,A),T=[C.x,C.y,O.x,O.y,j.x,j.y,_.x,_.y];if(i.pointInsidePolygonPoints(e,t,T))return b(n),!0}else{var P={w:f,h:p,x1:y,x2:x,y1:k,y2:A};if(i.inBoundingBox(P,e,t))return b(n),!0}}}n&&(u=u.interactive);for(var A=u.length-1;A>=0;A--){var E=u[A];E.isNode()?y(E)||k(E):x(E)||k(E)||k(E,"source")||k(E,"target")}return d},getAllInBox:function(e,t,n,r){var o=this.getCachedZSortedEles().interactive,a=[],s=Math.min(e,n),l=Math.max(e,n),c=Math.min(t,r),u=Math.max(t,r);e=s,n=l,t=c,r=u;for(var d=i.makeBoundingBox({x1:e,y1:t,x2:n,y2:r}),f=0;fb?b+"$-$"+v:v+"$-$"+b,g&&(t="unbundled$-$"+h.id);var y=u[t];null==y&&(y=u[t]=[],d.push(t)),y.push(Bt),g&&(y.hasUnbundled=!0),m&&(y.hasBezier=!0)}else f.push(Bt)}for(var x=0;xGt.id()){var k=Ht;Ht=Gt,Gt=k}Wt=Ht.position(),Yt=Gt.position(),Xt=Ht.outerWidth(),Qt=Ht.outerHeight(),Zt=Gt.outerWidth(),Jt=Gt.outerHeight(),n=l.nodeShapes[this.getNodeShape(Ht)],o=l.nodeShapes[this.getNodeShape(Gt)],s=!1;var A={north:0,west:0,south:0,east:0,northwest:0,southwest:0,northeast:0,southeast:0},E=Wt.x,S=Wt.y,$=Xt,C=Qt,_=Yt.x,O=Yt.y,j=Zt,T=Jt,P=w.length;for(p=0;p=d||w){p={cp:b,segment:x};break}}if(p)break}b=p.cp;var k=(d-g)/(x=p.segment).length,A=x.t1-x.t0,E=u?x.t0+A*k:x.t1-A*k;E=r.bound(0,E,1),t=r.qbezierPtAt(b.p0,b.p1,b.p2,E),c=function(e,t,n,i){var o=r.bound(0,i-.001,1),a=r.bound(0,i+.001,1),s=r.qbezierPtAt(e,t,n,o),l=r.qbezierPtAt(e,t,n,a);return f(s,l)}(b.p0,b.p1,b.p2,E);break;case"straight":case"segments":case"haystack":var S,$,C,_,O=0,j=i.allpts.length;for(v=0;v+3=d));v+=2);E=(d-$)/S,E=r.bound(0,E,1),t=r.lineAt(C,_,E),c=f(C,_)}l("labelX",o,t.x),l("labelY",o,t.y),l("labelAutoAngle",o,c)}};c("source"),c("target"),this.applyLabelDimensions(e)}},applyLabelDimensions:function(e){this.applyPrefixedLabelDimensions(e),e.isEdge()&&(this.applyPrefixedLabelDimensions(e,"source"),this.applyPrefixedLabelDimensions(e,"target"))},applyPrefixedLabelDimensions:function(e,t){var n=e._private,r=this.getLabelText(e,t),i=this.calculateLabelDimensions(e,r);o.setPrefixedProperty(n.rstyle,"labelWidth",t,i.width),o.setPrefixedProperty(n.rscratch,"labelWidth",t,i.width),o.setPrefixedProperty(n.rstyle,"labelHeight",t,i.height),o.setPrefixedProperty(n.rscratch,"labelHeight",t,i.height)},getLabelText:function(e,t){var n=e._private,r=t?t+"-":"",i=e.pstyle(r+"label").strValue,a=e.pstyle("text-transform").value,s=function(e,r){return 
r?(o.setPrefixedProperty(n.rscratch,e,t,r),r):o.getPrefixedProperty(n.rscratch,e,t)};"none"==a||("uppercase"==a?i=i.toUpperCase():"lowercase"==a&&(i=i.toLowerCase()));var l=e.pstyle("text-wrap").value;if("wrap"===l){var c=s("labelKey");if(c&&s("labelWrapKey")===c)return s("labelWrapCachedText");for(var u=i.split("\n"),d=e.pstyle("text-max-width").pfValue,f=[],p=0;pd){for(var g=h.split(/\s+/),m="",v=0;vd);k++)x+=i[k],k===i.length-1&&(w=!0);return w||(x+="…"),x}return i},calculateLabelDimensions:function(e,t,n){var r=e._private.labelStyleKey+"$@$"+t;n&&(r+="$@$"+n);var i=this.labelDimCache||(this.labelDimCache={});if(i[r])return i[r];var o=e.pstyle("font-style").strValue,a=1*e.pstyle("font-size").pfValue+"px",s=e.pstyle("font-family").strValue,l=e.pstyle("font-weight").strValue,c=this.labelCalcDiv;c||(c=this.labelCalcDiv=document.createElement("div"),document.body.appendChild(c));var u=c.style;return u.fontFamily=s,u.fontStyle=o,u.fontSize=a,u.fontWeight=l,u.position="absolute",u.left="-9999px",u.top="-9999px",u.zIndex="-1",u.visibility="hidden",u.pointerEvents="none",u.padding="0",u.lineHeight="1","wrap"===e.pstyle("text-wrap").value?u.whiteSpace="pre":u.whiteSpace="normal",c.textContent=t,i[r]={width:Math.ceil(c.clientWidth/1),height:Math.ceil(c.clientHeight/1)},i[r]},calculateLabelAngles:function(e){var t=e._private.rscratch,n=e.isEdge(),r=e.pstyle("text-rotation"),i=r.strValue;"none"===i?t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle=0:n&&"autorotate"===i?(t.labelAngle=Math.atan(t.midDispY/t.midDispX),t.sourceLabelAngle=t.sourceLabelAutoAngle,t.targetLabelAngle=t.targetLabelAutoAngle):t.labelAngle=t.sourceLabelAngle=t.targetLabelAngle="autorotate"===i?0:r.pfValue}};e.exports=a},function(e,t,n){"use strict";var r={getNodeShape:function(e){var t=e.pstyle("shape").value;if(e.isParent())return"rectangle"===t||"roundrectangle"===t||"cutrectangle"===t||"barrel"===t?t:"rectangle";if("polygon"===t){var n=e.pstyle("shape-polygon-points").value;return this.nodeShapes.makePolygon(n).name}return t}};e.exports=r},function(e,t,n){"use strict";var r={registerCalculationListeners:function(){var e=this.cy,t=e.collection(),n=this,r=function(e,n){var r=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];t.merge(e);for(var i=0;i=e.desktopTapThreshold2}var C=n(i);b&&(e.hoverData.tapholdCancelled=!0),s=!0,t(v,["mousemove","vmousemove","tapdrag"],i,{position:{x:p[0],y:p[1]}});var _=function(){e.data.bgActivePosistion=void 0,e.hoverData.selecting||l.emit("boxstart"),m[4]=1,e.hoverData.selecting=!0,e.redrawHint("select",!0),e.redraw()};if(3===e.hoverData.which){if(b){var O={originalEvent:i,type:"cxtdrag",position:{x:p[0],y:p[1]}};x?x.emit(O):l.emit(O),e.hoverData.cxtDragged=!0,e.hoverData.cxtOver&&v===e.hoverData.cxtOver||(e.hoverData.cxtOver&&e.hoverData.cxtOver.emit({originalEvent:i,type:"cxtdragout",position:{x:p[0],y:p[1]}}),e.hoverData.cxtOver=v,v&&v.emit({originalEvent:i,type:"cxtdragover",position:{x:p[0],y:p[1]}}))}}else if(e.hoverData.dragging){if(s=!0,l.panningEnabled()&&l.userPanningEnabled()){var T;if(e.hoverData.justStartedPan){var P=e.hoverData.mdownPos;T={x:(p[0]-P[0])*c,y:(p[1]-P[1])*c},e.hoverData.justStartedPan=!1}else T={x:w[0]*c,y:w[1]*c};l.panBy(T),e.hoverData.dragged=!0}p=e.projectIntoViewport(i.clientX,i.clientY)}else 
if(1!=m[4]||null!=x&&!x.isEdge()){if(x&&x.isEdge()&&x.active()&&x.unactivate(),x&&x.grabbed()||v==y||(y&&t(y,["mouseout","tapdragout"],i,{position:{x:p[0],y:p[1]}}),v&&t(v,["mouseover","tapdragover"],i,{position:{x:p[0],y:p[1]}}),e.hoverData.last=v),x)if(b){if(l.boxSelectionEnabled()&&C)x&&x.grabbed()&&(f(k),x.emit("free")),_();else if(x&&x.grabbed()&&e.nodeIsDraggable(x)){var D=!e.dragData.didDrag;D&&e.redrawHint("eles",!0),e.dragData.didDrag=!0;var R=[];e.hoverData.draggingEles||u(l.collection(k),{inDragLayer:!0});for(var I=0;I0&&e.redrawHint("eles",!0),e.dragData.possibleDragElements=l=[]),t(s,["mouseup","tapend","vmouseup"],r,{position:{x:o[0],y:o[1]}}),e.dragData.didDrag||e.hoverData.dragged||e.hoverData.selecting||e.hoverData.isOverThresholdDrag||t(c,["click","tap","vclick"],r,{position:{x:o[0],y:o[1]}}),s!=c||e.dragData.didDrag||e.hoverData.selecting||null!=s&&s._private.selectable&&(e.hoverData.dragging||("additive"===i.selectionType()||u?s.selected()?s.unselect():s.select():u||(i.$(":selected").unmerge(s).unselect(),s.select())),e.redrawHint("eles",!0)),e.hoverData.selecting){var h=i.collection(e.getAllInBox(a[0],a[1],a[2],a[3]));e.redrawHint("select",!0),h.length>0&&e.redrawHint("eles",!0),i.emit("boxend");var g=function(e){return e.selectable()&&!e.selected()};"additive"===i.selectionType()||u||i.$(":selected").unmerge(h).unselect(),h.emit("box").stdFilter(g).select().emit("boxselect"),e.redraw()}if(e.hoverData.dragging&&(e.hoverData.dragging=!1,e.redrawHint("select",!0),e.redrawHint("eles",!0),e.redraw()),!a[4]){e.redrawHint("drag",!0),e.redrawHint("eles",!0);var m=c&&c.grabbed();f(l),m&&c.emit("free")}}a[4]=0,e.hoverData.down=null,e.hoverData.cxtStarted=!1,e.hoverData.draggingEles=!1,e.hoverData.selecting=!1,e.hoverData.isOverThresholdDrag=!1,e.dragData.didDrag=!1,e.hoverData.dragged=!1,e.hoverData.dragDelta=[],e.hoverData.mdownPos=null,e.hoverData.mdownGPos=null}}),!1),e.registerBinding(e.container,"wheel",(function(t){if(!e.scrollingPage){var n,r=e.cy,i=e.projectIntoViewport(t.clientX,t.clientY),o=[i[0]*r.zoom()+r.pan().x,i[1]*r.zoom()+r.pan().y];e.hoverData.draggingEles||e.hoverData.dragging||e.hoverData.cxtStarted||0!==e.selection[4]?t.preventDefault():r.panningEnabled()&&r.userPanningEnabled()&&r.zoomingEnabled()&&r.userZoomingEnabled()&&(t.preventDefault(),e.data.wheelZooming=!0,clearTimeout(e.data.wheelTimeout),e.data.wheelTimeout=setTimeout((function(){e.data.wheelZooming=!1,e.redrawHint("eles",!0),e.redraw()}),150),n=null!=t.deltaY?t.deltaY/-250:null!=t.wheelDeltaY?t.wheelDeltaY/1e3:t.wheelDelta/1e3,n*=e.wheelSensitivity,1===t.deltaMode&&(n*=33),r.zoom({level:r.zoom()*Math.pow(10,n),renderedPosition:{x:o[0],y:o[1]}}))}}),!0),e.registerBinding(window,"scroll",(function(t){e.scrollingPage=!0,clearTimeout(e.scrollingPageTimeout),e.scrollingPageTimeout=setTimeout((function(){e.scrollingPage=!1}),250)}),!0),e.registerBinding(e.container,"mouseout",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseout",position:{x:n[0],y:n[1]}})}),!1),e.registerBinding(e.container,"mouseover",(function(t){var n=e.projectIntoViewport(t.clientX,t.clientY);e.cy.emit({originalEvent:t,type:"mouseover",position:{x:n[0],y:n[1]}})}),!1);var T,P,D,R,I=function(e,t,n,r){return Math.sqrt((n-e)*(n-e)+(r-t)*(r-t))},N=function(e,t,n,r){return(n-e)*(n-e)+(r-t)*(r-t)};if(e.registerBinding(e.container,"touchstart",T=function(n){if(j(n)){e.touchData.capture=!0,e.data.bgActivePosistion=void 0;var 
r=e.cy,i=e.touchData.now,o=e.touchData.earlier;if(n.touches[0]){var a=e.projectIntoViewport(n.touches[0].clientX,n.touches[0].clientY);i[0]=a[0],i[1]=a[1]}if(n.touches[1]&&(a=e.projectIntoViewport(n.touches[1].clientX,n.touches[1].clientY),i[2]=a[0],i[3]=a[1]),n.touches[2]&&(a=e.projectIntoViewport(n.touches[2].clientX,n.touches[2].clientY),i[4]=a[0],i[5]=a[1]),n.touches[1]){f(e.dragData.touchDragEles);var s=e.findContainerClientCoords();S=s[0],$=s[1],C=s[2],_=s[3],v=n.touches[0].clientX-S,b=n.touches[0].clientY-$,y=n.touches[1].clientX-S,x=n.touches[1].clientY-$,O=0<=v&&v<=C&&0<=y&&y<=C&&0<=b&&b<=_&&0<=x&&x<=_;var c=r.pan(),p=r.zoom();if(w=I(v,b,y,x),k=N(v,b,y,x),E=[((A=[(v+y)/2,(b+x)/2])[0]-c.x)/p,(A[1]-c.y)/p],k<4e4&&!n.touches[2]){var h=e.findNearestElement(i[0],i[1],!0,!0),g=e.findNearestElement(i[2],i[3],!0,!0);return h&&h.isNode()?(h.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=h):g&&g.isNode()?(g.activate().emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start=g):r.emit({originalEvent:n,type:"cxttapstart",position:{x:i[0],y:i[1]}}),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxt=!0,e.touchData.cxtDragged=!1,e.data.bgActivePosistion=void 0,void e.redraw()}}if(n.touches[2]);else if(n.touches[1]);else if(n.touches[0]){var m=e.findNearestElements(i[0],i[1],!0,!0),T=m[0];if(null!=T&&(T.activate(),e.touchData.start=T,e.touchData.starts=m,e.nodeIsGrabbable(T))){var P=e.dragData.touchDragEles=[],D=null;e.redrawHint("eles",!0),e.redrawHint("drag",!0),T.selected()?(D=r.$((function(t){return t.selected()&&e.nodeIsGrabbable(t)})),u(D,{addToList:P})):d(T,{addToList:P}),l(T);var R=function(e){return{originalEvent:n,type:e,position:{x:i[0],y:i[1]}}};T.emit(R("grabon")),D?D.forEach((function(e){e.emit(R("grab"))})):T.emit(R("grab"))}t(T,["touchstart","tapstart","vmousedown"],n,{position:{x:i[0],y:i[1]}}),null==T&&(e.data.bgActivePosistion={x:a[0],y:a[1]},e.redrawHint("select",!0),e.redraw()),e.touchData.singleTouchMoved=!1,e.touchData.singleTouchStartTime=+new Date,clearTimeout(e.touchData.tapholdTimeout),e.touchData.tapholdTimeout=setTimeout((function(){!1!==e.touchData.singleTouchMoved||e.pinching||e.touchData.selecting||(t(e.touchData.start,["taphold"],n,{position:{x:i[0],y:i[1]}}),e.touchData.start||r.$(":selected").unselect())}),e.tapholdDuration)}if(n.touches.length>=1){for(var M=e.touchData.startPosition=[],z=0;z=e.touchTapThreshold2}if(i&&e.touchData.cxt){n.preventDefault();var D=n.touches[0].clientX-S,R=n.touches[0].clientY-$,M=n.touches[1].clientX-S,z=n.touches[1].clientY-$,L=N(D,R,M,z);if(L/k>=2.25||L>=22500){e.touchData.cxt=!1,e.data.bgActivePosistion=void 0,e.redrawHint("select",!0);var B={originalEvent:n,type:"cxttapend",position:{x:c[0],y:c[1]}};e.touchData.start?(e.touchData.start.unactivate().emit(B),e.touchData.start=null):l.emit(B)}}if(i&&e.touchData.cxt){B={originalEvent:n,type:"cxtdrag",position:{x:c[0],y:c[1]}},e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),e.touchData.start?e.touchData.start.emit(B):l.emit(B),e.touchData.start&&(e.touchData.start._private.grabbed=!1),e.touchData.cxtDragged=!0;var F=e.findNearestElement(c[0],c[1],!0,!0);e.touchData.cxtOver&&F===e.touchData.cxtOver||(e.touchData.cxtOver&&e.touchData.cxtOver.emit({originalEvent:n,type:"cxtdragout",position:{x:c[0],y:c[1]}}),e.touchData.cxtOver=F,F&&F.emit({originalEvent:n,type:"cxtdragover",position:{x:c[0],y:c[1]}}))}else 
if(i&&n.touches[2]&&l.boxSelectionEnabled())n.preventDefault(),e.data.bgActivePosistion=void 0,this.lastThreeTouch=+new Date,e.touchData.selecting||l.emit("boxstart"),e.touchData.selecting=!0,e.redrawHint("select",!0),s&&0!==s.length&&void 0!==s[0]?(s[2]=(c[0]+c[2]+c[4])/3,s[3]=(c[1]+c[3]+c[5])/3):(s[0]=(c[0]+c[2]+c[4])/3,s[1]=(c[1]+c[3]+c[5])/3,s[2]=(c[0]+c[2]+c[4])/3+1,s[3]=(c[1]+c[3]+c[5])/3+1),s[4]=1,e.touchData.selecting=!0,e.redraw();else if(i&&n.touches[1]&&l.zoomingEnabled()&&l.panningEnabled()&&l.userZoomingEnabled()&&l.userPanningEnabled()){if(n.preventDefault(),e.data.bgActivePosistion=void 0,e.redrawHint("select",!0),ee=e.dragData.touchDragEles){e.redrawHint("drag",!0);for(var q=0;q0)return h[0]}return null},p=Object.keys(d),h=0;h0?f:r.roundRectangleIntersectLine(o,a,e,t,n,i,s)},checkPoint:function(e,t,n,i,o,a,s){var l=r.getRoundRectangleRadius(i,o),c=2*l;if(r.pointInsidePolygon(e,t,this.points,a,s,i,o-c,[0,-1],n))return!0;if(r.pointInsidePolygon(e,t,this.points,a,s,i-c,o,[0,-1],n))return!0;var u=i/2+2*n,d=o/2+2*n,f=[a-u,s-d,a-u,s,a+u,s,a+u,s-d];return!!r.pointInsidePolygonPoints(e,t,f)||!!r.checkInEllipse(e,t,c,c,a+i/2-l,s+o/2-l,n)||!!r.checkInEllipse(e,t,c,c,a-i/2+l,s+o/2-l,n)}}},registerNodeShapes:function(){var e=this.nodeShapes={},t=this;this.generateEllipse(),this.generatePolygon("triangle",r.generateUnitNgonPointsFitToSquare(3,0)),this.generatePolygon("rectangle",r.generateUnitNgonPointsFitToSquare(4,0)),e.square=e.rectangle,this.generateRoundRectangle(),this.generateCutRectangle(),this.generateBarrel(),this.generateBottomRoundrectangle(),this.generatePolygon("diamond",[0,1,1,0,0,-1,-1,0]),this.generatePolygon("pentagon",r.generateUnitNgonPointsFitToSquare(5,0)),this.generatePolygon("hexagon",r.generateUnitNgonPointsFitToSquare(6,0)),this.generatePolygon("heptagon",r.generateUnitNgonPointsFitToSquare(7,0)),this.generatePolygon("octagon",r.generateUnitNgonPointsFitToSquare(8,0));var n=new Array(20),i=r.generateUnitNgonPoints(5,0),o=r.generateUnitNgonPoints(5,Math.PI/5),a=.5*(3-Math.sqrt(5));a*=1.57;for(var s=0;s0&&t.data.lyrTxrCache.invalidateElements(n)}))}l.CANVAS_LAYERS=3,l.SELECT_BOX=0,l.DRAG=1,l.NODE=2,l.BUFFER_COUNT=3,l.TEXTURE_BUFFER=0,l.MOTIONBLUR_BUFFER_NODE=1,l.MOTIONBLUR_BUFFER_DRAG=2,l.redrawHint=function(e,t){var n=this;switch(e){case"eles":n.data.canvasNeedsRedraw[l.NODE]=t;break;case"drag":n.data.canvasNeedsRedraw[l.DRAG]=t;break;case"select":n.data.canvasNeedsRedraw[l.SELECT_BOX]=t}};var u="undefined"!=typeof Path2D;l.path2dEnabled=function(e){if(void 0===e)return this.pathsEnabled;this.pathsEnabled=!!e},l.usePaths=function(){return u&&this.pathsEnabled},[n(126),n(127),n(128),n(129),n(130),n(131),n(132),n(133),n(134),n(135)].forEach((function(e){r.extend(l,e)})),e.exports=s},function(e,t,n){"use strict";var r=n(2),i=n(1),o=n(9),a=n(19),s={dequeue:"dequeue",downscale:"downscale",highQuality:"highQuality"},l=function(e){this.renderer=e,this.onDequeues=[],this.setupDequeueing()},c=l.prototype;c.reasons=s,c.getTextureQueue=function(e){return this.eleImgCaches=this.eleImgCaches||{},this.eleImgCaches[e]=this.eleImgCaches[e]||[]},c.getRetiredTextureQueue=function(e){var t=this.eleImgCaches.retired=this.eleImgCaches.retired||{};return t[e]=t[e]||[]},c.getElementQueue=function(){return this.eleCacheQueue=this.eleCacheQueue||new o((function(e,t){return t.reqs-e.reqs}))},c.getElementIdToQueue=function(){return this.eleIdToCacheQueue=this.eleIdToCacheQueue||{}},c.getElement=function(e,t,n,i,o){var 
a=this,l=this.renderer,c=e._private.rscratch,u=l.cy.zoom();if(0===t.w||0===t.h||!e.visible())return null;if(null==i&&(i=Math.ceil(r.log2(u*n))),i<-4)i=-4;else if(u>=3.99||i>2)return null;var d,f=Math.pow(2,i),p=t.h*f,h=t.w*f,g=c.imgCaches=c.imgCaches||{},m=g[i];if(m)return m;if(d=p<=25?25:p<=50?50:50*Math.ceil(p/50),p>1024||h>1024||e.isEdge()||e.isParent())return null;var v=a.getTextureQueue(d),b=v[v.length-2],y=function(){return a.recycleTexture(d,h)||a.addTexture(d,h)};b||(b=v[v.length-1]),b||(b=y()),b.width-b.usedWidthi;$--)C=a.getElement(e,t,n,$,s.downscale);_()}else{var O;if(!A&&!E&&!S)for($=i-1;$>=-4;$--){var j;if(j=g[$]){O=j;break}}if(k(O))return a.queueElement(e,i),O;b.context.translate(b.usedWidth,0),b.context.scale(f,f),l.drawElement(b.context,e,t,w),b.context.scale(1/f,1/f),b.context.translate(-b.usedWidth,0)}return m=g[i]={ele:e,x:b.usedWidth,texture:b,level:i,scale:f,width:h,height:p,scaledLabelShown:w},b.usedWidth+=Math.ceil(h+8),b.eleCaches.push(m),a.checkTextureFullness(b),m},c.invalidateElement=function(e){var t=e._private.rscratch.imgCaches;if(t)for(var n=-4;n<=2;n++){var r=t[n];if(r){var o=r.texture;o.invalidatedWidth+=r.width,t[n]=null,i.removeFromArray(o.eleCaches,r),this.removeFromQueue(e),this.checkTextureUtility(o)}}},c.checkTextureUtility=function(e){e.invalidatedWidth>=.5*e.width&&this.retireTexture(e)},c.checkTextureFullness=function(e){var t=this.getTextureQueue(e.height);e.usedWidth/e.width>.8&&e.fullnessChecks>=10?i.removeFromArray(t,e):e.fullnessChecks++},c.retireTexture=function(e){var t=e.height,n=this.getTextureQueue(t);i.removeFromArray(n,e),e.retired=!0;for(var r=e.eleCaches,o=0;o=t)return a.retired=!1,a.usedWidth=0,a.invalidatedWidth=0,a.fullnessChecks=0,i.clearArray(a.eleCaches),a.context.setTransform(1,0,0,1,0,0),a.context.clearRect(0,0,a.width,a.height),i.removeFromArray(r,a),n.push(a),a}},c.queueElement=function(e,t){var n=this.getElementQueue(),r=this.getElementIdToQueue(),i=e.id(),o=r[i];if(o)o.level=Math.max(o.level,t),o.reqs++,n.updateItem(o);else{var a={ele:e,level:t,reqs:1};n.push(a),r[i]=a}},c.dequeue=function(e){for(var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=[],i=0;i<1&&t.size()>0;i++){var o=t.pop(),a=o.ele;if(null==a._private.rscratch.imgCaches[o.level]){n[a.id()]=null,r.push(o);var l=a.boundingBox();this.getElement(a,l,e,o.level,s.dequeue)}}return r},c.removeFromQueue=function(e){var t=this.getElementQueue(),n=this.getElementIdToQueue(),r=n[e.id()];null!=r&&(r.reqs=i.MAX_INT,t.updateItem(r),t.pop(),n[e.id()]=null)},c.onDequeue=function(e){this.onDequeues.push(e)},c.offDequeue=function(e){i.removeFromArray(this.onDequeues,e)},c.setupDequeueing=a.setupDequeueing({deqRedrawThreshold:100,deqCost:.15,deqAvgCost:.1,deqNoDrawCost:.9,deqFastCost:.9,deq:function(e,t,n){return e.dequeue(t,n)},onDeqd:function(e,t){for(var n=0;n=3.99||n>2)return null;o.validateLayersElesOrdering(n,e);var l,c,u=o.layersByLevel,d=Math.pow(2,n),f=u[n]=u[n]||[];if(o.levelIsComplete(n,e))return f;!function(){var t=function(t){if(o.validateLayersElesOrdering(t,e),o.levelIsComplete(t,e))return c=u[t],!0},i=function(e){if(!c)for(var r=n+e;-4<=r&&r<=2&&!t(r);r+=e);};i(1),i(-1);for(var a=f.length-1;a>=0;a--){var s=f[a];s.invalid&&r.removeFromArray(f,s)}}();var p=function(t){var r=(t=t||{}).after;if(function(){if(!l){l=i.makeBoundingBox();for(var t=0;t16e6)return null;var a=o.makeLayer(l,n);if(null!=r){var s=f.indexOf(r)+1;f.splice(s,0,a)}else(void 0===t.insert||t.insert)&&f.unshift(a);return a};if(o.skipping&&!s)return null;for(var 
[minified cytoscape.js canvas renderer: layered element/texture caches and invalidation, edge drawing (paths, arrowheads, dashed/dotted/solid styles, ghost offsets), label rendering (background, border, outline, wrapped text), node drawing (background images, borders, pie slices, overlay and ghost effects), drawPie arc geometry, backing-store pixel-ratio detection, the main render loop with motion blur and viewport texture caching, polygon/ellipse/roundrect shape path primitives, and PNG/JPEG export]
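The drawPie arc math survives intact in the garbled renderer above. A minimal sketch of that geometry; drawPieSlices and the slices array shape are illustrative names, not the bundle's own:

// Each "pie-i-background-size" percentage becomes a fraction of the circle,
// swept clockwise starting from 12 o'clock (1.5 * PI), as in the bundle.
function drawPieSlices(ctx, x, y, radius, slices /* [{fraction, color}] */) {
  let consumed = 0; // fraction of the circle already drawn
  for (const { fraction, color } of slices) {
    const f = Math.min(fraction, 1 - consumed); // clamp overflowing slices
    if (f <= 0 || consumed >= 1) continue;
    const start = 1.5 * Math.PI + 2 * Math.PI * consumed;
    const end = start + 2 * Math.PI * f;
    ctx.beginPath();
    ctx.moveTo(x, y); // wedge from the center
    ctx.arc(x, y, radius, start, end);
    ctx.closePath();
    ctx.fillStyle = color;
    ctx.fill();
    consumed += f;
  }
}

Starting at 1.5π puts the first slice's leading edge at 12 o'clock, which is how the pie-* styles render on nodes.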
[minified vendor modules: setImmediate polyfill, lodash debounce and assorted lodash internals (hash/list caches, merge, orderBy, uniq), a binary-heap helper, the cytoscape-context-menus jQuery extension (menu item creation, show/hide/disable, destroy API), the cytoscape-dagre layout adapter (copies nodeSep/edgeSep/rankSep/rankDir/ranker into a graphlib graph), graphlib algorithms (all-pairs shortest paths, Prim spanning tree), and the dagre layout pipeline (acyclic, nesting graph, ranking via network simplex, normalize, barycenter ordering, coordinate assignment and translation)]
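The cytoscape-dagre adapter above builds a graphlib graph and hands it to dagre. A standalone sketch of that handoff, assuming only the public dagre API; the option values here are illustrative, not the bundle's defaults:

// dagre exposes graphlib under dagre.graphlib; the adapter creates a
// multigraph/compound graph and copies layout options across when set.
const dagre = require("dagre");

const g = new dagre.graphlib.Graph({ multigraph: true, compound: true });
const graphOpts = {};
const setOpt = (k, v) => { if (v != null) graphOpts[k] = v; }; // skip unset options
setOpt("nodesep", 50);   // px between nodes in the same rank
setOpt("ranksep", 100);  // px between ranks
setOpt("rankdir", "LR"); // left-to-right, like the docs lineage graph
g.setGraph(graphOpts);
g.setDefaultNodeLabel(() => ({}));
g.setDefaultEdgeLabel(() => ({}));

// dagre needs width/height on every node to position it
g.setNode("model.a", { width: 120, height: 40 });
g.setNode("model.b", { width: 120, height: 40 });
g.setEdge("model.a", "model.b", {}, "edge0"); // named edge (multigraph)

dagre.layout(g); // writes x/y back onto each node label
g.nodes().forEach((n) => console.log(n, g.node(n).x, g.node(n).y));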
[dbt docs Angular UI begins: the modelTreeLine directive and model_tree_line.html template (recursive file-tree rows with folder/source click-through, per-type icons, truncated names), .unselectable CSS, and the docsSearch directive (limits results, builds match-context snippets of roughly 75 characters on each side, highlights query terms, filters matching columns)]
[search.html template: echoes "{{ query }}" and "{{ results.length }} search results", checkboxes to match on names/descriptions/columns/code/tags, result cards showing each model's resource type, match context, matching columns and tags, and "Show ... more" links; followed by the tableDetails directive (formats sizes, percentages and row counts; assembles Owner/Type/Package/Language/Relation detail rows plus extended stats and meta taken from the model or its source)]
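The tableDetails size formatter is intact in the minified source; restated readably below. formatBytes is an illustrative name for the anonymous helper:

function formatBytes(value, decimals = 0) {
  if (value === 0) return "0 bytes";
  if (value < 1) value *= 1e6; // sub-1 values are treated as fractional MB
  if (isNaN(parseFloat(value)) || !isFinite(value)) return "-";
  const units = ["bytes", "KB", "MB", "GB", "TB", "PB"];
  const i = Math.floor(Math.log(value) / Math.log(1024));
  return (value / Math.pow(1024, i)).toFixed(decimals) + " " + units[i];
}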
[table_details.html template: a "Details" panel with meta key/value rows, a "Tags" panel with an "untagged" fallback, detail name/value rows, and extended stat label/value rows; .details-content and .detail-body CSS; the columnDetails directive (sorts columns by index, detects test short-codes, expands only rows that have tests, a description, or meta)]
[column_details.html template: a Column/Type/Description/Tests/More table with a "Column information is not available for this seed" fallback, U/N/F/A/+ test badges, and expandable per-column Details, Description and Generic Tests panels; the codeBlock directive (version selector, Prism highlighting on selection, titleCase labels, copy-to-clipboard with a one-second "copied" flash)]
[code_block.html template with its "Code" header; pre.code CSS; the macroArguments directive and its template (an Argument/Type/Description table with expandable details, or "Details are not available for this macro"); the referenceList directive (groups referencing nodes by resource type, maps types to plural labels such as model -> Models, and builds state links per node)]
[references/index.html template ("No resources reference this {{ node.resource_type }}" fallback plus tabbed reference lists); controller module registrations; ModelCtrl, SourceCtrl, SeedCtrl and SnapshotCtrl (each resolves its node by unique_id once the project is ready, gathers references and parents, and exposes Source/Compiled code versions or generated sample SQL); nav-tab and expanded-column CSS]
[TestCtrl, MacroCtrl (for adapter macros, picks the implementation matching the adapter type, else "default"), AnalysisCtrl, ExposureCtrl and MetricCtrl (expose maturity/owner or metric type and name as extra table fields), OperationCtrl, GraphCtrl (shows the full or sidebar lineage graph for the selected node), and MainController (builds the model tree, focuses search on the "t" key, clears it on Escape, re-selects nodes on route changes, scores search results by weighted field matches, and initializes selection and anonymous usage tracking)]
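MainController's result ranking is legible in the minified source: each field's match count is multiplied by a fixed weight and summed. A sketch with illustrative names (scoreResult, countOccurrences); the WEIGHTS values match the object visible above:

const WEIGHTS = { name: 10, tags: 5, description: 3, raw_code: 2, columns: 1 };

function countOccurrences(haystack, needle) {
  if (!needle) return 0;
  let count = 0, i = 0;
  while ((i = haystack.indexOf(needle, i)) !== -1) { count++; i++; }
  return count;
}

function scoreResult(model, query) {
  const q = query.toLowerCase();
  let score = 0;
  for (const [field, weight] of Object.entries(WEIGHTS)) {
    const value = model[field];
    if (value == null) continue;
    let hits = 0;
    if (field === "columns") {
      // manifest columns are keyed by name; only the names are searched
      for (const col of Object.values(value)) {
        if (col.name) hits += countOccurrences(col.name.toLowerCase(), q);
      }
    } else if (field === "tags") {
      for (const tag of value) hits += countOccurrences(tag.toLowerCase(), q);
    } else {
      hits = countOccurrences(String(value).toLowerCase(), q);
    }
    score += hits * weight;
  }
  return score;
}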
44.9186C74.132 47.9295 74.9458 50.859 76.4919 53.3816C79.1772 57.6944 88.2098 73.3996 88.9421 74.9457C89.6745 76.4919 90 77.2242 90 78.4448C89.6745 81.3743 88.3725 84.0597 86.2568 86.0127C84.2224 88.1284 81.5371 89.5118 78.689 89.7559C77.4684 89.7559 76.6546 89.4304 75.1899 88.698C73.7251 87.9656 57.7758 79.1772 53.4629 76.4919C53.1374 76.3291 52.8119 76.085 52.4051 75.9222L31.085 63.3092C31.5732 67.3779 33.3635 71.2839 36.2929 74.132C36.8626 74.7016 37.4322 75.1899 38.0832 75.6781C37.5949 75.9222 37.0253 76.1664 36.5371 76.4919C32.2242 79.1772 16.519 88.2098 14.9729 88.9421C13.4268 89.6745 12.6944 90 11.3924 90C8.46293 89.6745 5.77758 88.3725 3.82459 86.2568C1.70886 84.2224 0.325497 81.5371 0 78.6076C0.0813743 77.387 0.406872 76.1664 1.05787 75.1085C1.79024 73.5624 10.8228 57.8571 13.5081 53.5443C15.0542 51.0217 15.868 48.0922 15.868 45.0814C15.868 42.0705 15.0542 39.141 13.5081 36.6184C10.8228 32.1429 1.70886 16.4376 1.05787 14.8915C0.406872 13.8336 0.0813743 12.613 0 11.3924C0.325497 8.46293 1.62749 5.77758 3.74322 3.74322C5.77758 1.62749 8.46293 0.325497 11.3924 0C12.613 0.0813743 13.8336 0.406872 14.9729 1.05787C16.2749 1.62749 27.7486 8.30018 33.8517 11.8807L35.2351 12.6944C35.7233 13.0199 36.1302 13.264 36.4557 13.4268L37.1067 13.8336L58.8336 26.6908C58.3454 21.8083 55.8228 17.3327 51.9168 14.3219C52.4051 14.0778 52.9747 13.8336 53.4629 13.5081C57.7758 10.8228 73.481 1.70886 75.0271 1.05787C76.085 0.406872 77.3056 0.0813743 78.6076 0C81.4557 0.325497 84.1411 1.62749 86.1754 3.74322ZM46.1392 50.7776L50.7776 46.1392C51.4286 45.4882 51.4286 44.5118 50.7776 43.8608L46.1392 39.2224C45.4882 38.5714 44.5118 38.5714 43.8608 39.2224L39.2224 43.8608C38.5714 44.5118 38.5714 45.4882 39.2224 46.1392L43.8608 50.7776C44.4304 51.3472 45.4882 51.3472 46.1392 50.7776Z' fill='%23FF694A'/%3E %3C/svg%3E"},function(e,t,n){"use strict";n.r(t);var r=n(63),i=n.n(r);n(460),n(461),n(462),n(463),n(465);const o=n(9),a=(n(31),n(21));window.Prism=i.a,o.module("dbt").factory("code",["$sce",function(e){var t={copied:!1,highlight:function(t,n="sql"){if("sql"==n)var r=i.a.highlight(t,i.a.languages.sql,"sql");else if("python"==n)r=i.a.highlight(t,i.a.languages.python,"python");return e.trustAsHtml(r)},copy_to_clipboard:function(e){var t=document.createElement("textarea");t.value=e,t.setAttribute("readonly",""),t.style.position="absolute",t.style.left="-9999px",document.body.appendChild(t),t.select(),document.execCommand("copy"),document.body.removeChild(t)},generateSourceSQL:function(e){var t=["select"],n=a.size(e.columns),r=a.keys(e.columns);a.each(r,(function(e,r){var i=" "+e;r+1!=n&&(i+=","),t.push(i)}));const i=(e.database?e.database+".":"")+e.schema+"."+e.identifier;return t.push("from "+i),t.join("\n")},generateMetricSQL:function(e){if("derived"==e.calculation_method)return"-- derived\n"+e.expression;const t=[`select ${e.calculation_method}(${e.expression})`,`from {{ ${e.model} }}`];if(e.filters.length>0){const n=e.filters.map(e=>`${e.field} ${e.operator} ${e.value}`).join(" AND ");t.push("where "+n)}return t.join("\n")}};return 
t}])},function(e,t){Prism.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},identifier:{pattern:/(^|[^@\\])`(?:\\[\s\S]|[^`\\]|``)*`/,greedy:!0,lookbehind:!0,inside:{punctuation:/^`|`$/}},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:COL|_INSERT)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:ING|S)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:FALSE|NULL|TRUE)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|ILIKE|IN|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS 
LIKE|XOR)\b/i,punctuation:/[;[\]()`,.]/}},function(e,t){Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t ]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python},function(e,t){!function(){if("undefined"!=typeof Prism&&"undefined"!=typeof document){var e=/\n(?!$)/g,t=Prism.plugins.lineNumbers={getLine:function(e,t){if("PRE"===e.tagName&&e.classList.contains("line-numbers")){var n=e.querySelector(".line-numbers-rows");if(n){var r=parseInt(e.getAttribute("data-start"),10)||1,i=r+(n.children.length-1);ti&&(t=i);var o=t-r;return n.children[o]}}},resize:function(e){r([e])},assumeViewportIndependence:!0},n=void 0;window.addEventListener("resize",(function(){t.assumeViewportIndependence&&n===window.innerWidth||(n=window.innerWidth,r(Array.prototype.slice.call(document.querySelectorAll("pre.line-numbers"))))})),Prism.hooks.add("complete",(function(t){if(t.code){var n=t.element,i=n.parentNode;if(i&&/pre/i.test(i.nodeName)&&!n.querySelector(".line-numbers-rows")&&Prism.util.isActive(n,"line-numbers")){n.classList.remove("line-numbers"),i.classList.add("line-numbers");var o,a=t.code.match(e),s=a?a.length+1:1,l=new Array(s+1).join("");(o=document.createElement("span")).setAttribute("aria-hidden","true"),o.className="line-numbers-rows",o.innerHTML=l,i.hasAttribute("data-start")&&(i.style.counterReset="linenumber "+(parseInt(i.getAttribute("data-start"),10)-1)),t.element.appendChild(o),r([i]),Prism.hooks.run("line-numbers",t)}}})),Prism.hooks.add("line-numbers",(function(e){e.plugins=e.plugins||{},e.plugins.lineNumbers=!0}))}function 
r(t){if(0!=(t=t.filter((function(e){var t=function(e){if(!e)return null;return window.getComputedStyle?getComputedStyle(e):e.currentStyle||null}(e)["white-space"];return"pre-wrap"===t||"pre-line"===t}))).length){var n=t.map((function(t){var n=t.querySelector("code"),r=t.querySelector(".line-numbers-rows");if(n&&r){var i=t.querySelector(".line-numbers-sizer"),o=n.textContent.split(e);i||((i=document.createElement("span")).className="line-numbers-sizer",n.appendChild(i)),i.innerHTML="0",i.style.display="block";var a=i.getBoundingClientRect().height;return i.innerHTML="",{element:t,lines:o,lineHeights:[],oneLinerHeight:a,sizer:i}}})).filter(Boolean);n.forEach((function(e){var t=e.sizer,n=e.lines,r=e.lineHeights,i=e.oneLinerHeight;r[n.length-1]=void 0,n.forEach((function(e,n){if(e&&e.length>1){var o=t.appendChild(document.createElement("span"));o.style.display="block",o.textContent=e}else r[n]=i}))})),n.forEach((function(e){for(var t=e.sizer,n=e.lineHeights,r=0,i=0;i code {\n\tposition: relative;\n\twhite-space: inherit;\n}\n\n.line-numbers .line-numbers-rows {\n\tposition: absolute;\n\tpointer-events: none;\n\ttop: 0;\n\tfont-size: 100%;\n\tleft: -3.8em;\n\twidth: 3em; /* works for line-numbers below 1000 lines */\n\tletter-spacing: -1px;\n\tborder-right: 1px solid #999;\n\n\t-webkit-user-select: none;\n\t-moz-user-select: none;\n\t-ms-user-select: none;\n\tuser-select: none;\n\n}\n\n\t.line-numbers-rows > span {\n\t\tdisplay: block;\n\t\tcounter-increment: linenumber;\n\t}\n\n\t\t.line-numbers-rows > span:before {\n\t\t\tcontent: counter(linenumber);\n\t\t\tcolor: #999;\n\t\t\tdisplay: block;\n\t\t\tpadding-right: 0.8em;\n\t\t\ttext-align: right;\n\t\t}\n',""])},function(e,t,n){var r=n(466);"string"==typeof r&&(r=[[e.i,r,""]]);var i={hmr:!0,transform:void 0,insertInto:void 0};n(40)(r,i);r.locals&&(e.exports=r.locals)},function(e,t,n){(e.exports=n(39)(!1)).push([e.i,'/**\n * GHColors theme by Avi Aryan (http://aviaryan.in)\n * Inspired by Github syntax coloring\n */\n\ncode[class*="language-"],\npre[class*="language-"] {\n\tcolor: #393A34;\n\tfont-family: "Consolas", "Bitstream Vera Sans Mono", "Courier New", Courier, monospace;\n\tdirection: ltr;\n\ttext-align: left;\n\twhite-space: pre;\n\tword-spacing: normal;\n\tword-break: normal;\n\tfont-size: .9em;\n\tline-height: 1.2em;\n\n\t-moz-tab-size: 4;\n\t-o-tab-size: 4;\n\ttab-size: 4;\n\n\t-webkit-hyphens: none;\n\t-moz-hyphens: none;\n\t-ms-hyphens: none;\n\thyphens: none;\n}\n\npre > code[class*="language-"] {\n\tfont-size: 1em;\n}\n\npre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,\ncode[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {\n\tbackground: #b3d4fc;\n}\n\npre[class*="language-"]::selection, pre[class*="language-"] ::selection,\ncode[class*="language-"]::selection, code[class*="language-"] ::selection {\n\tbackground: #b3d4fc;\n}\n\n/* Code blocks */\npre[class*="language-"] {\n\tpadding: 1em;\n\tmargin: .5em 0;\n\toverflow: auto;\n\tborder: 1px solid #dddddd;\n\tbackground-color: white;\n}\n\n/* Inline code */\n:not(pre) > code[class*="language-"] {\n\tpadding: .2em;\n\tpadding-top: 1px;\n\tpadding-bottom: 1px;\n\tbackground: #f8f8f8;\n\tborder: 1px solid #dddddd;\n}\n\n.token.comment,\n.token.prolog,\n.token.doctype,\n.token.cdata {\n\tcolor: #999988;\n\tfont-style: italic;\n}\n\n.token.namespace {\n\topacity: .7;\n}\n\n.token.string,\n.token.attr-value {\n\tcolor: #e3116c;\n}\n\n.token.punctuation,\n.token.operator {\n\tcolor: #393A34; /* no highlight 
*/\n}\n\n.token.entity,\n.token.url,\n.token.symbol,\n.token.number,\n.token.boolean,\n.token.variable,\n.token.constant,\n.token.property,\n.token.regex,\n.token.inserted {\n\tcolor: #36acaa;\n}\n\n.token.atrule,\n.token.keyword,\n.token.attr-name,\n.language-autohotkey .token.selector {\n\tcolor: #00a4db;\n}\n\n.token.function,\n.token.deleted,\n.language-autohotkey .token.tag {\n\tcolor: #9a050f;\n}\n\n.token.tag,\n.token.selector,\n.language-autohotkey .token.keyword {\n\tcolor: #00009f;\n}\n\n.token.important,\n.token.function,\n.token.bold {\n\tfont-weight: bold;\n}\n\n.token.italic {\n\tfont-style: italic;\n}\n',""])},function(e,t,n){n(31);const r=n(21),i=n(148),o=n(203),a=n(468);angular.module("dbt").factory("graph",["$state","$window","$q","selectorService","project","locationService",function(e,t,n,s,l,c){var u={vertical:{userPanningEnabled:!1,boxSelectionEnabled:!1,maxZoom:1.5},horizontal:{userPanningEnabled:!0,boxSelectionEnabled:!1,maxZoom:1,minZoom:.05}},d={none:{name:"null"},left_right:{name:"dagre",rankDir:"LR",rankSep:200,edgeSep:30,nodeSep:50},top_down:{name:"preset",positions:function(t){var n=e.params.unique_id;if(!n)return{x:0,y:0};var a=f.graph.pristine.dag,s=r.sortBy(o.ancestorNodes(a,n,1)),l=r.sortBy(o.descendentNodes(a,n,1)),c=r.partial(r.includes,s),u=r.partial(r.includes,l),d=a.filterNodes(c),p=a.filterNodes(u);return function(e,t,n,i){console.log("Getting position for ",i,". Primary: ",e);var o,a=100/(1+Math.max(t.length,n.length));if(e==i)return{x:0,y:0};if(r.includes(t,i))o={set:t,index:r.indexOf(t,i),factor:-1,type:"parent"};else{if(!r.includes(n,i))return{x:0,y:0};o={set:n,index:r.indexOf(n,i),factor:1,type:"child"}}var s=o.set.length;if("parent"==o.type)var l={x:(0+o.index)*a,y:-200-100*(s-o.index-1)};else l={x:(0+o.index)*a,y:200+100*(s-o.index-1)};return l}(n,i.alg.topsort(d),i.alg.topsort(p).reverse(),t.data("id"))}}},f={loading:!0,loaded:n.defer(),graph_element:null,orientation:"sidebar",expanded:!1,graph:{options:u.vertical,pristine:{nodes:{},edges:{},dag:null},elements:[],layout:d.none,style:[{selector:"edge.vertical",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#027599","arrow-scale":1.5,"line-color":"#027599",width:3,"target-distance-from-node":"5px","source-endpoint":"0% 50%","target-endpoint":"0deg"}},{selector:"edge.horizontal",style:{"curve-style":"unbundled-bezier","target-arrow-shape":"triangle-backcurve","target-arrow-color":"#006f8a","arrow-scale":1.5,"target-distance-from-node":"10px","source-distance-from-node":"5px","line-color":"#006f8a",width:3,"source-endpoint":"50% 0%","target-endpoint":"270deg"}},{selector:"edge[selected=1]",style:{"line-color":"#bd6bb6","target-arrow-color":"#bd6bb6","z-index":1}},{selector:'node[display="none"]',style:{display:"none"}},{selector:"node.vertical",style:{"text-margin-x":"5px","background-color":"#0094b3","font-size":"16px",shape:"ellipse",color:"#fff",width:"5px",height:"5px",padding:"5px",content:"data(label)","font-weight":300,"text-valign":"center","text-halign":"right"}},{selector:"node.horizontal",style:{"background-color":"#0094b3","font-size":"24px",shape:"roundrectangle",color:"#fff",width:"label",height:"label",padding:"12px",content:"data(label)","font-weight":300,"font-family":'-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Fira Sans", "Droid Sans", "Helvetica Neue", Helvetica, Arial, 
sans-serif',"text-valign":"center","text-halign":"center",ghost:"yes","ghost-offset-x":"2px","ghost-offset-y":"4px","ghost-opacity":.5,"text-outline-color":"#000","text-outline-width":"1px","text-outline-opacity":.2}},{selector:'node[resource_type="source"]',style:{"background-color":"#5fb825"}},{selector:'node[resource_type="exposure"]',style:{"background-color":"#ff694b"}},{selector:'node[resource_type="metric"]',style:{"background-color":"#ff5688"}},{selector:'node[language="python"]',style:{"background-color":"#6a5acd"}},{selector:"node[node_color]",style:{"background-color":"data(node_color)"}},{selector:"node[selected=1]",style:{"background-color":"#bd6bb6"}},{selector:"node.horizontal[selected=1]",style:{"background-color":"#88447d"}},{selector:"node.horizontal.dirty",style:{"background-color":"#919599"}},{selector:"node[hidden=1]",style:{"background-color":"#919599","background-opacity":.5}}],ready:function(e){console.log("graph ready")}}};function p(e,t,n){var i=r.map(e,(function(e){return f.graph.pristine.nodes[e]})),o=[];r.flatten(r.each(e,(function(t){var n=f.graph.pristine.edges[t];r.each(n,(function(t){r.includes(e,t.data.target)&&r.includes(e,t.data.source)&&o.push(t)}))})));var s=r.compact(i).concat(r.compact(o));return r.each(f.graph.elements,(function(e){e.data.display="none",e.data.selected=0,e.data.hidden=0,e.classes=n})),r.each(s,(function(e){e.data.display="element",e.classes=n,t&&r.includes(t,e.data.unique_id)&&(e.data.selected=1),r.get(e,["data","docs","show"],!0)||(e.data.hidden=1);var i=r.get(e,["data","docs","node_color"]);i&&a.isValidColor(i)&&(e.data.node_color=i)})),f.graph.elements=r.filter(s,(function(e){return"element"==e.data.display})),e}function h(e,t,n){var r=f.graph.pristine.dag;if(r){var i=f.graph.pristine.nodes,o=s.selectNodes(r,i,e),a=n?o.matched:[];return p(o.selected,a,t)}}return f.setGraphReady=function(e){f.loading=!1,f.loaded.resolve(),f.graph_element=e},f.ready=function(e){f.loaded.promise.then((function(){e(f)}))},f.manifest={},f.packages=[],f.selected_node=null,f.getCanvasHeight=function(){return.8*t.innerHeight+"px"},l.ready((function(e){f.manifest=e,f.packages=r.uniq(r.map(f.manifest.nodes,"package_name")),r.each(r.filter(f.manifest.nodes,(function(e){var t=r.includes(["model","seed","source","snapshot","analysis","exposure","metric","operation"],e.resource_type),n="test"==e.resource_type&&!e.hasOwnProperty("test_metadata");return t||n})),(function(e){var t={group:"nodes",data:r.assign(e,{parent:e.package_name,id:e.unique_id,is_group:"false"})};f.graph.pristine.nodes[e.unique_id]=t})),r.each(f.manifest.parent_map,(function(e,t){r.each(e,(function(e){var n=f.manifest.nodes[e],i=f.manifest.nodes[t];if(r.includes(["model","source","seed","snapshot","metric"],n.resource_type)&&("test"!=i.resource_type||!i.hasOwnProperty("test_metadata"))){var o=n.unique_id+"|"+i.unique_id,a={group:"edges",data:{source:n.unique_id,target:i.unique_id,unique_id:o}},s=i.unique_id;f.graph.pristine.edges[s]||(f.graph.pristine.edges[s]=[]),f.graph.pristine.edges[s].push(a)}}))}));var t=new 
i.Graph({directed:!0});r.each(f.graph.pristine.nodes,(function(e){t.setNode(e.data.unique_id,e.data.name)})),r.each(f.graph.pristine.edges,(function(e){r.each(e,(function(e){t.setEdge(e.data.source,e.data.target)}))})),f.graph.pristine.dag=t,f.graph.elements=r.flatten(r.values(f.graph.pristine.nodes).concat(r.values(f.graph.pristine.edges))),p(t.nodes())})),f.hideGraph=function(){f.orientation="sidebar",f.expanded=!1},f.showVerticalGraph=function(e,t){f.orientation="sidebar",t&&(f.expanded=!0);var n=h(r.assign({},s.options,{include:"+"+e+"+",exclude:"",hops:1}),"vertical",!0);return f.graph.layout=d.top_down,f.graph.options=u.vertical,n},f.showFullGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=r.assign({},s.options);e?(t.include="+"+e+"+",t.exclude=""):(t.include="",t.exclude="");var n=h(t,"horizontal",!0);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(t),n},f.updateGraph=function(e){f.orientation="fullscreen",f.expanded=!0;var t=h(e,"horizontal",!1);return f.graph.layout=d.left_right,f.graph.options=u.horizontal,c.setState(e),t},f.deselectNodes=function(){"fullscreen"==f.orientation&&f.graph_element.elements().data("selected",0)},f.selectNode=function(e){if("fullscreen"==f.orientation){f.graph.pristine.nodes[e];var t=f.graph.pristine.dag,n=r.indexBy(o.ancestorNodes(t,e)),i=r.indexBy(o.descendentNodes(t,e));n[e]=e,i[e]=e;var a=f.graph_element;r.each(f.graph.elements,(function(t){var r=a.$id(t.data.id);n[t.data.source]&&n[t.data.target]||i[t.data.source]&&i[t.data.target]||t.data.unique_id==e?r.data("selected",1):r.data("selected",0)}))}},f.markDirty=function(e){f.markAllClean(),r.each(e,(function(e){f.graph_element.$id(e).addClass("dirty")}))},f.markAllClean=function(){f.graph_element&&f.graph_element.elements().removeClass("dirty")},f}])},function(e,t,n){"use strict";n.r(t),n.d(t,"isValidColor",(function(){return i}));const r=new 
Set(["aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","ghostwhite","gold","goldenrod","gray","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightpink","lightsalmon","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumslateblue","mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","rebeccapurple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen"]);function i(e){if(!e)return!1;const t=e.trim().toLowerCase();if(""===t)return!1;const n=t.match(/^#([A-Fa-f0-9]{3}){1,2}$/),i=r.has(t);return Boolean(n)||i}},function(e,t,n){n(31);const r=n(21),i=n(470);angular.module("dbt").factory("selectorService",["$state",function(e){var t={include:"",exclude:"",packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"],depth:1},n={view_node:null,selection:{clean:r.clone(t),dirty:r.clone(t)},options:{packages:[],tags:[null],resource_types:["model","seed","snapshot","source","test","analysis","exposure","metric"]},init:function(e){r.each(e,(function(e,r){n.options[r]=e,t[r]=e,n.selection.clean[r]=e,n.selection.dirty[r]=e}))},resetSelection:function(e){var i={include:e&&r.includes(["model","seed","snapshot"],e.resource_type)?"+"+e.name+"+":e&&"source"==e.resource_type?"+source:"+e.source_name+"."+e.name+"+":e&&"exposure"==e.resource_type?"+exposure:"+e.name:e&&"metric"==e.resource_type?"+metric:"+e.name:e&&r.includes(["analysis","test"],e.resource_type)?"+"+e.name:""},o=r.assign({},t,i);n.selection.clean=r.clone(o),n.selection.dirty=r.clone(o),n.view_node=e},getViewNode:function(){return n.view_node},excludeNode:function(e,t){var r,i=n.selection.dirty.exclude,o=t.parents?"+":"",a=t.children?"+":"",s=i.length>0?" 
":"";"source"==e.resource_type?(o+="source:",r=e.source_name+"."+e.name):["exposure","metric"].indexOf(e.resource_type)>-1?(o+=e.resource_type+":",r=e.name):r=e.name;var l=i+s+o+r+a;return n.selection.dirty.exclude=l,n.updateSelection()},selectSource:function(e,t){var r="source:"+e+(t.children?"+":"");return n.selection.dirty.include=r,n.updateSelection()},clearViewNode:function(){n.view_node=null},isDirty:function(){return!r.isEqual(n.selection.clean,n.selection.dirty)},updateSelection:function(){return n.selection.clean=r.clone(n.selection.dirty),n.selection.clean},selectNodes:function(e,t,n){return i.selectNodes(e,t,n)}};return n}])},function(e,t,n){const r=n(21),i=n(471);function o(e,t){return t||(t=" "),r.filter(r.uniq(e.split(t)),(function(e){return e.length>0}))}function a(e){var t={raw:e,select_at:!1,select_children:!1,children_depth:null,select_parents:!1,parents_depth:null};const n=new RegExp(""+/^/.source+/(?(\@))?/.source+/(?((?(\d*))\+))?/.source+/((?([\w.]+)):)?/.source+/(?(.*?))/.source+/(?(\+(?(\d*))))?/.source+/$/.source).exec(e).groups;t.select_at="@"==n.childs_parents,t.select_parents=!!n.parents,t.select_children=!!n.children,n.parents_depth&&(t.parents_depth=parseInt(n.parents_depth)),n.children_depth&&(t.children_depth=parseInt(n.children_depth));var r=n.method,i=n.value;return r?-1!=r.indexOf(".")&&([r,selector_modifier]=r.split(".",2),i={config:selector_modifier,value:i}):r="implicit",t.selector_type=r,t.selector_value=i,t}function s(e){var t=o(e," ");return r.map(t,(function(e){var t=o(e,",");return t.length>1?{method:"intersect",selectors:r.map(t,a)}:{method:"none",selectors:r.map([e],a)}}))}function l(e,t){var n=s(e),i=null,o=null;return r.each(n,(function(e){var n="intersect"==e.method?r.intersection:r.union;r.each(e.selectors,(function(e){var r=t(e);null===i?(i=r.matched,o=r.selected):(i=n(i,r.matched),o=n(o,r.selected))}))})),{matched:i||[],selected:o||[]}}e.exports={splitSpecs:o,parseSpec:a,parseSpecs:s,buildSpec:function(e,t,n){return{include:s(e),exclude:s(t),hops:n}},applySpec:l,selectNodes:function(e,t,n){n.include,n.exclude;var o,a=r.partial(i.getNodesFromSpec,e,t,n.hops);r.values(t),o=0==n.include.trim().length?{selected:e.nodes(),matched:[]}:l(n.include,a);var s=l(n.exclude,a),c=o.selected,u=o.matched;c=r.difference(c,s.selected),u=r.difference(u,s.matched);var d=[];return r.each(c,(function(e){var i=t[e];i.data.tags||(i.data.tags=[]);var o=r.includes(n.packages,i.data.package_name),a=r.intersection(n.tags,i.data.tags).length>0,s=r.includes(n.tags,null)&&0==i.data.tags.length,l=r.includes(n.resource_types,i.data.resource_type);o&&(a||s)&&l||d.push(i.data.unique_id)})),{selected:r.difference(c,d),matched:r.difference(u,d)}}}},function(e,t,n){const r=n(21),i=n(203);var o="fqn",a="tag",s="source",l="exposure",c="metric",u="path",d="file",f="package",p="config",h="test_name",g="test_type",m={};function v(e,t){if(t===r.last(e))return!0;var n=e.reduce((e,t)=>e.concat(t.split(".")),[]),i=t.split(".");if(n.length-1||!r.hasOwnProperty("test_metadata")&&["data","singular"].indexOf(t)>-1)&&n.push(r)})),n}function $(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("source"==r.resource_type){var i,o,a=r.source_name,s=r.name;-1!=t.indexOf(".")?[i,o]=t.split(".",2):(i=t,o=null),("*"==i||i==a&&"*"===o||i==a&&o===s||i==a&&null===o)&&n.push(e.data)}})),n}m["implicit"]=function(e,t){var n=b(e,t),i=y(e,t),o=[];t.toLowerCase().endsWith(".sql")&&(o=x(e,t));var a=r.uniq([].concat(r.map(n,"unique_id"),r.map(i,"unique_id"),r.map(o,"unique_id")));return 
r.map(a,t=>e[t].data)},m[o]=b,m[a]=w,m[s]=$,m[l]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("exposure"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[c]=function(e,t){var n=[];return r.each(e,(function(e){var r=e.data;if("metric"==r.resource_type){var i=r.name;("*"==t||t==i)&&n.push(e.data)}})),n},m[u]=y,m[d]=x,m[f]=k,m[p]=A,m[h]=E,m[g]=S,e.exports={isFQNMatch:v,getNodesByFQN:b,getNodesByTag:w,getNodesBySource:$,getNodesByPath:y,getNodesByPackage:k,getNodesByConfig:A,getNodesByTestName:E,getNodesByTestType:S,getNodesFromSpec:function(e,t,n,o){const a=m[o.selector_type];if(!a)return console.log("Node matcher for selector",o.selector_type,"is invalid"),{selected:[],matched:[]};var s=a(t,o.selector_value),l=[],c=[];return r.each(s,(function(t){var a=t.unique_id;c.push(t.unique_id);var s=[],u=[],d=[];if(o.select_at&&(d=r.union(i.selectAt(e,a))),o.select_parents){var f=n||o.parents_depth;s=i.ancestorNodes(e,a,f)}if(o.select_children){f=n||o.children_depth;u=i.descendentNodes(e,a,f)}l=r.union([a],l,u,s,d)})),{selected:l,matched:c}}}},function(e,t,n){const r=n(9);n(473);r.module("dbt").factory("trackingService",["$location","selectorService","$rootScope",function(e,t,n){var r={initialized:!1,snowplow:null,project_id:null,init:function(e){r.initialized||(r.initialized=!0,r.project_id=e.project_id,!0===e.track&&r.turn_on_tracking())},isHosted:function(){return window.location.hostname.indexOf(".getdbt.com")>-1},turn_on_tracking:function(){var e,t,n,i,o,a;e=window,t=document,n="script",e[i="snowplow"]||(e.GlobalSnowplowNamespace=e.GlobalSnowplowNamespace||[],e.GlobalSnowplowNamespace.push(i),e[i]=function(){(e[i].q=e[i].q||[]).push(arguments)},e[i].q=e[i].q||[],o=t.createElement(n),a=t.getElementsByTagName(n)[0],o.async=1,o.src="//d1fc8wv8zag5ca.cloudfront.net/2.9.0/sp.js",a.parentNode.insertBefore(o,a));var s={appId:"dbt-docs",forceSecureTracker:!0,respectDoNotTrack:!0,userFingerprint:!1,contexts:{webPage:!0}};r.isHosted()&&(s.cookieDomain=".getdbt.com"),r.snowplow=window.snowplow,r.snowplow("newTracker","sp","fishtownanalytics.sinter-collect.com",s),r.snowplow("enableActivityTracking",30,30),r.track_pageview()},fuzzUrls:function(){r.isHosted()||(r.snowplow("setCustomUrl","https://fuzzed.getdbt.com/"),r.snowplow("setReferrerUrl","https://fuzzed.getdbt.com/"))},getContext:function(){return[{schema:"iglu:com.dbt/dbt_docs/jsonschema/1-0-0",data:{is_cloud_hosted:r.isHosted(),core_project_id:r.project_id}}]},track_pageview:function(){if(r.snowplow){r.fuzzUrls();r.snowplow("trackPageView",null,r.getContext())}},track_event:function(e,t,n,i){r.snowplow&&(r.fuzzUrls(),r.snowplow("trackStructEvent","dbt-docs",e,t,n,i,r.getContext()))},track_graph_interaction:function(e,t){r.snowplow&&(r.fuzzUrls(),r.track_event("graph","interact",e,t))}};return r}])},function(e,t,n){var r,i,o,a,s;r=n(474),i=n(204).utf8,o=n(475),a=n(204).bin,(s=function(e,t){e.constructor==String?e=t&&"binary"===t.encoding?a.stringToBytes(e):i.stringToBytes(e):o(e)?e=Array.prototype.slice.call(e,0):Array.isArray(e)||e.constructor===Uint8Array||(e=e.toString());for(var n=r.bytesToWords(e),l=8*e.length,c=1732584193,u=-271733879,d=-1732584194,f=271733878,p=0;p>>24)|4278255360&(n[p]<<24|n[p]>>>8);n[l>>>5]|=128<>>9<<4)]=l;var h=s._ff,g=s._gg,m=s._hh,v=s._ii;for(p=0;p>>0,u=u+y>>>0,d=d+x>>>0,f=f+w>>>0}return r.endian([c,u,d,f])})._ff=function(e,t,n,r,i,o,a){var s=e+(t&n|~t&r)+(i>>>0)+a;return(s<>>32-o)+t},s._gg=function(e,t,n,r,i,o,a){var 
From 987764858b81b95d8b93dcce67a4a9aace532ba3 Mon Sep 17 00:00:00 2001
From: leahwicz <60146280+leahwicz@users.noreply.github.com>
Date: Thu, 17 Nov 2022 09:14:22 -0500
Subject: [PATCH 034/156] Revert "Bump python from 3.10.7-slim-bullseye to 3.11.0-slim-bullseye in /docker (#6180)" (#6281)

This reverts commit 8e28f5906e60f7c97e874e9dd5962625267d336e.
---
 .changes/unreleased/Dependency-20221031-000329.yaml | 7 -------
 docker/Dockerfile                                   | 2 +-
 2 files changed, 1 insertion(+), 8 deletions(-)
 delete mode 100644 .changes/unreleased/Dependency-20221031-000329.yaml

diff --git a/.changes/unreleased/Dependency-20221031-000329.yaml b/.changes/unreleased/Dependency-20221031-000329.yaml
deleted file mode 100644
index 6d19e098e3f..00000000000
--- a/.changes/unreleased/Dependency-20221031-000329.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-kind: "Dependency"
-body: "Bump python from 3.10.7-slim-bullseye to 3.11.0-slim-bullseye in /docker"
-time: 2022-10-31T00:03:29.00000Z
-custom:
-  Author: dependabot[bot]
-  Issue: 4904
-  PR: 6180

diff --git a/docker/Dockerfile b/docker/Dockerfile
index afda5e9ce72..8d3756ca786 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -9,7 +9,7 @@ ARG build_for=linux/amd64
 ##
 # base image (abstract)
 ##
-FROM --platform=$build_for python:3.11.0-slim-bullseye as base
+FROM --platform=$build_for python:3.10.7-slim-bullseye as base
 
 # N.B. The refs updated automagically every release via bumpversion
 # N.B. dbt-postgres is currently found in the core codebase so a value of dbt-core@ is correct

From 517576c088c6fa54c5fa510c455b924d735d63f5 Mon Sep 17 00:00:00 2001
From: Emily Rockman
Date: Mon, 21 Nov 2022 23:20:55 -0600
Subject: [PATCH 035/156] add back in conditional node length check (#6298)

---
 core/dbt/graph/selector.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py
index 89de27b3697..13a3ae0a952 100644
--- a/core/dbt/graph/selector.py
+++ b/core/dbt/graph/selector.py
@@ -136,7 +136,7 @@ def select_nodes_recursively(self, spec: SelectionSpec) -> Tuple[Set[UniqueId],
 
         direct_nodes = self.incorporate_indirect_nodes(initial_direct, indirect_nodes)
 
-        if spec.expect_exists:
+        if spec.expect_exists and len(direct_nodes) == 0:
             warn_or_error(NoNodesForSelectionCriteria(spec_raw=str(spec.raw)))
 
         return direct_nodes, indirect_nodes
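Read in isolation, the one-line diff above is easy to invert, so here is the behavior it restores in a runnable sketch. `SelectionSpec` and `warn_or_error` below are stand-ins with dbt-like names, not the real implementations: the point is that the "no nodes matched" warning should fire only when a spec that expects matches selects nothing, rather than on every selection as the un-guarded condition did.

```python
from dataclasses import dataclass


@dataclass
class SelectionSpec:  # stand-in for dbt's SelectionSpec
    raw: str
    expect_exists: bool = False


def warn_or_error(msg: str) -> None:  # stand-in for dbt's warn_or_error
    print(f"WARNING: {msg}")


def select_nodes(spec: SelectionSpec, direct_nodes: set) -> set:
    # Before #6298 the warning fired whenever expect_exists was set,
    # even when the selector had matched nodes. The restored length
    # check makes it fire only for a genuinely empty selection.
    if spec.expect_exists and len(direct_nodes) == 0:
        warn_or_error(f"Nothing to do: no nodes matched '{spec.raw}'")
    return direct_nodes


select_nodes(SelectionSpec("tag:nightly", expect_exists=True), set())        # warns
select_nodes(SelectionSpec("tag:nightly", expect_exists=True), {"model.a"})  # silent
```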
From 7d7066466db4aa34ec89f4f75d039f57b65fbf1e Mon Sep 17 00:00:00 2001
From: Gerda Shank
Date: Tue, 22 Nov 2022 14:54:20 -0500
Subject: [PATCH 036/156] CT 1537 fix event test and rename a couple of fields (#6293)

* Rename MacroEvent to JinjaLog
* Rename ConnectionClosed/2
* Fix LogSeedResult
* Rename ConnectionLeftOpen events, fix test_events.py
* Update events README.md, add "category" to EventInfo
* Rename GeneralMacroWarning to JinjaLogWarning
---
 core/dbt/adapters/base/connections.py |  12 +-
 core/dbt/context/base.py              |   6 +-
 core/dbt/events/README.md             |   7 +-
 core/dbt/events/proto_types.py        |  26 +-
 core/dbt/events/test_types.py         |  15 -
 core/dbt/events/types.proto           |  26 +-
 core/dbt/events/types.py              | 371 +--------------------
 core/dbt/exceptions.py                |   4 +-
 core/dbt/task/seed.py                 |   1 +
 tests/unit/test_events.py             | 446 ++++++++++++++------------
 tests/unit/test_proto_events.py       |   2 +-
 11 files changed, 303 insertions(+), 613 deletions(-)

diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py
index 5fd3769aa74..ea7150a1b80 100644
--- a/core/dbt/adapters/base/connections.py
+++ b/core/dbt/adapters/base/connections.py
@@ -41,10 +41,10 @@
 from dbt.events.types import (
     NewConnection,
     ConnectionReused,
+    ConnectionLeftOpenInCleanup,
     ConnectionLeftOpen,
-    ConnectionLeftOpen2,
+    ConnectionClosedInCleanup,
     ConnectionClosed,
-    ConnectionClosed2,
     Rollback,
     RollbackFailed,
 )
@@ -306,9 +306,9 @@ def cleanup_all(self) -> None:
         with self.lock:
             for connection in self.thread_connections.values():
                 if connection.state not in {"closed", "init"}:
-                    fire_event(ConnectionLeftOpen(conn_name=cast_to_str(connection.name)))
+                    fire_event(ConnectionLeftOpenInCleanup(conn_name=cast_to_str(connection.name)))
                 else:
-                    fire_event(ConnectionClosed(conn_name=cast_to_str(connection.name)))
+                    fire_event(ConnectionClosedInCleanup(conn_name=cast_to_str(connection.name)))
                 self.close(connection)
 
         # garbage collect these connections
@@ -345,10 +345,10 @@ def _close_handle(cls, connection: Connection) -> None:
         """Perform the actual close operation."""
         # On windows, sometimes connection handles don't have a close() attr.
         if hasattr(connection.handle, "close"):
-            fire_event(ConnectionClosed2(conn_name=cast_to_str(connection.name)))
+            fire_event(ConnectionClosed(conn_name=cast_to_str(connection.name)))
             connection.handle.close()
         else:
-            fire_event(ConnectionLeftOpen2(conn_name=cast_to_str(connection.name)))
+            fire_event(ConnectionLeftOpen(conn_name=cast_to_str(connection.name)))
 
     @classmethod
     def _rollback(cls, connection: Connection) -> None:

diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py
index 68b5edb98c1..262ed45d3dc 100644
--- a/core/dbt/context/base.py
+++ b/core/dbt/context/base.py
@@ -16,7 +16,7 @@
     disallow_secret_env_var,
 )
 from dbt.events.functions import fire_event, get_invocation_id
-from dbt.events.types import MacroEventInfo, MacroEventDebug
+from dbt.events.types import JinjaLogInfo, JinjaLogDebug
 from dbt.version import __version__ as dbt_version
 
 # These modules are added to the context. Consider alternative
@@ -557,9 +557,9 @@ def log(msg: str, info: bool = False) -> str:
         {% endmacro %}"
         """
         if info:
-            fire_event(MacroEventInfo(msg=msg))
+            fire_event(JinjaLogInfo(msg=msg))
         else:
-            fire_event(MacroEventDebug(msg=msg))
+            fire_event(JinjaLogDebug(msg=msg))
         return ""
 
     @contextproperty

diff --git a/core/dbt/events/README.md b/core/dbt/events/README.md
index cadc59ab126..52edd7d35d4 100644
--- a/core/dbt/events/README.md
+++ b/core/dbt/events/README.md
@@ -8,9 +8,10 @@ The event module provides types that represent what is happening in dbt in `even
 When events are processed via `fire_event`, nearly everything is logged. Whether or not the user has enabled the debug flag, all debug messages are still logged to the file. However, some events are particularly time-consuming to construct because they return a huge amount of data. Today, the only messages in this category are cache events, and they are only logged if the `--log-cache-events` flag is on. This is important because these messages should not be created unless they are going to be logged, since they cause a noticeable performance degradation. These events use a "fire_event_if" function.
 
 # Adding a New Event
-New events need to have a proto message definition created in core/dbt/events/types.proto. Every message must include EventInfo as the first field, named "info" and numbered 1. To update the proto_types.py file, in the core/dbt/events directory: ```protoc --python_betterproto_out . types.proto```
-
-A matching class needs to be created in the core/dbt/events/types.py file, which will have two superclasses, the "Level" mixin and the generated class from proto_types.py. These classes will also generally have two methods, a "code" method that returns the event code, and a "message" method that is used to construct the "msg" from the event fields. In addition the "Level" mixin will provide a "level_tag" method to set the level (which can also be overridden using the "info" convenience function from functions.py)
+* Add a new message in types.proto with an EventInfo field first
+* Run the protoc compiler to update proto_types.py: ```protoc --python_betterproto_out . types.proto```
+* Add a wrapping class in core/dbt/events/types.py with a Level superclass and the superclass from proto_types.py, plus code and message methods
+* Add the class to tests/unit/test_events.py
 
 Note that no attributes can exist in these event classes except for fields defined in the protobuf definitions, because the betterproto metaclass will throw an error. Betterproto provides a to_dict() method to convert the generated classes to a dictionary and from that to json. However, some attributes will successfully convert to dictionaries but not to serialized protobufs, so we need to test both output formats.
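To make the README's new checklist concrete, here is a sketch of steps 1 and 3 for a hypothetical event. Everything named `DummyCacheFlush`, its `cache_name` field, and the code `Z051` is invented for illustration; the `Level` mixins live in `dbt.events.base_types` in this era of the codebase, and the snippet only runs once step 2 has regenerated `proto_types.py` so that `pt.DummyCacheFlush` actually exists.

```python
# Step 1 - types.proto (shown as a comment to keep this block in one language):
#
#     // Z051
#     message DummyCacheFlush {
#       EventInfo info = 1;
#       string cache_name = 2;
#     }
#
# Step 2 - regenerate the betterproto classes:
#     protoc --python_betterproto_out . types.proto
#
# Step 3 - wrap the generated dataclass in core/dbt/events/types.py:
from dataclasses import dataclass

from dbt.events.base_types import DebugLevel
from dbt.events import proto_types as pt


@dataclass
class DummyCacheFlush(DebugLevel, pt.DummyCacheFlush):  # hypothetical event
    def code(self):
        return "Z051"  # made-up code; must be unique (test_events.py enforces this)

    def message(self) -> str:
        # No extra attributes may be added on this class: only the
        # proto-defined fields (here, cache_name) survive the
        # betterproto metaclass.
        return f"Flushed cache {self.cache_name}"
```

Step 4 is then a one-line addition to `sample_values` in tests/unit/test_events.py, e.g. `DummyCacheFlush(cache_name="")`, so the serialization tests exercise both the dict and serialized-protobuf paths called out above.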
diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py
index 09d29b5563b..c4d195e8777 100644
--- a/core/dbt/events/proto_types.py
+++ b/core/dbt/events/proto_types.py
@@ -23,6 +23,7 @@ class EventInfo(betterproto.Message):
     extra: Dict[str, str] = betterproto.map_field(
         9, betterproto.TYPE_STRING, betterproto.TYPE_STRING
     )
+    category: str = betterproto.string_field(10)
 
 
 @dataclass
@@ -398,7 +399,7 @@ class ConnectionReused(betterproto.Message):
 
 
 @dataclass
-class ConnectionLeftOpen(betterproto.Message):
+class ConnectionLeftOpenInCleanup(betterproto.Message):
     """E007"""
 
     info: "EventInfo" = betterproto.message_field(1)
@@ -406,7 +407,7 @@
 
 
 @dataclass
-class ConnectionClosed(betterproto.Message):
+class ConnectionClosedInCleanup(betterproto.Message):
     """E008"""
 
     info: "EventInfo" = betterproto.message_field(1)
@@ -423,7 +424,7 @@ class RollbackFailed(betterproto.Message):
 
 
 @dataclass
-class ConnectionClosed2(betterproto.Message):
+class ConnectionClosed(betterproto.Message):
     """E010"""
 
     info: "EventInfo" = betterproto.message_field(1)
@@ -431,7 +432,7 @@
 
 
 @dataclass
-class ConnectionLeftOpen2(betterproto.Message):
+class ConnectionLeftOpen(betterproto.Message):
     """E011"""
 
     info: "EventInfo" = betterproto.message_field(1)
@@ -1242,7 +1243,7 @@ class NodeNotFoundOrDisabled(betterproto.Message):
 
 
 @dataclass
-class GeneralMacroWarning(betterproto.Message):
+class JinjaLogWarning(betterproto.Message):
     """I061"""
 
     info: "EventInfo" = betterproto.message_field(1)
@@ -1334,7 +1335,7 @@ class SelectorReportInvalidSelector(betterproto.Message):
 
 
 @dataclass
-class MacroEventInfo(betterproto.Message):
+class JinjaLogInfo(betterproto.Message):
     """M011"""
 
     info: "EventInfo" = betterproto.message_field(1)
@@ -1342,7 +1343,7 @@
 
 
 @dataclass
-class MacroEventDebug(betterproto.Message):
+class JinjaLogDebug(betterproto.Message):
     """M012"""
 
     info: "EventInfo" = betterproto.message_field(1)
@@ -1602,11 +1603,12 @@ class LogSeedResult(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
     node_info: "NodeInfo" = betterproto.message_field(2)
     status: str = betterproto.string_field(3)
-    index: int = betterproto.int32_field(4)
-    total: int = betterproto.int32_field(5)
-    execution_time: float = betterproto.float_field(6)
-    schema: str = betterproto.string_field(7)
-    relation: str = betterproto.string_field(8)
+    result_message: str = betterproto.string_field(4)
+    index: int = betterproto.int32_field(5)
+    total: int = betterproto.int32_field(6)
+    execution_time: float = betterproto.float_field(7)
+    schema: str = betterproto.string_field(8)
+    relation: str = betterproto.string_field(9)
 
 
 @dataclass

diff --git a/core/dbt/events/test_types.py b/core/dbt/events/test_types.py
index 5f4a10cd7d7..cf7307125ca 100644
--- a/core/dbt/events/test_types.py
+++ b/core/dbt/events/test_types.py
@@ -61,18 +61,3 @@ def code(self):
 
     def message(self) -> str:
         return f"Unit Test: {self.msg}"
-
-
-# since mypy doesn't run on every file we need to suggest to mypy that every
-# class gets instantiated. But we don't actually want to run this code.
-# making the conditional `if False` causes mypy to skip it as dead code so -# we need to skirt around that by computing something it doesn't check statically. -# -# TODO remove these lines once we run mypy everywhere. -if 1 == 0: - IntegrationTestInfo(msg="") - IntegrationTestDebug(msg="") - IntegrationTestWarn(msg="") - IntegrationTestError(msg="") - IntegrationTestException(msg="") - UnitTestInfo(msg="") diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 8cafe71bd95..2666a3565e0 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -15,6 +15,7 @@ message EventInfo { string thread = 7; google.protobuf.Timestamp ts = 8; map extra = 9; + string category = 10; } // TimingInfo @@ -308,13 +309,13 @@ message ConnectionReused { } // E007 -message ConnectionLeftOpen { +message ConnectionLeftOpenInCleanup { EventInfo info = 1; string conn_name = 2; } // E008 -message ConnectionClosed { +message ConnectionClosedInCleanup { EventInfo info = 1; string conn_name = 2; } @@ -327,13 +328,13 @@ message RollbackFailed { } // E010 -message ConnectionClosed2 { +message ConnectionClosed { EventInfo info = 1; string conn_name = 2; } // E011 -message ConnectionLeftOpen2 { +message ConnectionLeftOpen { EventInfo info = 1; string conn_name = 2; } @@ -943,7 +944,7 @@ message NodeNotFoundOrDisabled { } // I061 -message GeneralMacroWarning { +message JinjaLogWarning { EventInfo info = 1; string msg = 2; } @@ -1015,13 +1016,13 @@ message SelectorReportInvalidSelector { } // M011 -message MacroEventInfo { +message JinjaLogInfo { EventInfo info = 1; string msg = 2; } // M012 -message MacroEventDebug { +message JinjaLogDebug { EventInfo info = 1; string msg = 2; } @@ -1227,11 +1228,12 @@ message LogSeedResult { EventInfo info = 1; NodeInfo node_info = 2; string status = 3; - int32 index = 4; - int32 total = 5; - float execution_time = 6; - string schema = 7; - string relation = 8; + string result_message = 4; + int32 index = 5; + int32 total = 6; + float execution_time = 7; + string schema = 8; + string relation = 9; } // Skipped Q017 diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index de738df9487..55439e0ec15 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -16,7 +16,7 @@ # The generated classes quote the included message classes, requiring the following line from dbt.events.proto_types import EventInfo, RunResultMsg, ListOfStrings # noqa -from dbt.events.proto_types import NodeInfo, ReferenceKeyMsg +from dbt.events.proto_types import NodeInfo, ReferenceKeyMsg # noqa from dbt.events import proto_types as pt from dbt.node_types import NodeType @@ -476,7 +476,7 @@ def message(self) -> str: @dataclass -class ConnectionLeftOpen(DebugLevel, pt.ConnectionLeftOpen): +class ConnectionLeftOpenInCleanup(DebugLevel, pt.ConnectionLeftOpenInCleanup): def code(self): return "E007" @@ -485,7 +485,7 @@ def message(self) -> str: @dataclass -class ConnectionClosed(DebugLevel, pt.ConnectionClosed): +class ConnectionClosedInCleanup(DebugLevel, pt.ConnectionClosedInCleanup): def code(self): return "E008" @@ -504,7 +504,7 @@ def message(self) -> str: # TODO: can we combine this with ConnectionClosed? @dataclass -class ConnectionClosed2(DebugLevel, pt.ConnectionClosed2): +class ConnectionClosed(DebugLevel, pt.ConnectionClosed): def code(self): return "E010" @@ -514,7 +514,7 @@ def message(self) -> str: # TODO: can we combine this with ConnectionLeftOpen? 
@dataclass -class ConnectionLeftOpen2(DebugLevel, pt.ConnectionLeftOpen2): +class ConnectionLeftOpen(DebugLevel, pt.ConnectionLeftOpen): def code(self): return "E011" @@ -1519,7 +1519,7 @@ def message(self) -> str: @dataclass -class GeneralMacroWarning(WarnLevel, pt.GeneralMacroWarning): +class JinjaLogWarning(WarnLevel, pt.JinjaLogWarning): def code(self): return "I061" @@ -1626,7 +1626,7 @@ def message(self) -> str: @dataclass -class MacroEventInfo(InfoLevel, EventStringFunctor, pt.MacroEventInfo): +class JinjaLogInfo(InfoLevel, EventStringFunctor, pt.JinjaLogInfo): def code(self): return "M011" @@ -1636,7 +1636,7 @@ def message(self) -> str: @dataclass -class MacroEventDebug(DebugLevel, EventStringFunctor, pt.MacroEventDebug): +class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug): def code(self): return "M012" @@ -1999,7 +1999,7 @@ def message(self) -> str: status = red(self.status.upper()) else: info = "OK loaded" - status = green(self.status) + status = green(self.result_message) msg = f"{info} seed file {self.schema}.{self.relation}" return format_fancy_output_line( msg=msg, @@ -2731,356 +2731,3 @@ def code(self): def message(self) -> str: # This is the message on the result object, cannot be formatted in event return self.msg - - -# since mypy doesn't run on every file we need to suggest to mypy that every -# class gets instantiated. But we don't actually want to run this code. -# making the conditional `if False` causes mypy to skip it as dead code so -# we need to skirt around that by computing something it doesn't check statically. -# -# TODO remove these lines once we run mypy everywhere. -if 1 == 0: - - # A - pre-project loading - MainReportVersion(version="") - MainReportArgs(args={}) - MainTrackingUserState(user_state="") - MergedFromState(num_merged=0, sample=[]) - MissingProfileTarget(profile_name="", target_name="") - InvalidVarsYAML() - DbtProjectError() - DbtProjectErrorException(exc="") - DbtProfileError() - DbtProfileErrorException(exc="") - ProfileListTitle() - ListSingleProfile(profile="") - NoDefinedProfiles() - ProfileHelpMessage() - StarterProjectPath(dir="") - ConfigFolderDirectory(dir="") - NoSampleProfileFound(adapter="") - ProfileWrittenWithSample(name="", path="") - ProfileWrittenWithTargetTemplateYAML(name="", path="") - ProfileWrittenWithProjectTemplateYAML(name="", path="") - SettingUpProfile() - InvalidProfileTemplateYAML() - ProjectNameAlreadyExists(name="") - ProjectCreated(project_name="") - - # D - Deprecations ====================== - PackageRedirectDeprecation(old_name="", new_name="") - PackageInstallPathDeprecation() - ConfigSourcePathDeprecation(deprecated_path="", exp_path="") - ConfigDataPathDeprecation(deprecated_path="", exp_path="") - AdapterDeprecationWarning(old_name="", new_name="") - MetricAttributesRenamed(metric_name="") - ExposureNameDeprecation(exposure="") - - # E - DB Adapter ====================== - AdapterEventDebug() - AdapterEventInfo() - AdapterEventWarning() - AdapterEventError() - NewConnection(conn_type="", conn_name="") - ConnectionReused(conn_name="") - ConnectionLeftOpen(conn_name="") - ConnectionClosed(conn_name="") - RollbackFailed(conn_name="") - ConnectionClosed2(conn_name="") - ConnectionLeftOpen2(conn_name="") - Rollback(conn_name="") - CacheMiss(conn_name="", database="", schema="") - ListRelations(database="", schema="") - ConnectionUsed(conn_type="", conn_name="") - SQLQuery(conn_name="", sql="") - SQLQueryStatus(status="", elapsed=0.1) - SQLCommit(conn_name="") - ColTypeChange( - orig_type="", 
new_type="", table=ReferenceKeyMsg(database="", schema="", identifier="") - ) - SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - UncachedRelation( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - AddLink( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - AddRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - DropMissingRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")) - DropCascade( - dropped=ReferenceKeyMsg(database="", schema="", identifier=""), - consequences=[ReferenceKeyMsg(database="", schema="", identifier="")], - ) - DropRelation(dropped=ReferenceKeyMsg()) - UpdateReference( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - cached_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - TemporaryRelation(key=ReferenceKeyMsg(database="", schema="", identifier="")) - RenameSchema( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ) - DumpBeforeAddGraph(dump=dict()) - DumpAfterAddGraph(dump=dict()) - DumpBeforeRenameSchema(dump=dict()) - DumpAfterRenameSchema(dump=dict()) - AdapterImportError(exc="") - PluginLoadError(exc_info="") - NewConnectionOpening(connection_state="") - CodeExecution(conn_name="", code_content="") - CodeExecutionStatus(status="", elapsed=0.1) - CatalogGenerationError(exc="") - WriteCatalogFailure(num_exceptions=0) - CatalogWritten(path="") - CannotGenerateDocs() - BuildingCatalog() - DatabaseErrorRunningHook(hook_type="") - HooksRunning(num_hooks=0, hook_type="") - HookFinished(stat_line="", execution="", execution_time=0) - - # I - Project parsing ====================== - ParseCmdStart() - ParseCmdCompiling() - ParseCmdWritingManifest() - ParseCmdDone() - ManifestDependenciesLoaded() - ManifestLoaderCreated() - ManifestLoaded() - ManifestChecked() - ManifestFlatGraphBuilt() - ParseCmdPerfInfoPath(path="") - GenericTestFileParse(path="") - MacroFileParse(path="") - PartialParsingFullReparseBecauseOfError() - PartialParsingExceptionFile(file="") - PartialParsingFile(file_id="") - PartialParsingException(exc_info={}) - PartialParsingSkipParsing() - PartialParsingMacroChangeStartFullParse() - PartialParsingProjectEnvVarsChanged() - PartialParsingProfileEnvVarsChanged() - PartialParsingDeletedMetric(unique_id="") - ManifestWrongMetadataVersion(version="") - PartialParsingVersionMismatch(saved_version="", current_version="") - PartialParsingFailedBecauseConfigChange() - PartialParsingFailedBecauseProfileChange() - PartialParsingFailedBecauseNewProjectDependency() - PartialParsingFailedBecauseHashChanged() - PartialParsingNotEnabled() - ParsedFileLoadFailed(path="", exc="", exc_info="") - PartialParseSaveFileNotFound() - StaticParserCausedJinjaRendering(path="") - UsingExperimentalParser(path="") - SampleFullJinjaRendering(path="") - StaticParserFallbackJinjaRendering(path="") - StaticParsingMacroOverrideDetected(path="") - StaticParserSuccess(path="") - StaticParserFailure(path="") - ExperimentalParserSuccess(path="") - ExperimentalParserFailure(path="") - PartialParsingEnabled(deleted=0, added=0, changed=0) - PartialParsingAddedFile(file_id="") - 
PartialParsingDeletedFile(file_id="") - PartialParsingUpdatedFile(file_id="") - PartialParsingNodeMissingInSourceFile(file_id="") - PartialParsingMissingNodes(file_id="") - PartialParsingChildMapMissingUniqueID(unique_id="") - PartialParsingUpdateSchemaFile(file_id="") - PartialParsingDeletedSource(unique_id="") - PartialParsingDeletedExposure(unique_id="") - InvalidDisabledTargetInTestNode( - resource_type_title="", - unique_id="", - original_file_path="", - target_kind="", - target_name="", - target_package="", - ) - UnusedResourceConfigPath(unused_config_paths=[]) - SeedIncreased(package_name="", name="") - SeedExceedsLimitSamePath(package_name="", name="") - SeedExceedsLimitAndPathChanged(package_name="", name="") - SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name="") - UnusedTables(unused_tables=[]) - WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type="") - NoNodeForYamlKey(patch_name="", yaml_key="", file_path="") - MacroPatchNotFound(patch_name="") - NodeNotFoundOrDisabled( - original_file_path="", - unique_id="", - resource_type_title="", - target_name="", - target_kind="", - target_package="", - disabled="", - ) - - # M - Deps generation ====================== - - GitSparseCheckoutSubdirectory(subdir="") - GitProgressCheckoutRevision(revision="") - GitProgressUpdatingExistingDependency(dir="") - GitProgressPullingNewDependency(dir="") - GitNothingToDo(sha="") - GitProgressUpdatedCheckoutRange(start_sha="", end_sha="") - GitProgressCheckedOutAt(end_sha="") - RegistryProgressGETRequest(url="") - RegistryProgressGETResponse(url="", resp_code=1234) - SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec="") - MacroEventInfo(msg="") - MacroEventDebug(msg="") - DepsNoPackagesFound() - DepsStartPackageInstall(package_name="") - DepsInstallInfo(version_name="") - DepsUpdateAvailable(version_latest="") - DepsUpToDate() - DepsListSubdirectory(subdirectory="") - DepsNotifyUpdatesAvailable(packages=ListOfStrings()) - RetryExternalCall(attempt=0, max=0) - RecordRetryException(exc="") - RegistryIndexProgressGETRequest(url="") - RegistryIndexProgressGETResponse(url="", resp_code=1234) - RegistryResponseUnexpectedType(response=""), - RegistryResponseMissingTopKeys(response=""), - RegistryResponseMissingNestedKeys(response=""), - RegistryResponseExtraNestedKeys(response=""), - DepsSetDownloadDirectory(path="") - - # Q - Node execution ====================== - - RunningOperationCaughtError(exc="") - CompileComplete() - FreshnessCheckComplete() - SeedHeader(header="") - SeedHeaderSeparator(len_header=0) - SQLRunnerException(exc="") - LogTestResult( - name="", - index=0, - num_models=0, - execution_time=0, - num_failures=0, - ) - LogStartLine(description="", index=0, total=0, node_info=NodeInfo()) - LogModelResult( - description="", - status="", - index=0, - total=0, - execution_time=0, - ) - LogSnapshotResult( - status="", - description="", - cfg={}, - index=0, - total=0, - execution_time=0, - ) - LogSeedResult( - status="", - index=0, - total=0, - execution_time=0, - schema="", - relation="", - ) - LogFreshnessResult( - source_name="", - table_name="", - index=0, - total=0, - execution_time=0, - ) - LogCancelLine(conn_name="") - DefaultSelector(name="") - NodeStart(unique_id="") - NodeFinished(unique_id="") - QueryCancelationUnsupported(type="") - ConcurrencyLine(num_threads=0, target_name="") - CompilingNode(unique_id="") - WritingInjectedSQLForNode(unique_id="") - NodeCompiling(unique_id="") - 
NodeExecuting(unique_id="") - LogHookStartLine( - statement="", - index=0, - total=0, - ) - LogHookEndLine( - statement="", - status="", - index=0, - total=0, - execution_time=0, - ) - SkippingDetails( - resource_type="", - schema="", - node_name="", - index=0, - total=0, - ) - NothingToDo() - RunningOperationUncaughtError(exc="") - EndRunResult() - NoNodesSelected() - DepsUnpinned(revision="", git="") - NoNodesForSelectionCriteria(spec_raw="") - - # W - Node testing ====================== - - CatchableExceptionOnRun(exc="") - InternalExceptionOnRun(build_path="", exc="") - GenericExceptionOnRun(build_path="", unique_id="", exc="") - NodeConnectionReleaseError(node_name="", exc="") - FoundStats(stat_line="") - - # Z - misc ====================== - - MainKeyboardInterrupt() - MainEncounteredError(exc="") - MainStackTrace(stack_trace="") - SystemErrorRetrievingModTime(path="") - SystemCouldNotWrite(path="", reason="", exc="") - SystemExecutingCmd(cmd=[""]) - SystemStdOutMsg(bmsg=b"") - SystemStdErrMsg(bmsg=b"") - SystemReportReturnCode(returncode=0) - TimingInfoCollected() - LogDebugStackTrace() - CheckCleanPath(path="") - ConfirmCleanPath(path="") - ProtectedCleanPath(path="") - FinishedCleanPaths() - OpenCommand(open_cmd="", profiles_dir="") - EmptyLine() - ServingDocsPort(address="", port=0) - ServingDocsAccessInfo(port="") - ServingDocsExitInfo() - RunResultWarning(resource_type="", node_name="", path="") - RunResultFailure(resource_type="", node_name="", path="") - StatsLine(stats={}) - RunResultError(msg="") - RunResultErrorNoMessage(status="") - SQLCompiledPath(path="") - CheckNodeTestFailure(relation_name="") - FirstRunResultError(msg="") - AfterFirstRunResultError(msg="") - EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False) - LogSkipBecauseError(schema="", relation="", index=0, total=0) - EnsureGitInstalled() - DepsCreatingLocalSymlink() - DepsSymlinkNotAvailable() - DisableTracking() - SendingEvent(kwargs="") - SendEventFailure() - FlushEvents() - FlushEventsFailure() - TrackingInitializeFailure() - EventBufferFull() diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index f0eeb4f6d4f..05f3debafe6 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -3,7 +3,7 @@ from typing import NoReturn, Optional, Mapping, Any from dbt.events.helpers import env_secrets, scrub_secrets -from dbt.events.types import GeneralMacroWarning +from dbt.events.types import JinjaLogWarning from dbt.node_types import NodeType import dbt.dataclass_schema @@ -996,7 +996,7 @@ def raise_duplicate_alias( def warn(msg, node=None): - dbt.events.functions.warn_or_error(GeneralMacroWarning(msg=msg), node=node) + dbt.events.functions.warn_or_error(JinjaLogWarning(msg=msg), node=node) return "" diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py index 16b731e4f7d..5c922a5ba90 100644 --- a/core/dbt/task/seed.py +++ b/core/dbt/task/seed.py @@ -51,6 +51,7 @@ def print_result_line(self, result): LogSeedResult( info=info(level=level), status=result.status, + result_message=result.message, index=self.node_index, total=self.num_nodes, execution_time=result.execution_time, diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 050d5153c8d..eb4a87f495e 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -1,7 +1,7 @@ # flake8: noqa from dbt.events.test_types import UnitTestInfo from dbt.events import AdapterLogger -from dbt.events.functions import event_to_json, LOG_VERSION, reset_event_history +from dbt.events.functions import 
event_to_json, LOG_VERSION, reset_event_history, event_to_dict from dbt.events.types import * from dbt.events.test_types import * @@ -29,10 +29,8 @@ def get_all_subclasses(cls): all_subclasses = [] for subclass in cls.__subclasses__(): - # If the test breaks because of abcs this list might have to be updated. - if subclass in [TestLevel, DebugLevel, WarnLevel, InfoLevel, ErrorLevel, DynamicLevel]: - continue - all_subclasses.append(subclass) + if subclass not in [TestLevel, DebugLevel, WarnLevel, InfoLevel, ErrorLevel, DynamicLevel]: + all_subclasses.append(subclass) all_subclasses.extend(get_all_subclasses(subclass)) return set(all_subclasses) @@ -81,7 +79,7 @@ def test_formatting(self): event = AdapterEventDebug(name="dbt_tests", base_msg=[1,2,3], args=(3,)) assert isinstance(event.base_msg, str) - event = MacroEventDebug(msg=[1,2,3]) + event = JinjaLogDebug(msg=[1,2,3]) assert isinstance(event.msg, str) @@ -93,15 +91,15 @@ def test_event_codes(self): all_concrete = get_all_subclasses(BaseEvent) all_codes = set() - for event in all_concrete: - if not inspect.isabstract(event): - # must be in the form 1 capital letter, 3 digits - assert re.match("^[A-Z][0-9]{3}", event.info.code) - # cannot have been used already - assert ( - event.info.code not in all_codes - ), f"{event.code} is assigned more than once. Check types.py for duplicates." - all_codes.add(event.info.code) + for event_cls in all_concrete: + code = event_cls.code(event_cls) + # must be in the form 1 capital letter, 3 digits + assert re.match("^[A-Z][0-9]{3}", code) + # cannot have been used already + assert ( + code not in all_codes + ), f"{code} is assigned more than once. Check types.py for duplicates." + all_codes.add(code) class TestEventBuffer: @@ -164,56 +162,62 @@ def MockNode(): sample_values = [ - MainReportVersion(version="", log_version=LOG_VERSION), - MainKeyboardInterrupt(), - MainEncounteredError(exc=""), - MainStackTrace(stack_trace=""), + # A - pre-project loading + MainReportVersion(version=""), + MainReportArgs(args={}), MainTrackingUserState(user_state=""), - ParseCmdStart(), - ParseCmdCompiling(), - ParseCmdWritingManifest(), - ParseCmdDone(), - ManifestDependenciesLoaded(), - ManifestLoaderCreated(), - ManifestLoaded(), - ManifestChecked(), - ManifestFlatGraphBuilt(), - ParseCmdPerfInfoPath(path=""), - GitSparseCheckoutSubdirectory(subdir=""), - GitProgressCheckoutRevision(revision=""), - GitProgressUpdatingExistingDependency(dir=""), - GitProgressPullingNewDependency(dir=""), - GitNothingToDo(sha=""), - GitProgressUpdatedCheckoutRange(start_sha="", end_sha=""), - GitProgressCheckedOutAt(end_sha=""), - SystemErrorRetrievingModTime(path=""), - SystemCouldNotWrite(path="", reason="", exc=""), - SystemExecutingCmd(cmd=[""]), - SystemStdOutMsg(bmsg=b""), - SystemStdErrMsg(bmsg=b""), - SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""), - MacroEventInfo(msg=""), - MacroEventDebug(msg=""), + MergedFromState(num_merged=0, sample=[]), + MissingProfileTarget(profile_name="", target_name=""), + InvalidVarsYAML(), + DbtProjectError(), + DbtProjectErrorException(exc=""), + DbtProfileError(), + DbtProfileErrorException(exc=""), + ProfileListTitle(), + ListSingleProfile(profile=""), + NoDefinedProfiles(), + ProfileHelpMessage(), + StarterProjectPath(dir=""), + ConfigFolderDirectory(dir=""), + NoSampleProfileFound(adapter=""), + ProfileWrittenWithSample(name="", path=""), + ProfileWrittenWithTargetTemplateYAML(name="", path=""), + ProfileWrittenWithProjectTemplateYAML(name="", path=""), + 
SettingUpProfile(), + InvalidProfileTemplateYAML(), + ProjectNameAlreadyExists(name=""), + ProjectCreated(project_name=""), + + # D - Deprecations ====================== + PackageRedirectDeprecation(old_name="", new_name=""), + PackageInstallPathDeprecation(), + ConfigSourcePathDeprecation(deprecated_path="", exp_path=""), + ConfigDataPathDeprecation(deprecated_path="", exp_path=""), + AdapterDeprecationWarning(old_name="", new_name=""), + MetricAttributesRenamed(metric_name=""), + ExposureNameDeprecation(exposure=""), + + # E - DB Adapter ====================== + AdapterEventDebug(), + AdapterEventInfo(), + AdapterEventWarning(), + AdapterEventError(), NewConnection(conn_type="", conn_name=""), ConnectionReused(conn_name=""), - ConnectionLeftOpen(conn_name=""), - ConnectionClosed(conn_name=""), + ConnectionLeftOpenInCleanup(conn_name=""), + ConnectionClosedInCleanup(conn_name=""), RollbackFailed(conn_name=""), - ConnectionClosed2(conn_name=""), - ConnectionLeftOpen2(conn_name=""), + ConnectionClosed(conn_name=""), + ConnectionLeftOpen(conn_name=""), Rollback(conn_name=""), CacheMiss(conn_name="", database="", schema=""), - ListRelations(database="", schema="", relations=[]), + ListRelations(database="", schema=""), ConnectionUsed(conn_type="", conn_name=""), SQLQuery(conn_name="", sql=""), SQLQueryStatus(status="", elapsed=0.1), - CodeExecution(conn_name="", code_content=""), - CodeExecutionStatus(status="", elapsed=0.1), SQLCommit(conn_name=""), ColTypeChange( - orig_type="", - new_type="", - table=ReferenceKeyMsg(database="", schema="", identifier=""), + orig_type="", new_type="", table=ReferenceKeyMsg(database="", schema="", identifier="") ), SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")), @@ -231,6 +235,7 @@ def MockNode(): dropped=ReferenceKeyMsg(database="", schema="", identifier=""), consequences=[ReferenceKeyMsg(database="", schema="", identifier="")], ), + DropRelation(dropped=ReferenceKeyMsg()), UpdateReference( old_key=ReferenceKeyMsg(database="", schema="", identifier=""), new_key=ReferenceKeyMsg(database="", schema="", identifier=""), @@ -246,29 +251,49 @@ def MockNode(): DumpBeforeRenameSchema(dump=dict()), DumpAfterRenameSchema(dump=dict()), AdapterImportError(exc=""), - PluginLoadError(), - SystemReportReturnCode(returncode=0), + PluginLoadError(exc_info=""), NewConnectionOpening(connection_state=""), - TimingInfoCollected(), - MergedFromState(num_merged=0, sample=[]), - MissingProfileTarget(profile_name="", target_name=""), - InvalidVarsYAML(), + CodeExecution(conn_name="", code_content=""), + CodeExecutionStatus(status="", elapsed=0.1), + CatalogGenerationError(exc=""), + WriteCatalogFailure(num_exceptions=0), + CatalogWritten(path=""), + CannotGenerateDocs(), + BuildingCatalog(), + DatabaseErrorRunningHook(hook_type=""), + HooksRunning(num_hooks=0, hook_type=""), + HookFinished(stat_line="", execution="", execution_time=0), + + # I - Project parsing ====================== + ParseCmdStart(), + ParseCmdCompiling(), + ParseCmdWritingManifest(), + ParseCmdDone(), + ManifestDependenciesLoaded(), + ManifestLoaderCreated(), + ManifestLoaded(), + ManifestChecked(), + ManifestFlatGraphBuilt(), + ParseCmdPerfInfoPath(path=""), GenericTestFileParse(path=""), MacroFileParse(path=""), PartialParsingFullReparseBecauseOfError(), - PartialParsingFile(file_id=""), PartialParsingExceptionFile(file=""), + PartialParsingFile(file_id=""), PartialParsingException(exc_info={}), 
PartialParsingSkipParsing(), PartialParsingMacroChangeStartFullParse(), + PartialParsingProjectEnvVarsChanged(), + PartialParsingProfileEnvVarsChanged(), + PartialParsingDeletedMetric(unique_id=""), ManifestWrongMetadataVersion(version=""), PartialParsingVersionMismatch(saved_version="", current_version=""), PartialParsingFailedBecauseConfigChange(), PartialParsingFailedBecauseProfileChange(), PartialParsingFailedBecauseNewProjectDependency(), PartialParsingFailedBecauseHashChanged(), - PartialParsingDeletedMetric(unique_id=""), - ParsedFileLoadFailed(path="", exc=""), + PartialParsingNotEnabled(), + ParsedFileLoadFailed(path="", exc="", exc_info=""), PartialParseSaveFileNotFound(), StaticParserCausedJinjaRendering(path=""), UsingExperimentalParser(path=""), @@ -297,50 +322,172 @@ def MockNode(): target_name="", target_package="", ), + UnusedResourceConfigPath(unused_config_paths=[]), + SeedIncreased(package_name="", name=""), + SeedExceedsLimitSamePath(package_name="", name=""), + SeedExceedsLimitAndPathChanged(package_name="", name=""), + SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name=""), + UnusedTables(unused_tables=[]), + WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type=""), + NoNodeForYamlKey(patch_name="", yaml_key="", file_path=""), + MacroPatchNotFound(patch_name=""), + NodeNotFoundOrDisabled( + original_file_path="", + unique_id="", + resource_type_title="", + target_name="", + target_kind="", + target_package="", + disabled="", + ), + JinjaLogWarning(), + + # M - Deps generation ====================== + + GitSparseCheckoutSubdirectory(subdir=""), + GitProgressCheckoutRevision(revision=""), + GitProgressUpdatingExistingDependency(dir=""), + GitProgressPullingNewDependency(dir=""), + GitNothingToDo(sha=""), + GitProgressUpdatedCheckoutRange(start_sha="", end_sha=""), + GitProgressCheckedOutAt(end_sha=""), + RegistryProgressGETRequest(url=""), + RegistryProgressGETResponse(url="", resp_code=1234), + SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""), + JinjaLogInfo(msg=""), + JinjaLogDebug(msg=""), + DepsNoPackagesFound(), + DepsStartPackageInstall(package_name=""), + DepsInstallInfo(version_name=""), + DepsUpdateAvailable(version_latest=""), + DepsUpToDate(), + DepsListSubdirectory(subdirectory=""), + DepsNotifyUpdatesAvailable(packages=ListOfStrings()), + RetryExternalCall(attempt=0, max=0), + RecordRetryException(exc=""), + RegistryIndexProgressGETRequest(url=""), + RegistryIndexProgressGETResponse(url="", resp_code=1234), + RegistryResponseUnexpectedType(response=""), + RegistryResponseMissingTopKeys(response=""), + RegistryResponseMissingNestedKeys(response=""), + RegistryResponseExtraNestedKeys(response=""), + DepsSetDownloadDirectory(path=""), + + # Q - Node execution ====================== + RunningOperationCaughtError(exc=""), + CompileComplete(), + FreshnessCheckComplete(), + SeedHeader(header=""), + SeedHeaderSeparator(len_header=0), + SQLRunnerException(exc=""), + LogTestResult( + name="", + index=0, + num_models=0, + execution_time=0, + num_failures=0, + ), + LogStartLine(description="", index=0, total=0, node_info=NodeInfo()), + LogModelResult( + description="", + status="", + index=0, + total=0, + execution_time=0, + ), + LogSnapshotResult( + status="", + description="", + cfg={}, + index=0, + total=0, + execution_time=0, + ), + LogSeedResult( + status="", + index=0, + total=0, + execution_time=0, + schema="", + relation="", + ), + LogFreshnessResult( + source_name="", + 
table_name="", + index=0, + total=0, + execution_time=0, + ), + LogCancelLine(conn_name=""), + DefaultSelector(name=""), + NodeStart(unique_id=""), + NodeFinished(unique_id=""), + QueryCancelationUnsupported(type=""), + ConcurrencyLine(num_threads=0, target_name=""), + CompilingNode(unique_id=""), + WritingInjectedSQLForNode(unique_id=""), + NodeCompiling(unique_id=""), + NodeExecuting(unique_id=""), + LogHookStartLine( + statement="", + index=0, + total=0, + ), + LogHookEndLine( + statement="", + status="", + index=0, + total=0, + execution_time=0, + ), + SkippingDetails( + resource_type="", + schema="", + node_name="", + index=0, + total=0, + ), + NothingToDo(), RunningOperationUncaughtError(exc=""), - DbtProjectError(), - DbtProjectErrorException(exc=""), - DbtProfileError(), - DbtProfileErrorException(exc=""), - ProfileListTitle(), - ListSingleProfile(profile=""), - NoDefinedProfiles(), - ProfileHelpMessage(), + EndRunResult(), + NoNodesSelected(), + DepsUnpinned(revision="", git=""), + NoNodesForSelectionCriteria(spec_raw=""), + + # W - Node testing ====================== + CatchableExceptionOnRun(exc=""), InternalExceptionOnRun(build_path="", exc=""), GenericExceptionOnRun(build_path="", unique_id="", exc=""), NodeConnectionReleaseError(node_name="", exc=""), + FoundStats(stat_line=""), + + # Z - misc ====================== + + MainKeyboardInterrupt(), + MainEncounteredError(exc=""), + MainStackTrace(stack_trace=""), + SystemErrorRetrievingModTime(path=""), + SystemCouldNotWrite(path="", reason="", exc=""), + SystemExecutingCmd(cmd=[""]), + SystemStdOutMsg(bmsg=b""), + SystemStdErrMsg(bmsg=b""), + SystemReportReturnCode(returncode=0), + TimingInfoCollected(), + LogDebugStackTrace(), CheckCleanPath(path=""), ConfirmCleanPath(path=""), ProtectedCleanPath(path=""), FinishedCleanPaths(), OpenCommand(open_cmd="", profiles_dir=""), - DepsNoPackagesFound(), - DepsStartPackageInstall(package_name=""), - DepsInstallInfo(version_name=""), - DepsUpdateAvailable(version_latest=""), - DepsListSubdirectory(subdirectory=""), - DepsNotifyUpdatesAvailable(packages=ListOfStrings()), - DepsNotifyUpdatesAvailable(packages=ListOfStrings(['dbt-utils'])), - DatabaseErrorRunningHook(hook_type=""), EmptyLine(), - HooksRunning(num_hooks=0, hook_type=""), - HookFinished(stat_line="", execution="", execution_time=0), - WriteCatalogFailure(num_exceptions=0), - CatalogWritten(path=""), - CannotGenerateDocs(), - BuildingCatalog(), - CompileComplete(), - FreshnessCheckComplete(), ServingDocsPort(address="", port=0), ServingDocsAccessInfo(port=""), ServingDocsExitInfo(), - SeedHeader(header=""), - SeedHeaderSeparator(len_header=0), RunResultWarning(resource_type="", node_name="", path=""), RunResultFailure(resource_type="", node_name="", path=""), - StatsLine(stats={"pass": 0, "warn": 0, "error": 0, "skip": 0, "total": 0}), + StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0,"total": 0}), RunResultError(msg=""), RunResultErrorNoMessage(status=""), SQLCompiledPath(path=""), @@ -348,90 +495,27 @@ def MockNode(): FirstRunResultError(msg=""), AfterFirstRunResultError(msg=""), EndOfRunSummary(num_errors=0, num_warnings=0, keyboard_interrupt=False), - LogStartLine(description="", index=0, total=0, node_info=NodeInfo()), - LogHookStartLine(statement="", index=0, total=0, node_info=NodeInfo()), - LogHookEndLine( - statement="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - SkippingDetails( - resource_type="", schema="", node_name="", index=0, total=0, node_info=NodeInfo() - ), - 
LogTestResult( - name="", index=0, num_models=0, execution_time=0, num_failures=0, node_info=NodeInfo() - ), LogSkipBecauseError(schema="", relation="", index=0, total=0), - LogModelResult( - description="", status="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - LogSnapshotResult( - status="", description="", cfg={}, index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - LogSeedResult( - status="", index=0, total=0, execution_time=0, schema="", relation="", node_info=NodeInfo() - ), - LogFreshnessResult( - source_name="", table_name="", index=0, total=0, execution_time=0, node_info=NodeInfo() - ), - LogCancelLine(conn_name=""), - DefaultSelector(name=""), - NodeStart(unique_id="", node_info=NodeInfo()), - NodeCompiling(unique_id="", node_info=NodeInfo()), - NodeExecuting(unique_id="", node_info=NodeInfo()), - NodeFinished(unique_id="", node_info=NodeInfo(), run_result=RunResultMsg()), - QueryCancelationUnsupported(type=""), - ConcurrencyLine(num_threads=0, target_name=""), - StarterProjectPath(dir=""), - ConfigFolderDirectory(dir=""), - NoSampleProfileFound(adapter=""), - ProfileWrittenWithSample(name="", path=""), - ProfileWrittenWithTargetTemplateYAML(name="", path=""), - ProfileWrittenWithProjectTemplateYAML(name="", path=""), - SettingUpProfile(), - InvalidProfileTemplateYAML(), - ProjectNameAlreadyExists(name=""), - ProjectCreated(project_name="", docs_url="", slack_url=""), - DepsSetDownloadDirectory(path=""), EnsureGitInstalled(), DepsCreatingLocalSymlink(), DepsSymlinkNotAvailable(), - FoundStats(stat_line=""), - CompilingNode(unique_id=""), - WritingInjectedSQLForNode(unique_id=""), DisableTracking(), SendingEvent(kwargs=""), SendEventFailure(), FlushEvents(), FlushEventsFailure(), TrackingInitializeFailure(), - RetryExternalCall(attempt=0, max=0), - PartialParsingProfileEnvVarsChanged(), - AdapterEventDebug(name="", base_msg="", args=()), - AdapterEventInfo(name="", base_msg="", args=()), - AdapterEventWarning(name="", base_msg="", args=()), - AdapterEventError(name="", base_msg="", args=()), - LogDebugStackTrace(), - MainReportArgs(args={}), - RegistryProgressGETRequest(url=""), - RegistryIndexProgressGETRequest(url=""), - RegistryIndexProgressGETResponse(url="", resp_code=1), - RegistryResponseUnexpectedType(response=""), - RegistryResponseMissingTopKeys(response=""), - RegistryResponseMissingNestedKeys(response=""), - RegistryResponseExtraNestedKeys(response=""), - DepsUpToDate(), - PartialParsingNotEnabled(), - SQLRunnerException(exc=""), - DropRelation(dropped=ReferenceKeyMsg(database="", schema="", identifier="")), - PartialParsingProjectEnvVarsChanged(), - RegistryProgressGETResponse(url="", resp_code=1), - IntegrationTestDebug(msg=""), - IntegrationTestInfo(msg=""), - IntegrationTestWarn(msg=""), - IntegrationTestError(msg=""), - IntegrationTestException(msg=""), EventBufferFull(), - RecordRetryException(exc=""), - UnitTestInfo(msg=""), + RunResultWarningMessage(), + + # T - tests ====================== + IntegrationTestInfo(), + IntegrationTestDebug(), + IntegrationTestWarn(), + IntegrationTestError(), + IntegrationTestException(), + UnitTestInfo(), + ] @@ -441,13 +525,8 @@ class TestEventJSONSerialization: # event types that take `Any` are not possible to test in this way since some will serialize # just fine and others won't. 
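For orientation before the hunk continues: the round trip that test_all_serializable exercises below boils down to the following sketch. This is an illustrative standalone snippet, not part of the patch; MainReportVersion is just one concrete event type taken from the sample_values list above, and the field value is a made-up example.

    # minimal sketch of the serialization round trip, assuming dbt-core at this commit
    from dbt.events.functions import event_to_dict, event_to_json
    from dbt.events.types import MainReportVersion

    event = MainReportVersion(version="1.4.0")
    event_dict = event_to_dict(event)  # event -> plain dict, as the test now does
    event_json = event_to_json(event)  # event -> JSON string; raises if a field can't serialize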
     def test_all_serializable(self):
-        no_test = [DummyCacheEvent]
-
         all_non_abstract_events = set(
-            filter(
-                lambda x: not inspect.isabstract(x) and x not in no_test,
-                get_all_subclasses(BaseEvent),
-            )
+            get_all_subclasses(BaseEvent),
         )
         all_event_values_list = list(map(lambda x: x.__class__, sample_values))
         diff = all_non_abstract_events.difference(set(all_event_values_list))
@@ -461,7 +540,7 @@ def test_all_serializable(self):
 
         # if we have everything we need to test, try to serialize everything
         for event in sample_values:
-            event_dict = event.to_dict()
+            event_dict = event_to_dict(event)
             try:
                 event_json = event_to_json(event)
             except Exception as e:
@@ -469,30 +548,3 @@
 
 
 T = TypeVar("T")
-
-
-@dataclass
-class Counter(Generic[T], SerializableType):
-    dummy_val: T
-    count: int = 0
-
-    def next(self) -> T:
-        self.count = self.count + 1
-        return self.dummy_val
-
-    # mashumaro serializer
-    def _serialize() -> Dict[str, int]:
-        return {"count": count}
-
-
-@dataclass
-class DummyCacheEvent(InfoLevel, Cache, SerializableType):
-    code = "X999"
-    counter: Counter
-
-    def message(self) -> str:
-        return f"state: {self.counter.next()}"
-
-    # mashumaro serializer
-    def _serialize() -> str:
-        return "DummyCacheEvent"
diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py
index 31837ed0271..d5b070c41e2 100644
--- a/tests/unit/test_proto_events.py
+++ b/tests/unit/test_proto_events.py
@@ -13,7 +13,7 @@
 from dbt.version import installed
 
 
-info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", "ts", "extra"}
+info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", "ts", "extra", "category"}
 
 
 def test_events():

From bce0e7c0964e67d5a463bba5ce498a3fe497e0ab Mon Sep 17 00:00:00 2001
From: Itamar Hartstein
Date: Mon, 28 Nov 2022 17:23:40 +0200
Subject: [PATCH 037/156] BaseContext: expose md5 function in context (#6247)

* BaseContext: expose md5 function in context

* BaseContext: add return value type

* Add changie entry

* rename "md5" to "local_md5"

* fix test_context.py
---
 .changes/unreleased/Features-20221114-185207.yaml |  7 +++++++
 core/dbt/context/base.py                          | 14 ++++++++++++++
 test/unit/test_context.py                         |  1 +
 3 files changed, 22 insertions(+)
 create mode 100644 .changes/unreleased/Features-20221114-185207.yaml

diff --git a/.changes/unreleased/Features-20221114-185207.yaml b/.changes/unreleased/Features-20221114-185207.yaml
new file mode 100644
index 00000000000..16ea3fb5918
--- /dev/null
+++ b/.changes/unreleased/Features-20221114-185207.yaml
@@ -0,0 +1,7 @@
+kind: Features
+body: Added an md5 function to the base context
+time: 2022-11-14T18:52:07.788593+02:00
+custom:
+  Author: haritamar
+  Issue: "6246"
+  PR: "6247"
diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py
index 262ed45d3dc..2fe56de0200 100644
--- a/core/dbt/context/base.py
+++ b/core/dbt/context/base.py
@@ -4,6 +4,7 @@
 
 from dbt import flags
 from dbt import tracking
+from dbt import utils
 from dbt.clients.jinja import get_rendered
 from dbt.clients.yaml_helper import yaml, safe_load, SafeLoader, Loader, Dumper  # noqa: F401
 from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER
@@ -687,6 +688,19 @@ def diff_of_two_dicts(
                 dict_diff.update({k: dict_a[k]})
         return dict_diff
 
+    @contextmember
+    @staticmethod
+    def local_md5(value: str) -> str:
+        """Calculates an MD5 hash of the given string.
+        It's called "local_md5" to emphasize that it runs locally in dbt (in the Jinja context), not as an MD5 SQL command.
+
+        :param value: The value to hash
+
+        Usage:
+            {% set value_hash = local_md5("hello world") %}
+        """
+        return utils.md5(value)
+
 
 def generate_base_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]:
     ctx = BaseContext(cli_vars)
diff --git a/test/unit/test_context.py b/test/unit/test_context.py
index 01c4f678abe..1aaf3711909 100644
--- a/test/unit/test_context.py
+++ b/test/unit/test_context.py
@@ -201,6 +201,7 @@ def assert_has_keys(required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str
         "flags",
         "print",
         "diff_of_two_dicts",
+        "local_md5"
     }
 )

From 55db15aba8eb9a37621d9f75fa7ddd791249dea2 Mon Sep 17 00:00:00 2001
From: Mila Page <67295367+VersusFacit@users.noreply.github.com>
Date: Tue, 29 Nov 2022 00:06:07 -0800
Subject: [PATCH 038/156] Convert test 067. (#6305)

* Convert test 067. One bug outstanding.

* Test now working! Schema needed renaming to avoid 63 char max problems

* Remove old test.

* Add some docs and rewrite.

* Add exception for when audit tables' schema runs over the db limit.

* Code cleanup.

* Revert exception.

* Round out comments.

* Rename what shouldn't be a base class.

Co-authored-by: Mila Page
---
 .../models/fine_model.sql                     |   1 -
 ...odel_but_with_a_no_good_very_long_name.sql |   1 -
 .../models/problematic_model.sql              |  11 --
 .../models/schema.yml                         |  40 -----
 .../expected/expected_accepted_values.csv     |   3 -
 .../seeds/expected/expected_failing_test.csv  |  11 --
 ...expected_not_null_problematic_model_id.csv |   3 -
 .../expected_unique_problematic_model_id.csv  |   3 -
 .../seeds/people.csv                          |  11 --
 .../test_store_test_failures.py               |  91 -----------
 .../tests/failing_test.sql                    |   1 -
 .../tests/passing_test.sql                    |   2 -
 .../store_test_failures_tests/fixtures.py     | 126 +++++++++++++++
 .../test_store_test_failures.py               | 152 ++++++++++++++++++
 14 files changed, 278 insertions(+), 178 deletions(-)
 delete mode 100644 test/integration/067_store_test_failures_tests/models/fine_model.sql
 delete mode 100644 test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql
 delete mode 100644 test/integration/067_store_test_failures_tests/models/problematic_model.sql
 delete mode 100644 test/integration/067_store_test_failures_tests/models/schema.yml
 delete mode 100644 test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv
 delete mode 100644 test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv
 delete mode 100644 test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv
 delete mode 100644 test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv
 delete mode 100644 test/integration/067_store_test_failures_tests/seeds/people.csv
 delete mode 100644 test/integration/067_store_test_failures_tests/test_store_test_failures.py
 delete mode 100644 test/integration/067_store_test_failures_tests/tests/failing_test.sql
 delete mode 100644 test/integration/067_store_test_failures_tests/tests/passing_test.sql
 create mode 100644 tests/functional/store_test_failures_tests/fixtures.py
 create mode 100644 tests/functional/store_test_failures_tests/test_store_test_failures.py

diff --git a/test/integration/067_store_test_failures_tests/models/fine_model.sql b/test/integration/067_store_test_failures_tests/models/fine_model.sql
deleted file mode 100644
index 94b923a17c2..00000000000
--- a/test/integration/067_store_test_failures_tests/models/fine_model.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('people') }}
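A quick note on the local_md5 context member added in PATCH 037 above: the diff only shows that it delegates to dbt.utils.md5, so the snippet below is a sketch under the assumption that the helper is the usual hashlib hex digest of the UTF-8-encoded string. The local_md5 function here is a stand-in written for illustration, not dbt's actual code:

    import hashlib

    def local_md5(value: str) -> str:
        # assumed equivalent of dbt.utils.md5: hex digest of the utf-8 bytes
        return hashlib.md5(value.encode("utf-8")).hexdigest()

    # mirrors the docstring example: {% set value_hash = local_md5("hello world") %}
    assert local_md5("hello world") == "5eb63bbbe01eeed093cb22bb8f5acdc3"

diff --git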
a/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql b/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql deleted file mode 100644 index 97536ffaf06..00000000000 --- a/test/integration/067_store_test_failures_tests/models/fine_model_but_with_a_no_good_very_long_name.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as quite_long_column_name diff --git a/test/integration/067_store_test_failures_tests/models/problematic_model.sql b/test/integration/067_store_test_failures_tests/models/problematic_model.sql deleted file mode 100644 index e780d6b001e..00000000000 --- a/test/integration/067_store_test_failures_tests/models/problematic_model.sql +++ /dev/null @@ -1,11 +0,0 @@ -select * from {{ ref('people') }} - -union all - -select * from {{ ref('people') }} -where id in (1,2) - -union all - -select null as id, first_name, last_name, email, gender, ip_address from {{ ref('people') }} -where id in (3,4) diff --git a/test/integration/067_store_test_failures_tests/models/schema.yml b/test/integration/067_store_test_failures_tests/models/schema.yml deleted file mode 100644 index f01a9e350d8..00000000000 --- a/test/integration/067_store_test_failures_tests/models/schema.yml +++ /dev/null @@ -1,40 +0,0 @@ -version: 2 - -models: - - - name: fine_model - columns: - - name: id - tests: - - unique - - not_null - - - name: problematic_model - columns: - - name: id - tests: - - unique: - store_failures: true - - not_null - - name: first_name - tests: - # test truncation of really long test name - - accepted_values: - values: - - Jack - - Kathryn - - Gerald - - Bonnie - - Harold - - Jacqueline - - Wanda - - Craig - # - Gary - # - Rose - - - name: fine_model_but_with_a_no_good_very_long_name - columns: - - name: quite_long_column_name - tests: - # test truncation of really long test name with builtin - - unique diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv deleted file mode 100644 index 02f28435b46..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_accepted_values.csv +++ /dev/null @@ -1,3 +0,0 @@ -value_field,n_records -Gary,1 -Rose,1 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv deleted file mode 100644 index d9e7257f122..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_failing_test.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 -2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 -3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 -5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 -6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 -7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 -8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 -9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 -10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv 
deleted file mode 100644 index 95fef8a2594..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_not_null_problematic_model_id.csv +++ /dev/null @@ -1,3 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 diff --git a/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv b/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv deleted file mode 100644 index 431d54ef8d0..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/expected/expected_unique_problematic_model_id.csv +++ /dev/null @@ -1,3 +0,0 @@ -unique_field,n_records -2,2 -1,2 \ No newline at end of file diff --git a/test/integration/067_store_test_failures_tests/seeds/people.csv b/test/integration/067_store_test_failures_tests/seeds/people.csv deleted file mode 100644 index d9e7257f122..00000000000 --- a/test/integration/067_store_test_failures_tests/seeds/people.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,first_name,last_name,email,gender,ip_address -1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 -2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 -3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 -4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 -5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 -6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 -7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 -8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 -9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 -10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 diff --git a/test/integration/067_store_test_failures_tests/test_store_test_failures.py b/test/integration/067_store_test_failures_tests/test_store_test_failures.py deleted file mode 100644 index b0ba0875128..00000000000 --- a/test/integration/067_store_test_failures_tests/test_store_test_failures.py +++ /dev/null @@ -1,91 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestStoreTestFailures(DBTIntegrationTest): - @property - def schema(self): - return "test_store_test_failures_067" - - def tearDown(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - with self.adapter.connection_named('__test'): - self._drop_schema_named(self.default_database, test_audit_schema) - - super().tearDown() - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - "test-paths": ["tests"], - "seeds": { - "quote_columns": False, - "test": { - "expected": self.column_type_overrides() - }, - }, - } - - def column_type_overrides(self): - return {} - - def run_tests_store_one_failure(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - - self.run_dbt(["seed"]) - self.run_dbt(["run"]) - self.run_dbt(["test"], expect_pass=False) - - # one test is configured with store_failures: true, make sure it worked - self.assertTablesEqual("unique_problematic_model_id", "expected_unique_problematic_model_id", test_audit_schema) - - def run_tests_store_failures_and_assert(self): - test_audit_schema = self.unique_schema() + "_dbt_test__audit" - - self.run_dbt(["seed"]) - self.run_dbt(["run"]) - # make sure this works idempotently for all tests - self.run_dbt(["test", "--store-failures"], expect_pass=False) - results = self.run_dbt(["test", "--store-failures"], 
expect_pass=False) - - # compare test results - actual = [(r.status, r.failures) for r in results] - expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0), - ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)] - self.assertEqual(sorted(actual), sorted(expected)) - - # compare test results stored in database - self.assertTablesEqual("failing_test", "expected_failing_test", test_audit_schema) - self.assertTablesEqual("not_null_problematic_model_id", "expected_not_null_problematic_model_id", test_audit_schema) - self.assertTablesEqual("unique_problematic_model_id", "expected_unique_problematic_model_id", test_audit_schema) - self.assertTablesEqual("accepted_values_problematic_mo_c533ab4ca65c1a9dbf14f79ded49b628", "expected_accepted_values", test_audit_schema) - - -class PostgresTestStoreTestFailures(TestStoreTestFailures): - - @property - def schema(self): - return "067" # otherwise too long + truncated - - def column_type_overrides(self): - return { - "expected_unique_problematic_model_id": { - "+column_types": { - "n_records": "bigint", - }, - }, - "expected_accepted_values": { - "+column_types": { - "n_records": "bigint", - }, - }, - } - - @use_profile('postgres') - def test__postgres__store_and_assert(self): - self.run_tests_store_one_failure() - self.run_tests_store_failures_and_assert() diff --git a/test/integration/067_store_test_failures_tests/tests/failing_test.sql b/test/integration/067_store_test_failures_tests/tests/failing_test.sql deleted file mode 100644 index 1bb5ae5ba6e..00000000000 --- a/test/integration/067_store_test_failures_tests/tests/failing_test.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('fine_model') }} diff --git a/test/integration/067_store_test_failures_tests/tests/passing_test.sql b/test/integration/067_store_test_failures_tests/tests/passing_test.sql deleted file mode 100644 index 15c9a7a642d..00000000000 --- a/test/integration/067_store_test_failures_tests/tests/passing_test.sql +++ /dev/null @@ -1,2 +0,0 @@ -select * from {{ ref('fine_model') }} -where false diff --git a/tests/functional/store_test_failures_tests/fixtures.py b/tests/functional/store_test_failures_tests/fixtures.py new file mode 100644 index 00000000000..dae8530135e --- /dev/null +++ b/tests/functional/store_test_failures_tests/fixtures.py @@ -0,0 +1,126 @@ +# +# Seeds +# +seeds__people = """id,first_name,last_name,email,gender,ip_address +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 +8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 +""" + +seeds__expected_accepted_values = """value_field,n_records +Gary,1 +Rose,1 +""" + +seeds__expected_failing_test = """id,first_name,last_name,email,gender,ip_address +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 
+8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 +""" + +seeds__expected_not_null_problematic_model_id = """id,first_name,last_name,email,gender,ip_address +,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +""" + +seeds__expected_unique_problematic_model_id = """unique_field,n_records +2,2 +1,2 +""" + +# +# Schema +# +properties__schema_yml = """ +version: 2 + +models: + + - name: fine_model + columns: + - name: id + tests: + - unique + - not_null + + - name: problematic_model + columns: + - name: id + tests: + - unique: + store_failures: true + - not_null + - name: first_name + tests: + # test truncation of really long test name + - accepted_values: + values: + - Jack + - Kathryn + - Gerald + - Bonnie + - Harold + - Jacqueline + - Wanda + - Craig + # - Gary + # - Rose + + - name: fine_model_but_with_a_no_good_very_long_name + columns: + - name: quite_long_column_name + tests: + # test truncation of really long test name with builtin + - unique +""" + +# +# Models +# +models__fine_model = """ +select * from {{ ref('people') }} +""" + +models__file_model_but_with_a_no_good_very_long_name = """ +select 1 as quite_long_column_name +""" + +models__problematic_model = """ +select * from {{ ref('people') }} + +union all + +select * from {{ ref('people') }} +where id in (1,2) + +union all + +select null as id, first_name, last_name, email, gender, ip_address from {{ ref('people') }} +where id in (3,4) +""" + +# +# Tests +# +tests__failing_test = """ +select * from {{ ref('fine_model') }} +""" + +tests__passing_test = """ +select * from {{ ref('fine_model') }} +where false +""" diff --git a/tests/functional/store_test_failures_tests/test_store_test_failures.py b/tests/functional/store_test_failures_tests/test_store_test_failures.py new file mode 100644 index 00000000000..ff26d7d97d3 --- /dev/null +++ b/tests/functional/store_test_failures_tests/test_store_test_failures.py @@ -0,0 +1,152 @@ +import pytest + +from dbt.tests.util import ( + check_relations_equal, + run_dbt, +) + +from tests.functional.store_test_failures_tests.fixtures import ( + seeds__people, + seeds__expected_accepted_values, + seeds__expected_failing_test, + seeds__expected_not_null_problematic_model_id, + seeds__expected_unique_problematic_model_id, + properties__schema_yml, + models__problematic_model, + models__fine_model, + models__file_model_but_with_a_no_good_very_long_name, + tests__failing_test, + tests__passing_test, +) + +# used to rename test audit schema to help test schema meet max char limit +# the default is _dbt_test__audit but this runs over the postgres 63 schema name char limit +# without which idempotency conditions will not hold (i.e. 
dbt can't drop the schema properly) +TEST_AUDIT_SCHEMA_SUFFIX = "dbt_test__aud" + + +class StoreTestFailuresBase: + @pytest.fixture(scope="function", autouse=True) + def setUp(self, project): + self.test_audit_schema = f"{project.test_schema}_{TEST_AUDIT_SCHEMA_SUFFIX}" + run_dbt(["seed"]) + run_dbt(["run"]) + + @pytest.fixture(scope="class") + def seeds(self): + return { + "people.csv": seeds__people, + "expected_accepted_values.csv": seeds__expected_accepted_values, + "expected_failing_test.csv": seeds__expected_failing_test, + "expected_not_null_problematic_model_id.csv": + seeds__expected_not_null_problematic_model_id, + "expected_unique_problematic_model_id.csv": + seeds__expected_unique_problematic_model_id, + } + + @pytest.fixture(scope="class") + def tests(self): + return { + "failing_test.sql": tests__failing_test, + "passing_test.sql": tests__passing_test, + } + + @pytest.fixture(scope="class") + def properties(self): + return {"schema.yml": properties__schema_yml} + + @pytest.fixture(scope="class") + def models(self): + return { + "fine_model.sql": models__fine_model, + "fine_model_but_with_a_no_good_very_long_name.sql": + models__file_model_but_with_a_no_good_very_long_name, + "problematic_model.sql": models__problematic_model, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + "test": self.column_type_overrides(), + }, + "tests": { + "+schema": TEST_AUDIT_SCHEMA_SUFFIX + } + } + + def column_type_overrides(self): + return {} + + def run_tests_store_one_failure(self, project): + run_dbt(["test"], expect_pass=False) + + # one test is configured with store_failures: true, make sure it worked + check_relations_equal( + project.adapter, + [ + f"{self.test_audit_schema}.unique_problematic_model_id", + "expected_unique_problematic_model_id" + ] + ) + + def run_tests_store_failures_and_assert(self, project): + # make sure this works idempotently for all tests + run_dbt(["test", "--store-failures"], expect_pass=False) + results = run_dbt(["test", "--store-failures"], expect_pass=False) + + # compare test results + actual = [(r.status, r.failures) for r in results] + expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0), + ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)] + assert sorted(actual) == sorted(expected) + + # compare test results stored in database + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.failing_test", + "expected_failing_test" + ]) + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.not_null_problematic_model_id", + "expected_not_null_problematic_model_id" + ]) + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.unique_problematic_model_id", + "expected_unique_problematic_model_id" + ]) + check_relations_equal(project.adapter, [ + f"{self.test_audit_schema}.accepted_values_problemat" + "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628", + "expected_accepted_values" + ]) + + +class TestStoreTestFailures(StoreTestFailuresBase): + @pytest.fixture(scope="function") + def clean_up(self, project): + yield + with project.adapter.connection_named('__test'): + relation = project.adapter.Relation.create(database=project.database, schema=self.test_audit_schema) + project.adapter.drop_schema(relation) + + relation = project.adapter.Relation.create(database=project.database, schema=project.test_schema) + project.adapter.drop_schema(relation) + + def column_type_overrides(self): + return { + "expected_unique_problematic_model_id": { + 
"+column_types": { + "n_records": "bigint", + }, + }, + "expected_accepted_values": { + "+column_types": { + "n_records": "bigint", + }, + }, + } + + def test__store_and_assert(self, project, clean_up): + self.run_tests_store_one_failure(project) + self.run_tests_store_failures_and_assert(project) From 020f639c7a34352f76663ae4eeabac5c6acc32c5 Mon Sep 17 00:00:00 2001 From: leahwicz <60146280+leahwicz@users.noreply.github.com> Date: Tue, 29 Nov 2022 09:40:59 -0500 Subject: [PATCH 039/156] Update stale.yml (#6258) --- .github/workflows/stale.yml | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index f09533b8b36..d902340a91b 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,13 +9,4 @@ permissions: jobs: stale: - runs-on: ubuntu-latest - steps: - # pinned at v4 (https://github.com/actions/stale/releases/tag/v4.0.0) - - uses: actions/stale@cdf15f641adb27a71842045a94023bef6945e3aa - with: - stale-issue-message: "This issue has been marked as Stale because it has been open for 180 days with no activity. If you would like the issue to remain open, please remove the stale label or comment on the issue, or it will be closed in 7 days." - stale-pr-message: "This PR has been marked as Stale because it has been open for 180 days with no activity. If you would like the PR to remain open, please remove the stale label or comment on the PR, or it will be closed in 7 days." - close-issue-message: "Although we are closing this issue as stale, it's not gone forever. Issues can be reopened if there is renewed community interest; add a comment to notify the maintainers." - # mark issues/PRs stale when they haven't seen activity in 180 days - days-before-stale: 180 + uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main From b3440417ad1e38a40c488c3632a63f5d2e0c88f4 Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Tue, 29 Nov 2022 13:30:47 -0600 Subject: [PATCH 040/156] Add GHA workflow to build CLI API docs (#6187) --- .../Under the Hood-20221116-130037.yaml | 7 + .github/workflows/generate-cli-api-docs.yml | 166 + .gitignore | 1 + .pre-commit-config.yaml | 2 +- .../docs/build/doctrees/environment.pickle | Bin 0 -> 65160 bytes core/dbt/docs/build/doctrees/index.doctree | Bin 0 -> 87794 bytes core/dbt/docs/build/html/.buildinfo | 4 + .../docs/build/html/_sources/index.rst.txt | 4 + .../_sphinx_javascript_frameworks_compat.js | 134 + .../dbt/docs/build/html/_static/alabaster.css | 701 + core/dbt/docs/build/html/_static/basic.css | 900 ++ core/dbt/docs/build/html/_static/custom.css | 1 + core/dbt/docs/build/html/_static/doctools.js | 156 + .../html/_static/documentation_options.js | 14 + core/dbt/docs/build/html/_static/file.png | Bin 0 -> 286 bytes .../docs/build/html/_static/jquery-3.6.0.js | 10881 ++++++++++++++++ core/dbt/docs/build/html/_static/jquery.js | 2 + .../docs/build/html/_static/language_data.js | 199 + core/dbt/docs/build/html/_static/minus.png | Bin 0 -> 90 bytes core/dbt/docs/build/html/_static/plus.png | Bin 0 -> 90 bytes core/dbt/docs/build/html/_static/pygments.css | 83 + .../docs/build/html/_static/searchtools.js | 566 + .../build/html/_static/sphinx_highlight.js | 144 + .../build/html/_static/underscore-1.13.1.js | 2042 +++ .../dbt/docs/build/html/_static/underscore.js | 6 + core/dbt/docs/build/html/genindex.html | 102 + core/dbt/docs/build/html/index.html | 855 ++ core/dbt/docs/build/html/objects.inv | Bin 0 -> 250 bytes core/dbt/docs/build/html/search.html | 121 
+ core/dbt/docs/build/html/searchindex.js | 1 + core/dbt/docs/source/conf.py | 2 +- 31 files changed, 17092 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221116-130037.yaml create mode 100644 .github/workflows/generate-cli-api-docs.yml create mode 100644 core/dbt/docs/build/doctrees/environment.pickle create mode 100644 core/dbt/docs/build/doctrees/index.doctree create mode 100644 core/dbt/docs/build/html/.buildinfo create mode 100644 core/dbt/docs/build/html/_sources/index.rst.txt create mode 100644 core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js create mode 100644 core/dbt/docs/build/html/_static/alabaster.css create mode 100644 core/dbt/docs/build/html/_static/basic.css create mode 100644 core/dbt/docs/build/html/_static/custom.css create mode 100644 core/dbt/docs/build/html/_static/doctools.js create mode 100644 core/dbt/docs/build/html/_static/documentation_options.js create mode 100644 core/dbt/docs/build/html/_static/file.png create mode 100644 core/dbt/docs/build/html/_static/jquery-3.6.0.js create mode 100644 core/dbt/docs/build/html/_static/jquery.js create mode 100644 core/dbt/docs/build/html/_static/language_data.js create mode 100644 core/dbt/docs/build/html/_static/minus.png create mode 100644 core/dbt/docs/build/html/_static/plus.png create mode 100644 core/dbt/docs/build/html/_static/pygments.css create mode 100644 core/dbt/docs/build/html/_static/searchtools.js create mode 100644 core/dbt/docs/build/html/_static/sphinx_highlight.js create mode 100644 core/dbt/docs/build/html/_static/underscore-1.13.1.js create mode 100644 core/dbt/docs/build/html/_static/underscore.js create mode 100644 core/dbt/docs/build/html/genindex.html create mode 100644 core/dbt/docs/build/html/index.html create mode 100644 core/dbt/docs/build/html/objects.inv create mode 100644 core/dbt/docs/build/html/search.html create mode 100644 core/dbt/docs/build/html/searchindex.js diff --git a/.changes/unreleased/Under the Hood-20221116-130037.yaml b/.changes/unreleased/Under the Hood-20221116-130037.yaml new file mode 100644 index 00000000000..b7ed5e750d6 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221116-130037.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Add github actions workflow to generate high level CLI API docs +time: 2022-11-16T13:00:37.916202-06:00 +custom: + Author: stu-k + Issue: "5942" + PR: "6187" diff --git a/.github/workflows/generate-cli-api-docs.yml b/.github/workflows/generate-cli-api-docs.yml new file mode 100644 index 00000000000..2364ea99fd6 --- /dev/null +++ b/.github/workflows/generate-cli-api-docs.yml @@ -0,0 +1,166 @@ +# **what?** +# On push, if anything in core/dbt/docs or core/dbt/cli has been +# created or modified, regenerate the CLI API docs using sphinx. + +# **why?** +# We watch for changes in core/dbt/cli because the CLI API docs rely on click +# and all supporting flags/params to be generated. We watch for changes in +# core/dbt/docs since any changes to sphinx configuration or any of the +# .rst files there could result in a differently build final index.html file. + +# **when?** +# Whenever a change has been pushed to a branch, and only if there is a diff +# between the PR branch and main's core/dbt/cli and or core/dbt/docs dirs. 
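Before the workflow body continues below, here is a rough Python rendering of the check_gen / gen_docs flow it implements. This is a sketch for orientation only, not part of the patch; subprocess calls stand in for the workflow's shell steps, and the paths are the ones from the workflow's env block:

    import subprocess

    def dir_changed(path: str, base: str = "@~1", head: str = "@") -> bool:
        # same idea as the check_cli / check_docs steps below: non-empty
        # `git diff` output between the last two commits means the dir changed
        out = subprocess.run(
            ["git", "diff", "--name-only", base, head, "--", path],
            capture_output=True, text=True, check=True,
        )
        return bool(out.stdout.strip())

    if dir_changed("core/dbt/cli") or dir_changed("core/dbt/docs"):
        # gen_docs: rebuild the Sphinx CLI API docs the same way the workflow does
        subprocess.run(["make", "clean"], cwd="core/dbt/docs", check=True)
        subprocess.run(["make", "html"], cwd="core/dbt/docs", check=True)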
+ +# TODO: add bot comment to PR informing contributor that the docs have been committed +# TODO: figure out why github action triggered pushes cause github to fail to report +# the status of jobs + +name: Generate CLI API docs + +on: + pull_request: + +permissions: + contents: write + pull-requests: write + +env: + CLI_DIR: ${{ github.workspace }}/core/dbt/cli + DOCS_DIR: ${{ github.workspace }}/core/dbt/docs + DOCS_BUILD_DIR: ${{ github.workspace }}/core/dbt/docs/build + +jobs: + check_gen: + name: check if generation needed + runs-on: ubuntu-latest + outputs: + cli_dir_changed: ${{ steps.check_cli.outputs.cli_dir_changed }} + docs_dir_changed: ${{ steps.check_docs.outputs.docs_dir_changed }} + + steps: + - name: "[DEBUG] print variables" + run: | + echo "env.CLI_DIR: ${{ env.CLI_DIR }}" + echo "env.DOCS_BUILD_DIR: ${{ env.DOCS_BUILD_DIR }}" + echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}" + echo ">>>>> git log" + git log --pretty=oneline | head -5 + + - name: git checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.head_ref }} + + - name: set shas + id: set_shas + run: | + THIS_SHA=$(git rev-parse @) + LAST_SHA=$(git rev-parse @~1) + + echo "this sha: $THIS_SHA" + echo "last sha: $LAST_SHA" + + echo "this_sha=$THIS_SHA" >> $GITHUB_OUTPUT + echo "last_sha=$LAST_SHA" >> $GITHUB_OUTPUT + + - name: check for changes in core/dbt/cli + id: check_cli + run: | + CLI_DIR_CHANGES=$(git diff \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ steps.set_shas.outputs.this_sha }} \ + -- ${{ env.CLI_DIR }}) + + if [ -n "$CLI_DIR_CHANGES" ]; then + echo "changes found" + echo $CLI_DIR_CHANGES + echo "cli_dir_changed=true" >> $GITHUB_OUTPUT + exit 0 + fi + echo "cli_dir_changed=false" >> $GITHUB_OUTPUT + echo "no changes found" + + - name: check for changes in core/dbt/docs + id: check_docs + if: steps.check_cli.outputs.cli_dir_changed == 'false' + run: | + DOCS_DIR_CHANGES=$(git diff --name-only \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ steps.set_shas.outputs.this_sha }} \ + -- ${{ env.DOCS_DIR }} ':!${{ env.DOCS_BUILD_DIR }}') + + DOCS_BUILD_DIR_CHANGES=$(git diff --name-only \ + ${{ steps.set_shas.outputs.last_sha }} \ + ${{ steps.set_shas.outputs.this_sha }} \ + -- ${{ env.DOCS_BUILD_DIR }}) + + if [ -n "$DOCS_DIR_CHANGES" ] && [ -z "$DOCS_BUILD_DIR_CHANGES" ]; then + echo "changes found" + echo $DOCS_DIR_CHANGES + echo "docs_dir_changed=true" >> $GITHUB_OUTPUT + exit 0 + fi + echo "docs_dir_changed=false" >> $GITHUB_OUTPUT + echo "no changes found" + + gen_docs: + name: generate docs + runs-on: ubuntu-latest + needs: [check_gen] + if: | + needs.check_gen.outputs.cli_dir_changed == 'true' + || needs.check_gen.outputs.docs_dir_changed == 'true' + + steps: + - name: "[DEBUG] print variables" + run: | + echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}" + echo "github head_ref: ${{ github.head_ref }}" + + - name: git checkout + uses: actions/checkout@v3 + with: + ref: ${{ github.head_ref }} + + - name: install python + uses: actions/setup-python@v4.3.0 + with: + python-version: 3.8 + + - name: install dev requirements + run: | + python3 -m venv env + source env/bin/activate + python -m pip install --upgrade pip + pip install -r requirements.txt -r dev-requirements.txt + + - name: generate docs + run: | + source env/bin/activate + cd ${{ env.DOCS_DIR }} + + echo "cleaning existing docs" + make clean + + echo "creating docs" + make html + + - name: debug + run: | + echo ">>>>> status" + git status + echo ">>>>> remotes" + git remote -v + echo ">>>>> branch" + git branch -v + echo 
">>>>> log" + git log --pretty=oneline | head -5 + + - name: commit docs + run: | + git config user.name 'Github Build Bot' + git config user.email 'buildbot@fishtownanalytics.com' + git commit -am "Add generated CLI API docs" + git push -u origin ${{ github.head_ref }} diff --git a/.gitignore b/.gitignore index ac91d49c9c4..dc9996305d3 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ __pycache__/ env*/ dbt_env/ build/ +!core/dbt/docs/build develop-eggs/ dist/ downloads/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6877497ae37..ce9847cf454 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # Eventually the hooks described here will be run as tests before merging each PR. # TODO: remove global exclusion of tests when testing overhaul is complete -exclude: ^test/ +exclude: ^(test/|core/dbt/docs/build/) # Force all unspecified python hooks to run python 3.8 default_language_version: diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle new file mode 100644 index 0000000000000000000000000000000000000000..73d18c236adbabe5cec4cc557ff682bc63a5baf8 GIT binary patch literal 65160 zcmd6wd7LC?S;sfCyK{HXxmPwJ6P9BW*cnJrxrFTIBJO53lAxkl%IfZ#sp_q+>aAmU zC&>y?NwQcFAccy8w}Nv^B&d7rns`vtpSap9{w(aO0nI=#|rBuAQI&@A zLZ0o8og|eP<(yBPAoju_&QImp1TMmA`d;&NZf9fNC~P}Tz!@*D4K12scO&xFT@=@^ zdBQchM9HFH~qBbi0Z_N z)I=uTq~lwO>vV9H6Si+R>=>1}PqHFT?8IyK1lFBy+OV2&Y^`}dt`K9?WxIi*nk;Jf zHL59&R-CS_dFS(Nsu}t|u2|y8rcOpJCvsXhcwhjK&(5=5%TX+pC34~I4Y$ zII9I|2e-mn3!{#mNW~e8l`LHG?dq?kM+-`5hK_kQc9XZhEQ@w0cEd->_#NU^7ELz8 zmUE=xhqxEnV2Hss(!zMU=@AkoeOMnyZ7E5g<*1G`R=0b!ref8 zK?`15cc3Qc6U+Af@SL@l-gVc8l|*(BL&@ZgyO@pXHsVsV$&OSV3o8$b?*%RAycRVs z#*yXjopM(0`RaWeRl9DfUuN8J|ueeEW5rz?=H;TA$+Q%qmUdhe z*1H?)!g9t|oNUO;lIk??!Zi58JoN4?bW8)#*yl`}u&ZvFzPavY?&V4?X9^2pHDI)$ zTC1wOcG_{e);T*0a8r7dnTykI7i%&$HK+7odx22ib1a2UCk!mK6Kfsya>A2X(3aEn z{BXUvHL?f`sIO>Vf)chH3*TDvZH%q&Q@sm@cC-OKl4P@yE*6Vi1JeP#1prBRldrA{ zZJbqQeP;IpYEUuVv4buaQEa6V)~J#&9(YL?)_5boC>z!Fd&^r7;A(IcvUqW@7&meq zO0FwMm0>Dw+=8}Kuub^OYB^mP<>Qyg@!V_NYt_Iruwkx+1`Q_?E3N1~C(E5PfLK^M zJ=LkA2Nrh0W-{?(3kJqp+bD{rL{Ssk6(wuloTJ}iK_mx?; zTsV2Q^UMt}RTB4Nf4z1p!rL9rCJIX;P0>i(hT9pJ{uss&oZ#a@ANEn&awk>tE?3%V z!GF1S?8WdK6j z-G@c5c~fTftqoZ>sp`B0s|};bvME`ny)KNuEL1ZpE;)Bks$%6NF}m-?E?jBRTszO6 zTZdk zi`LP~DT?&$BB$rVRZ}&KZYY+W_U*F5eYE=+HKJlx1Q7#`GKB(GQLFh*B~)P(#R!7* zI_@=%;WI(_?*8#%F)XZdtj=N~EXwQb*t*<0=e5K-+M#->tC4mSt6A7xFYvtpVkJ3M z4XwA7J9(Fh5wwA(^h3NI`}899s!5o73O!^RSaTav}96C5%qhk@-wq)>&TdO^xm_Fvpl zuPKd|?5gU7FYh`C450+4p$>~WTrX_UvXGEx3vi|)0E7YWM%u@V$n#!34|P?2f*w%w zTa*(p<(8zBjlBCUx}%DQ`xQ=3PT#Dq?#DIj7hSY|=&*iq9J!DZis#2PO8>ie%IBnI15p~T4vnOIlPXTO?{ z2!PZ$=-Uy#x|ncJSGSru+))8V2L{Nx< z5Vi~dB`;2Ck@CC3trokEk>w@bSq-4)+B4u9fxc1+tlv3E*=pG0D#gYVCgPY34)k3S z(RfQ4F-*(cGcjNJJh zqS2|MQBM%GL4-E?ZWMj5L0nl@e(mP+V%wrs14{8+;qhmSaHceu~e&D9(oH((xdi2&Cm!5v( ztw(P;aoy1yZg}>Mx8MGZT#dZPbXA)@?BVSWEZK>pt2aOA=*{SN;>2~nw;V?6MW1Q0 z?LUu|vMyX-Hr{O^az=^JOkDbmsxG{U48+_!v+2UYEQ_Z7xre^2!YJak6z(m-CQAyN zRKm2+kVUhq2-zFxrnJf6AZre)8exRgUma}EU^D%+699Gp>_nUMu<3}vRrvwcBMG~{ zbJp=;S!5(C_K2&h#iD9493*d$rL zSC#kqrd|Xj>*rLxax2Qo78Is*m5kro`6WAcD3N*^DoE&CS5lsaIv7Fp&k(n>3H6fR zQTKqgL6F!yZFVhcpo78=shT2u>sN-MMsmjwBZj1l7XiB&qi!ULU|1-;9Q%MNkDN2e znIx9$*fM-C0M7L{QM+mEhGg%)s>JI!F#N?W?yC{GKd~J+zI&*X9W)UK3HvX_?KdOo z8UrL7dES+UDqm*WMOZGdy&d@HVg@%~7rjgFC6v$!ECd~nz{Fws^ylh|m9!*)9JG&L z(pxSy(ak8KR?g0)bj zRb+5jENe3_Y+eqwzknXu6(<8{30LbZ^##34;o4F!^H{spi<^!QUkLoMS?=@2FeR&d zwx}CkEJaQE+U(8UfXT*-?1m*^z6=VLvl1)cpxZ8Hjyjp!v*dO9G6ND8`5 
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..73d18c236adbabe5cec4cc557ff682bc63a5baf8
GIT binary patch
literal 65160
[base85-encoded binary payload omitted: pickled Sphinx build environment, not human-readable]
diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree
new file mode 100644
index 0000000000000000000000000000000000000000..3acd417b911278b24a5d2810fb56f09df6f5612c
GIT binary patch
literal 87794
[base85-encoded binary payload omitted: pickled doctree for index.rst, not human-readable]
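The two binary blobs above are Sphinx's pickled build caches (the environment and the doctree for index.rst); they are build outputs rather than sources, produced by the `make clean && make html` run in the `gen_docs` job, so there is nothing human-readable to review in them. A rough local equivalent is sketched below; it assumes the conventional Sphinx Makefile layout (`source/` and `build/` under core/dbt/docs) and an installed `sphinx` package, so treat the exact paths as assumptions rather than facts about this repo.

```python
# Sketch: regenerate the Sphinx artifacts committed in this patch.
# Equivalent in spirit to `make clean && make html` in core/dbt/docs.
import shutil
import subprocess
from pathlib import Path

docs_dir = Path("core/dbt/docs")
build_dir = docs_dir / "build"

shutil.rmtree(build_dir, ignore_errors=True)  # `make clean`

# `make html` drives sphinx-build with arguments like these: doctrees
# go under build/doctrees, rendered HTML under build/html.
subprocess.run(
    [
        "python", "-m", "sphinx",
        "-b", "html",
        "-d", str(build_dir / "doctrees"),
        str(docs_dir / "source"),
        str(build_dir / "html"),
    ],
    check=True,
)
```

If the project's Makefile diverges from that layout, prefer running `make html` directly, as the workflow does.
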
diff --git a/core/dbt/docs/build/html/.buildinfo b/core/dbt/docs/build/html/.buildinfo
new file mode 100644
index 00000000000..39803f13c3e
--- /dev/null
+++ b/core/dbt/docs/build/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 1ee31fc16e025fb98598189ba2cb5fcb
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/core/dbt/docs/build/html/_sources/index.rst.txt b/core/dbt/docs/build/html/_sources/index.rst.txt
new file mode 100644
index 00000000000..d5e3c6007af
--- /dev/null
+++ b/core/dbt/docs/build/html/_sources/index.rst.txt
@@ -0,0 +1,4 @@
+dbt-core's API documentation
+============================
+
+.. dbt_click:: dbt.cli.main:cli
diff --git a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js
new file mode 100644
index 00000000000..8549469dc29
--- /dev/null
+++ b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js
@@ -0,0 +1,134 @@
+/*
+ * _sphinx_javascript_frameworks_compat.js
+ * ~~~~~~~~~~
+ *
+ * Compatability shim for jQuery and underscores.js.
+ *
+ * WILL BE REMOVED IN Sphinx 6.0
+ * xref RemovedInSphinx60Warning
+ *
+ */
+
+/**
+ * select a different prefix for underscore
+ */
+$u = _.noConflict();
+
+
+/**
+ * small helper function to urldecode strings
+ *
+ * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
+ */
+jQuery.urldecode = function(x) {
+  if (!x) {
+    return x
+  }
+  return decodeURIComponent(x.replace(/\+/g, ' '));
+};
+
+/**
+ * small helper function to urlencode strings
+ */
+jQuery.urlencode = encodeURIComponent;
+
+/**
+ * This function returns the parsed url parameters of the
+ * current request. Multiple values per key are supported,
+ * it will always return arrays of strings for the value parts.
+ */
+jQuery.getQueryParameters = function(s) {
+  if (typeof s === 'undefined')
+    s = document.location.search;
+  var parts = s.substr(s.indexOf('?') + 1).split('&');
+  var result = {};
+  for (var i = 0; i < parts.length; i++) {
+    var tmp = parts[i].split('=', 2);
+    var key = jQuery.urldecode(tmp[0]);
+    var value = jQuery.urldecode(tmp[1]);
+    if (key in result)
+      result[key].push(value);
+    else
+      result[key] = [value];
+  }
+  return result;
+};
+
+/**
+ * highlight a given string on a jquery object by wrapping it in
+ * span elements with the given class name.
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? 
rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/core/dbt/docs/build/html/_static/alabaster.css b/core/dbt/docs/build/html/_static/alabaster.css new file mode 100644 index 00000000000..0eddaeb07d1 --- /dev/null +++ b/core/dbt/docs/build/html/_static/alabaster.css @@ -0,0 +1,701 @@ +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: Georgia, serif; + font-size: 17px; + background-color: #fff; + color: #000; + margin: 0; + padding: 0; +} + + +div.document { + width: 940px; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 220px; +} + +div.sphinxsidebar { + width: 220px; + font-size: 14px; + line-height: 1.5; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #fff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +div.body > .section { + text-align: left; +} + +div.footer { + width: 940px; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +p.caption { + font-family: inherit; + font-size: inherit; +} + + +div.relations { + display: none; +} + + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0; + margin: -10px 0 0 0px; + text-align: center; +} + +div.sphinxsidebarwrapper h1.logo { + margin-top: -10px; + text-align: center; + margin-bottom: 5px; + text-align: left; +} + +div.sphinxsidebarwrapper h1.logo-name { + margin-top: 0px; +} + +div.sphinxsidebarwrapper p.blurb { + margin-top: 0; + font-style: normal; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: Georgia, serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar ul li.toctree-l1 > a { + font-size: 120%; +} + +div.sphinxsidebar ul li.toctree-l2 > a { + font-size: 110%; +} + +div.sphinxsidebar input { + border: 1px solid #CCC; + font-family: Georgia, serif; + font-size: 1em; +} + +div.sphinxsidebar hr { + border: none; + height: 1px; + color: #AAA; + background: #AAA; + + text-align: left; + margin-left: 0; + width: 50%; +} + +div.sphinxsidebar .badge { + border-bottom: none; +} + +div.sphinxsidebar .badge:hover { + border-bottom: none; +} + +/* To address an issue with donation coming after search */ +div.sphinxsidebar h3.donation { + margin-top: 10px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: Georgia, serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + 
+div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #DDD; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #EAEAEA; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + margin: 20px 0px; + padding: 10px 30px; + background-color: #EEE; + border: 1px solid #CCC; +} + +div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fafafa; +} + +div.admonition p.admonition-title { + font-family: Georgia, serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: #fff; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.note { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.tip { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.seealso { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.topic { + background-color: #EEE; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt, code { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +.hll { + background-color: #FFC; + margin: 0 -12px; + padding: 0 12px; + display: block; +} + +img.screenshot { +} + +tt.descname, tt.descclassname, code.descname, code.descclassname { + font-size: 0.95em; +} + +tt.descname, code.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #EEE; + background: #FDFDFD; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.field-list p { + margin-bottom: 0.8em; +} + +/* Cloned from + * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 + */ +.field-name { + -moz-hyphens: manual; + -ms-hyphens: 
manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +table.footnote td.label { + width: .1px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #EEE; + padding: 7px 30px; + margin: 15px 0px; + line-height: 1.3em; +} + +div.viewcode-block:target { + background: #ffd; +} + +dl pre, blockquote pre, li pre { + margin-left: 0; + padding-left: 30px; +} + +tt, code { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, code.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fff; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt, a:hover code { + background: #EEE; +} + + +@media screen and (max-width: 870px) { + + div.sphinxsidebar { + display: none; + } + + div.document { + width: 100%; + + } + + div.documentwrapper { + margin-left: 0; + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + } + + div.bodywrapper { + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + margin-left: 0; + } + + ul { + margin-left: 0; + } + + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 30px; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .bodywrapper { + margin: 0; + } + + .footer { + width: auto; + } + + .github { + display: none; + } + + + +} + + + +@media screen and (max-width: 875px) { + + body { + margin: 0; + padding: 20px 30px; + } + + div.documentwrapper { + float: none; + background: #fff; + } + + div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: #FFF; + } + + div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, + div.sphinxsidebar h3 a { + color: #fff; + } + + div.sphinxsidebar a { + color: #AAA; + } + + div.sphinxsidebar p.logo { + display: none; + } + + div.document { + width: 100%; + margin: 0; + } + + div.footer { + display: none; + } + + div.bodywrapper { + margin: 0; + } + + div.body { + min-height: 0; + padding: 0; + } + + .rtd_doc_footer { + display: none; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .footer { + width: auto; + } + + .github { + display: none; + } +} + + +/* misc. */ + +.revsys-inline { + display: none!important; +} + +/* Make nested-list/multi-paragraph items look better in Releases changelog + * pages. Without this, docutils' magical list fuckery causes inconsistent + * formatting between different release sub-lists. 
+ */ +div#changelog > div.section > ul > li > p:only-child { + margin-bottom: 0; +} + +/* Hide fugly table cell borders in ..bibliography:: directive output */ +table.docutils.citation, table.docutils.citation td, table.docutils.citation th { + border: none; + /* Below needed in some edge cases; if not applied, bottom shadows appear */ + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + + +/* relbar */ + +.related { + line-height: 30px; + width: 100%; + font-size: 0.9rem; +} + +.related.top { + border-bottom: 1px solid #EEE; + margin-bottom: 20px; +} + +.related.bottom { + border-top: 1px solid #EEE; +} + +.related ul { + padding: 0; + margin: 0; + list-style: none; +} + +.related li { + display: inline; +} + +nav#rellinks { + float: right; +} + +nav#rellinks li+li:before { + content: "|"; +} + +nav#breadcrumbs li+li:before { + content: "\00BB"; +} + +/* Hide certain items when printing */ +@media print { + div.related { + display: none; + } +} \ No newline at end of file diff --git a/core/dbt/docs/build/html/_static/basic.css b/core/dbt/docs/build/html/_static/basic.css new file mode 100644 index 00000000000..4e9a9f1faca --- /dev/null +++ b/core/dbt/docs/build/html/_static/basic.css @@ -0,0 +1,900 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + 
font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title 
{ + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, 
.sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable 
td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/core/dbt/docs/build/html/_static/custom.css b/core/dbt/docs/build/html/_static/custom.css new file mode 100644 index 00000000000..2a924f1d6a8 --- /dev/null +++ b/core/dbt/docs/build/html/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/core/dbt/docs/build/html/_static/doctools.js b/core/dbt/docs/build/html/_static/doctools.js new file mode 100644 index 00000000000..527b876ca63 --- /dev/null +++ b/core/dbt/docs/build/html/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/core/dbt/docs/build/html/_static/documentation_options.js b/core/dbt/docs/build/html/_static/documentation_options.js new file mode 100644 index 00000000000..b57ae3b8393 --- /dev/null +++ 
b/core/dbt/docs/build/html/_static/documentation_options.js
@@ -0,0 +1,14 @@
+var DOCUMENTATION_OPTIONS = {
+    URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
+    VERSION: '',
+    LANGUAGE: 'en',
+    COLLAPSE_INDEX: false,
+    BUILDER: 'html',
+    FILE_SUFFIX: '.html',
+    LINK_SUFFIX: '.html',
+    HAS_SOURCE: true,
+    SOURCELINK_SUFFIX: '.txt',
+    NAVIGATION_WITH_KEYS: false,
+    SHOW_SEARCH_SUMMARY: true,
+    ENABLE_SEARCH_SHORTCUTS: true,
+};
\ No newline at end of file
diff --git a/core/dbt/docs/build/html/_static/file.png b/core/dbt/docs/build/html/_static/file.png
new file mode 100644
index 0000000000000000000000000000000000000000..a858a410e4faa62ce324d814e4b816fff83a6fb3
GIT binary patch
literal 286
[base85-encoded binary payload omitted: 286-byte PNG icon shipped with the Sphinx theme]
diff --git a/core/dbt/docs/build/html/_static/jquery-3.6.0.js b/core/dbt/docs/build/html/_static/jquery-3.6.0.js
new file mode 100644
index 00000000000..fc6c299b73e
--- /dev/null
+++ b/core/dbt/docs/build/html/_static/jquery-3.6.0.js
@@ -0,0 +1,10881 @@
+/*!
+ * jQuery JavaScript Library v3.6.0
+ * https://jquery.com/
+ *
+ * Includes Sizzle.js
+ * https://sizzlejs.com/
+ *
+ * Copyright OpenJS Foundation and other contributors
+ * Released under the MIT license
+ * https://jquery.org/license
+ *
+ * Date: 2021-03-02T17:08Z
+ */
+( function( global, factory ) {
+
+	"use strict";
+
+	if ( typeof module === "object" && typeof module.exports === "object" ) {
+
+		// For CommonJS and CommonJS-like environments where a proper `window`
+		// is present, execute the factory and get jQuery.
+		// For environments that do not have a `window` with a `document`
+		// (such as Node.js), expose a factory as module.exports.
+		// This accentuates the need for the creation of a real `window`.
+		// e.g. var jQuery = require("jquery")(window);
+		// See ticket #14549 for more info.
+		module.exports = global.document ?
+			factory( global, true ) :
+			function( w ) {
+				if ( !w.document ) {
+					throw new Error( "jQuery requires a window with a document" );
+				}
+				return factory( w );
+			};
+	} else {
+		factory( global );
+	}
+
+// Pass this if window is not defined yet
+} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
+
+// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1
+// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode
+// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common
+// enough that all such attempts are guarded in a try block.
+"use strict";
+
+var arr = [];
+
+var getProto = Object.getPrototypeOf;
+
+var slice = arr.slice;
+
+var flat = arr.flat ? function( array ) {
+	return arr.flat.call( array );
+} : function( array ) {
+	return arr.concat.apply( [], array );
+};
+
+
+var push = arr.push;
+
+var indexOf = arr.indexOf;
+
+var class2type = {};
+
+var toString = class2type.toString;
+
+var hasOwn = class2type.hasOwnProperty;
+
+var fnToString = hasOwn.toString;
+
+var ObjectFunctionString = fnToString.call( Object );
+
+var support = {};
+
+var isFunction = function isFunction( obj ) {
+
+		// Support: Chrome <=57, Firefox <=52
+		// In some browsers, typeof returns "function" for HTML elements
+		// (i.e., `typeof document.createElement( "object" ) === "function"`).
+		// We don't want to classify *any* DOM node as a function.
+ // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. 
+ each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && 
proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
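+
+ // [Editor's note, not part of upstream jQuery] A short sketch of the
+ // array/object duality implemented by jQuery.each and jQuery.map above;
+ // the values shown are illustrative:
+ //
+ //     jQuery.each( [ "a", "b" ], function( i, v ) {} ); // i: 0, 1  v: "a", "b"
+ //     jQuery.each( { x: 1 }, function( k, v ) {} );     // k: "x"   v: 1
+ //     jQuery.map( [ 1, [ 2, 3 ] ], function( v ) {      // returned arrays are
+ //         return v;                                     // flattened: [ 1, 2, 3 ]
+ //     } );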
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? 
+ String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? 
context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 ] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
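+ // [Editor's note, not part of upstream Sizzle] When the detection below
+ // sets support.scope, a scoped query such as
+ //
+ //     context.querySelectorAll( ":scope > .item" ) // hypothetical selector
+ //
+ // can be used as-is; otherwise Sizzle falls back to tagging the context with
+ // a temporary id and prefixing each selector (the "#" + nid branch in
+ // Sizzle() above).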
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) {
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ return context.getElementsByTagName( tag );
+
+ // DocumentFragment nodes don't have gEBTN
+ } else if ( support.qsa ) {
+ return context.querySelectorAll( tag );
+ }
+ } :
+
+ function( tag, context ) {
+ var elem,
+ tmp = [],
+ i = 0,
+
+ // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+ results = context.getElementsByTagName( tag );
+
+ // Filter out possible comments
+ if ( tag === "*" ) {
+ while ( ( elem = results[ i++ ] ) ) {
+ if ( elem.nodeType === 1 ) {
+ tmp.push( elem );
+ }
+ }
+
+ return tmp;
+ }
+ return results;
+ };
+
+ // Class
+ Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
+ if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+ return context.getElementsByClassName( className );
+ }
+ };
+
+ /* QSA/matchesSelector
+ ---------------------------------------------------------------------- */
+
+ // QSA and matchesSelector support
+
+ // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+ rbuggyMatches = [];
+
+ // qSa(:focus) reports false when true (Chrome 21)
+ // We allow this because of a bug in IE8/9 that throws an error
+ // whenever `document.activeElement` is accessed on an iframe
+ // So, we allow :focus to pass through QSA all the time to avoid the IE error
+ // See https://bugs.jquery.com/ticket/13378
+ rbuggyQSA = [];
+
+ if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
+
+ // Build QSA regex
+ // Regex strategy adopted from Diego Perini
+ assert( function( el ) {
+
+ var input;
+
+ // Select is set to empty string on purpose
+ // This is to test IE's treatment of not explicitly
+ // setting a boolean content attribute,
+ // since its presence should be enough
+ // https://bugs.jquery.com/ticket/12359
+ docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+ "<select id='" + expando + "-\r\\' msallowcapture=''>" +
+ "<option selected=''></option></select>";
+
+ // Support: IE8, Opera 11-12.16
+ // Nothing should be selected when empty strings follow ^= or $= or *=
+ // The test attribute must be unknown in Opera but "safe" for WinRT
+ // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+ if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
+ rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+ }
+
+ // Support: IE8
+ // Boolean attributes and "value" are not treated correctly
+ if ( !el.querySelectorAll( "[selected]" ).length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+ }
+
+ // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+ if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+ rbuggyQSA.push( "~=" );
+ }
+
+ // Support: IE 11+, Edge 15 - 18+
+ // IE 11/Edge don't find elements on a `[name='']` query in some cases.
+ // Adding a temporary attribute to the document before the selection works
+ // around the issue.
+ // Interestingly, IE 10 & older don't seem to have the issue.
+ input = document.createElement( "input" );
+ input.setAttribute( "name", "" );
+ el.appendChild( input );
+ if ( !el.querySelectorAll( "[name='']" ).length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
+ whitespace + "*(?:''|\"\")" );
+ }
+
+ // Webkit/Opera - :checked should return selected option elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ // IE8 throws error here and will not see later tests
+ if ( !el.querySelectorAll( ":checked" ).length ) {
+ rbuggyQSA.push( ":checked" );
+ }
+
+ // Support: Safari 8+, iOS 8+
+ // https://bugs.webkit.org/show_bug.cgi?id=136851
+ // In-page `selector#id sibling-combinator selector` fails
+ if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+ rbuggyQSA.push( ".#.+[+~]" );
+ }
+
+ // Support: Firefox <=3.6 - 5 only
+ // Old Firefox doesn't throw on a badly-escaped identifier.
+ el.querySelectorAll( "\\\f" );
+ rbuggyQSA.push( "[\\r\\n\\f]" );
+ } );
+
+ assert( function( el ) {
+ el.innerHTML = "<a href='' disabled='disabled'></a>" +
+ "<select disabled='disabled'><option/></select>";
+
+ // Support: Windows 8 Native Apps
+ // The type and name attributes are restricted during .innerHTML assignment
+ var input = document.createElement( "input" );
+ input.setAttribute( "type", "hidden" );
+ el.appendChild( input ).setAttribute( "name", "D" );
+
+ // Support: IE8
+ // Enforce case-sensitivity of name attribute
+ if ( el.querySelectorAll( "[name=d]" ).length ) {
+ rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
+ }
+
+ // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
+ // IE8 throws error here and will not see later tests
+ if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
+ rbuggyQSA.push( ":enabled", ":disabled" );
+ }
+
+ // Support: IE9-11+
+ // IE's :disabled selector does not pick up the children of disabled fieldsets
+ docElem.appendChild( el ).disabled = true;
+ if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
+ rbuggyQSA.push( ":enabled", ":disabled" );
+ }
+
+ // Support: Opera 10 - 11 only
+ // Opera 10-11 does not throw on post-comma invalid pseudos
+ el.querySelectorAll( "*,:x" );
+ rbuggyQSA.push( ",.*:" );
+ } );
+ }
+
+ if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
+ docElem.webkitMatchesSelector ||
+ docElem.mozMatchesSelector ||
+ docElem.oMatchesSelector ||
+ docElem.msMatchesSelector ) ) ) ) {
+
+ assert( function( el ) {
+
+ // Check to see if it's possible to do matchesSelector
+ // on a disconnected node (IE 9)
+ support.disconnectedMatch = matches.call( el, "*" );
+
+ // This should fail with an exception
+ // Gecko does not error, returns false instead
+ matches.call( el, "[s!='']:x" );
+ rbuggyMatches.push( "!=", pseudos );
+ } );
+ }
+
+ rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
+ rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
+
+ /* Contains
+ ---------------------------------------------------------------------- */
+ hasCompare = rnative.test( docElem.compareDocumentPosition );
+
+ // Element contains another
+ // Purposefully self-exclusive
+ // As in, an element does not contain itself
+ contains = hasCompare || rnative.test( docElem.contains ) ?
+ function( a, b ) {
+ var adown = a.nodeType === 9 ? a.documentElement : a,
+ bup = b && b.parentNode;
+ return a === bup || !!( bup && bup.nodeType === 1 && (
+ adown.contains ?
+ adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? + + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
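+
+ // (Added illustration: selectors that need context, e.g. positional pseudos
+ // like ":eq", force i to 0 here, so the right-to-left seed search below is
+ // skipped and the compiled matcher sees the full selector.)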
0 : tokens.length;
+ while ( i-- ) {
+ token = tokens[ i ];
+
+ // Abort if we hit a combinator
+ if ( Expr.relative[ ( type = token.type ) ] ) {
+ break;
+ }
+ if ( ( find = Expr.find[ type ] ) ) {
+
+ // Search, expanding context for leading sibling combinators
+ if ( ( seed = find(
+ token.matches[ 0 ].replace( runescape, funescape ),
+ rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
+ context
+ ) ) ) {
+
+ // If seed is empty or no tokens remain, we can return early
+ tokens.splice( i, 1 );
+ selector = seed.length && toSelector( tokens );
+ if ( !selector ) {
+ push.apply( results, seed );
+ return results;
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ // Compile and execute a filtering function if one is not provided
+ // Provide `match` to avoid retokenization if we modified the selector above
+ ( compiled || compile( selector, match ) )(
+ seed,
+ context,
+ !documentIsHTML,
+ results,
+ !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+ );
+ return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert( function( el ) {
+
+ // Should return 1, but returns 4 (following)
+ return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
+} );
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert( function( el ) {
+ el.innerHTML = "<a href='#'></a>";
+ return el.firstChild.getAttribute( "href" ) === "#";
+} ) ) {
+ addHandle( "type|href|height|width", function( elem, name, isXML ) {
+ if ( !isXML ) {
+ return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+ }
+ } );
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert( function( el ) {
+ el.innerHTML = "<input/>";
+ el.firstChild.setAttribute( "value", "" );
+ return el.firstChild.getAttribute( "value" ) === "";
+} ) ) {
+ addHandle( "value", function( elem, _name, isXML ) {
+ if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+ return elem.defaultValue;
+ }
+ } );
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert( function( el ) {
+ return el.getAttribute( "disabled" ) == null;
+} ) ) {
+ addHandle( booleans, function( elem, name, isXML ) {
+ var val;
+ if ( !isXML ) {
+ return elem[ name ] === true ? name.toLowerCase() :
+ ( val = elem.getAttributeNode( name ) ) && val.specified ?
+ val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
+ jQuery( selector ) :
+ selector || [],
+ false
+ ).length;
+ }
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+ // A simple way to check for HTML strings
+ // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+ // Strict HTML recognition (#11290: must start with <)
+ // Shortcut simple #id case for speed
+ rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+ init = jQuery.fn.init = function( selector, context, root ) {
+ var match, elem;
+
+ // HANDLE: $(""), $(null), $(undefined), $(false)
+ if ( !selector ) {
+ return this;
+ }
+
+ // Method init() accepts an alternate rootjQuery
+ // so migrate can support jQuery.sub (gh-2101)
+ root = root || rootjQuery;
+
+ // Handle HTML strings
+ if ( typeof selector === "string" ) {
+ if ( selector[ 0 ] === "<" &&
+ selector[ selector.length - 1 ] === ">" &&
+ selector.length >= 3 ) {
+
+ // Assume that strings that start and end with <> are HTML and skip the regex check
+ match = [ null, selector, null ];
+
+ } else {
+ match = rquickExpr.exec( selector );
+ }
+
+ // Match html or make sure no context is specified for #id
+ if ( match && ( match[ 1 ] || !context ) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[ 1 ] ) {
+ context = context instanceof jQuery ? context[ 0 ] : context;
+
+ // Option to run scripts is true for back-compat
+ // Intentionally let the error be thrown if parseHTML is not present
+ jQuery.merge( this, jQuery.parseHTML(
+ match[ 1 ],
+ context && context.nodeType ? context.ownerDocument || context : document,
+ true
+ ) );
+
+ // HANDLE: $(html, props)
+ if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+ for ( match in context ) {
+
+ // Properties of context are called as methods if possible
+ if ( isFunction( this[ match ] ) ) {
+ this[ match ]( context[ match ] );
+
+ // ...and otherwise set as attributes
+ } else {
+ this.attr( match, context[ match ] );
+ }
+ }
+ }
+
+ return this;
+
+ // HANDLE: $(#id)
+ } else {
+ elem = document.getElementById( match[ 2 ] );
+
+ if ( elem ) {
+
+ // Inject the element directly into the jQuery object
+ this[ 0 ] = elem;
+ this.length = 1;
+ }
+ return this;
+ }
+
+ // HANDLE: $(expr, $(...))
+ } else if ( !context || context.jquery ) {
+ return ( context || root ).find( selector );
+
+ // HANDLE: $(expr, context)
+ // (which is just equivalent to: $(context).find(expr)
+ } else {
+ return this.constructor( context ).find( selector );
+ }
+
+ // HANDLE: $(DOMElement)
+ } else if ( selector.nodeType ) {
+ this[ 0 ] = selector;
+ this.length = 1;
+ return this;
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( isFunction( selector ) ) {
+ return root.ready !== undefined ?
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
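+
+ // (Added illustration: nodeType 11 is a DocumentFragment, so an element whose
+ // parentNode is a detached fragment reports no parent here.)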
parent : null;
+ },
+ parents: function( elem ) {
+ return dir( elem, "parentNode" );
+ },
+ parentsUntil: function( elem, _i, until ) {
+ return dir( elem, "parentNode", until );
+ },
+ next: function( elem ) {
+ return sibling( elem, "nextSibling" );
+ },
+ prev: function( elem ) {
+ return sibling( elem, "previousSibling" );
+ },
+ nextAll: function( elem ) {
+ return dir( elem, "nextSibling" );
+ },
+ prevAll: function( elem ) {
+ return dir( elem, "previousSibling" );
+ },
+ nextUntil: function( elem, _i, until ) {
+ return dir( elem, "nextSibling", until );
+ },
+ prevUntil: function( elem, _i, until ) {
+ return dir( elem, "previousSibling", until );
+ },
+ siblings: function( elem ) {
+ return siblings( ( elem.parentNode || {} ).firstChild, elem );
+ },
+ children: function( elem ) {
+ return siblings( elem.firstChild );
+ },
+ contents: function( elem ) {
+ if ( elem.contentDocument != null &&
+
+ // Support: IE 11+
+ // <object> elements with no `data` attribute has an object
+ // `contentDocument` with a `null` prototype.
+ getProto( elem.contentDocument ) ) {
+
+ return elem.contentDocument;
+ }
+
+ // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
+ // Treat the template element as a regular one in browsers that
+ // don't support it.
+ if ( nodeName( elem, "template" ) ) {
+ elem = elem.content || elem;
+ }
+
+ return jQuery.merge( [], elem.childNodes );
+ }
+}, function( name, fn ) {
+ jQuery.fn[ name ] = function( until, selector ) {
+ var matched = jQuery.map( this, fn, until );
+
+ if ( name.slice( -5 ) !== "Until" ) {
+ selector = until;
+ }
+
+ if ( selector && typeof selector === "string" ) {
+ matched = jQuery.filter( selector, matched );
+ }
+
+ if ( this.length > 1 ) {
+
+ // Remove duplicates
+ if ( !guaranteedUnique[ name ] ) {
+ jQuery.uniqueSort( matched );
+ }
+
+ // Reverse order for parents* and prev-derivatives
+ if ( rparentsprev.test( name ) ) {
+ matched.reverse();
+ }
+ }
+
+ return this.pushStack( matched );
+ };
+} );
+var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
+
+
+
+// Convert String-formatted options into Object-formatted ones
+function createOptions( options ) {
+ var object = {};
+ jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
+ object[ flag ] = true;
+ } );
+ return object;
+}
+
+/*
+ * Create a callback list using the following parameters:
+ *
+ * options: an optional list of space-separated options that will change how
+ * the callback list behaves or a more traditional option object
+ *
+ * By default a callback list will act like an event callback list and can be
+ * "fired" multiple times.
+ *
+ * Possible options:
+ *
+ * once: will ensure the callback list can only be fired once (like a Deferred)
+ *
+ * memory: will keep track of previous values and will call any callback added
+ * after the list has been fired right away with the latest "memorized"
+ * values (like a Deferred)
+ *
+ * unique: will ensure a callback can only be added once (no duplicate in the list)
+ *
+ * stopOnFalse: interrupt callings when a callback returns false
+ *
+ */
+jQuery.Callbacks = function( options ) {
+
+ // Convert options from String-formatted to Object-formatted if needed
+ // (we check in cache first)
+ options = typeof options === "string" ?
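+
+ // Usage sketch for the options documented above (added illustration, not part
+ // of the upstream file):
+ //   var cb = jQuery.Callbacks( "once memory" );
+ //   cb.add( function( v ) { console.log( v ); } );
+ //   cb.fire( "a" );                                       // logs "a"
+ //   cb.add( function( v ) { console.log( v + "!" ); } );  // logs "a!" (memory)
+ //   cb.fire( "b" );                                       // no-op ("once" already fired)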
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
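+// For instance (added illustration): a TypeError thrown inside a .then() handler
+// still rejects the deferred, but the hook below also surfaces it via console.warn
+// rather than letting it vanish into a silently rejected promise.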
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
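+
+ // (Added illustration: a raw value such as .css( "width", "10px" ) passes
+ // through unchanged; a function value is invoked once per element, with the
+ // element as `this` and ( index, current value ) as arguments.)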
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? 
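+
+ // ([*] added illustration: access( owner, "k", v ) returns v, while the bulk
+ // form access( owner, { a: 1, b: 2 } ) has value === undefined and returns
+ // the object of properties itself.)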
value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
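+
+ // For example (added illustration), the two caches never mix:
+ //   jQuery.data( elem, "color", "red" );  // user data, visible via .data()
+ //   jQuery._data( elem, "events" );       // private cache (event handlers, queues)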
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
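+
+ // (Added illustration: keys are camelCased on write, so after
+ // jQuery( el ).data( "foo-bar", 42 ) both .data( "fooBar" ) and
+ // .data( "foo-bar" ) read back 42.)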
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. + if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
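+
+ // (Added illustration: unitless properties such as "opacity" or "zIndex" are
+ // listed in jQuery.cssNumber and default to no unit; anything else defaults
+ // to "px".)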
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
+
+var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
+
+
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+
+	// Support: IE <=9 only
+	// IE <=9 replaces <option> elements with their contents when inserted outside of
+	// the select element.
+	div.innerHTML = "<option></option>";
+	support.option = !!div.lastChild;
+} )();
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
    " ], + col: [ 2, "", "
    " ], + tr: [ 2, "", "
    " ], + td: [ 3, "", "
    " ], + + _default: [ 0, "", "" ] +}; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// Support: IE <=9 only +if ( !support.option ) { + wrapMap.optgroup = wrapMap.option = [ 1, "" ]; +} + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 - 11+ +// focus() and blur() are asynchronous, except when they are no-op. 
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
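+				//
+				// An illustrative sketch, not from the library source (the event names
+				// are placeholders): a handler bound as "click.menu.ui" carries the
+				// namespace string "menu.ui", so it runs for .trigger( "click" ) and
+				// .trigger( "click.menu" ), but is skipped for .trigger( "click.nav" ),
+				// whose rnamespace regexp does not match "menu.ui".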
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. 
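+			//
+			// Illustrative usage, not from the source (selector and handler are
+			// placeholders): binding the first click handler to a checkable input,
+			//
+			//     jQuery( "input[type=checkbox]" ).on( "click", function() {
+			//         console.log( this.checked ); // reflects the post-click state
+			//     } );
+			//
+			// lands in this setup hook, and leverageNative() below takes over so
+			// triggered clicks observe the same checked state as native ones.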
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. 
If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. + return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). 
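+//
+// Illustrative usage, not from the library source (the selector and class are
+// placeholders): the delegateType/bindType mapping below is what lets delegated
+// handlers work for these non-bubbling events:
+//
+//     jQuery( document ).on( "mouseenter", ".item", function() {
+//         jQuery( this ).addClass( "hover" );
+//     } );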
+jQuery.each( {
+	mouseenter: "mouseover",
+	mouseleave: "mouseout",
+	pointerenter: "pointerover",
+	pointerleave: "pointerout"
+}, function( orig, fix ) {
+	jQuery.event.special[ orig ] = {
+		delegateType: fix,
+		bindType: fix,
+
+		handle: function( event ) {
+			var ret,
+				target = this,
+				related = event.relatedTarget,
+				handleObj = event.handleObj;
+
+			// For mouseenter/leave call the handler if related is outside the target.
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event )  dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	// Support: IE <=10 - 11, Edge 12 - 13 only
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
+		elem.type = elem.type.slice( 5 );
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+	if ( dataPriv.hasData( src ) ) {
+		pdataOld = dataPriv.get( src );
+		events = pdataOld.events;
+
+		if ( events ) {
+			dataPriv.remove( dest, "handle events" );
+
+			for ( type in events ) {
+				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+					jQuery.event.add( dest, type, events[ type ][ i ] );
+				}
+			}
+		}
+	}
+
+	// 2. 
Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + 
support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. + trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. 
+ // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
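+		//
+		// For instance (an illustrative sketch, property name made up):
+		// jQuery( el ).css( "--main-color", "#c00" ) keeps the name "--main-color"
+		// verbatim and is routed through style.setProperty() further down, rather
+		// than being camelCased or vendor-prefixed.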
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. 
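+	// Editorial note (not part of the upstream source): the guard order below
+	// matters; getClientRects() is evaluated first, so a disconnected node
+	// yields an empty list and short-circuits before the
+	// getBoundingClientRect() call that IE would throw on.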
+ ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? + hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
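+	// Editorial note (not part of the upstream source): recording the three
+	// values separately lets the anim.always() callback further down restore,
+	// e.g., an explicit overflowX: "scroll" exactly, instead of re-deriving
+	// it from the shorthand.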
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
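+	// Editorial illustration (not part of the upstream source): for
+	// props = { padding: "10px 20px" }, the expand hook defined earlier yields
+	// { paddingTop: "10px", paddingRight: "20px", paddingBottom: "10px",
+	// paddingLeft: "20px" }, and each expanded key inherits the easing that
+	// was registered for "padding".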
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
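+	// Editorial illustration (not part of the upstream source): e.g.
+	// jQuery( form ).trigger( "submit" ) reaches this branch and ends up
+	// invoking the native form.submit() unless a handler called
+	// event.preventDefault().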
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? 
+ jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } 
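+	// Editorial note (not part of the upstream source): returning a dataType
+	// string from a prefilter redirects the search; inspect() recurses on it,
+	// which is how e.g. "jsonp" requests hand off to the "script" transport.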
); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev 
can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + + +
Index

\ No newline at end of file
diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html
new file mode 100644
index 00000000000..d4238bb08c3
--- /dev/null
+++ b/core/dbt/docs/build/html/index.html
@@ -0,0 +1,855 @@
dbt-core’s API documentation — dbt-core documentation
dbt-core’s API documentation

Command: build

defer (boolean): If set, defer to the state variable for resolving unselected nodes.
exclude (string): Specify the nodes to exclude.
fail_fast (boolean): Stop execution on first failure.
full_refresh (boolean): If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.
indirect_selection (choice: [‘eager’, ‘cautious’]): Select all tests that are adjacent to selected resources, even if the tests themselves have not been explicitly selected.
log_path (path): Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.
models (string): Specify the nodes to include.
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
selector (string): The selector name to use, as defined in selectors.yml
show (boolean): Show a sample of the loaded data in the terminal
state (path): If set, use the given directory as the source for json files to compare with this project.
store_failures (boolean): Store test results (failing rows) in the database
target (string): Which target to load for the given profile
target_path (path): Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.
threads (int): Specify number of threads to use while executing models. Overrides settings in profiles.yml.
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
version_check (boolean): Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)
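The vars option above takes an inline YAML string. As a minimal sketch of what that string looks like to a YAML parser (PyYAML's safe_load stands in for dbt's own loading here, and the variable name is just the placeholder from the help text):

import yaml

# The '--vars' value from the help text above, parsed as YAML.
# A flow-style mapping like this yields a plain Python dict.
cli_value = "{my_variable: my_value}"
parsed = yaml.safe_load(cli_value)
assert parsed == {"my_variable": "my_value"}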

Command: clean

profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
target (string): Which target to load for the given profile
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
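Most commands here share the same profiles_dir lookup. A short sketch of the documented search order in plain Python (illustrative only, not dbt's actual loader):

from pathlib import Path

# Documented order: current working directory first, then HOME/.dbt/
candidates = [Path.cwd() / "profiles.yml", Path.home() / ".dbt" / "profiles.yml"]
profiles_path = next((p for p in candidates if p.exists()), None)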

Command: compile

defer (boolean): If set, defer to the state variable for resolving unselected nodes.
exclude (string): Specify the nodes to exclude.
full_refresh (boolean): If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.
log_path (path): Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.
models (string): Specify the nodes to include.
parse_only (boolean): TODO: No help text currently available
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
selector (string): The selector name to use, as defined in selectors.yml
state (path): If set, use the given directory as the source for json files to compare with this project.
target (string): Which target to load for the given profile
target_path (path): Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.
threads (int): Specify number of threads to use while executing models. Overrides settings in profiles.yml.
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
version_check (boolean): Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

Command: debug

config_dir (string): If specified, DBT will show path information for this project
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
target (string): Which target to load for the given profile
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
version_check (boolean): Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

Command: deps

profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
target (string): Which target to load for the given profile
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

    Command: docs


Command: init

profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
skip_profile_setup (boolean): Skip interactive profile setup.
target (string): Which target to load for the given profile
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

Command: list

exclude (string): Specify the nodes to exclude.
indirect_selection (choice: [‘eager’, ‘cautious’]): Select all tests that are adjacent to selected resources, even if the tests themselves have not been explicitly selected.
models (string): Specify the nodes to include.
output (choice: [‘json’, ‘name’, ‘path’, ‘selector’]): TODO: No current help text
output_keys (string): TODO: No current help text
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
resource_type (choice: [‘metric’, ‘source’, ‘analysis’, ‘model’, ‘test’, ‘exposure’, ‘snapshot’, ‘seed’, ‘default’, ‘all’]): TODO: No current help text
selector (string): The selector name to use, as defined in selectors.yml
state (path): If set, use the given directory as the source for json files to compare with this project.
target (string): Which target to load for the given profile
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
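The underscored names above are the Python argument names; on the command line they become dashed flags (for example, resource_type becomes --resource-type). A sketch of driving dbt list from Python and reading its JSON output, assuming a dbt executable on PATH:

import json
import subprocess

# One JSON object per selected node; skip any interleaved log lines.
result = subprocess.run(
    ["dbt", "ls", "--resource-type", "model", "--output", "json"],
    capture_output=True, text=True, check=True,
)
nodes = [json.loads(line) for line in result.stdout.splitlines()
         if line.lstrip().startswith("{")]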

Command: parse

compile (boolean): TODO: No help text currently available
log_path (path): Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
target (string): Which target to load for the given profile
target_path (path): Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.
threads (int): Specify number of threads to use while executing models. Overrides settings in profiles.yml.
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
version_check (boolean): Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)
write_manifest (boolean): TODO: No help text currently available

Command: run

defer (boolean): If set, defer to the state variable for resolving unselected nodes.
exclude (string): Specify the nodes to exclude.
fail_fast (boolean): Stop execution on first failure.
full_refresh (boolean): If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.
log_path (path): Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.
models (string): Specify the nodes to include.
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
selector (string): The selector name to use, as defined in selectors.yml
state (path): If set, use the given directory as the source for json files to compare with this project.
target (string): Which target to load for the given profile
target_path (path): Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.
threads (int): Specify number of threads to use while executing models. Overrides settings in profiles.yml.
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
version_check (boolean): Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

Command: run_operation

args (YAML): Supply arguments to the macro. This dictionary will be mapped to the keyword arguments defined in the selected macro. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
target (string): Which target to load for the given profile
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
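The args option maps a YAML dictionary onto the selected macro's keyword arguments. A toy version of that mapping (grant_select and its parameters are invented for the example; this is not dbt's internal code path):

import yaml

def grant_select(role, schemas):  # stand-in for a project macro
    return f"grant select on {schemas} to {role}"

# e.g. dbt run-operation grant_select --args '{role: reporter, schemas: [analytics]}'
kwargs = yaml.safe_load("{role: reporter, schemas: [analytics]}")
statement = grant_select(**kwargs)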

Command: seed

exclude (string): Specify the nodes to exclude.
full_refresh (boolean): If specified, dbt will drop incremental models and fully-recalculate the incremental table from the model definition.
log_path (path): Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.
models (string): Specify the nodes to include.
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
selector (string): The selector name to use, as defined in selectors.yml
show (boolean): Show a sample of the loaded data in the terminal
state (path): If set, use the given directory as the source for json files to compare with this project.
target (string): Which target to load for the given profile
target_path (path): Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.
threads (int): Specify number of threads to use while executing models. Overrides settings in profiles.yml.
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
version_check (boolean): Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)

Command: snapshot

defer (boolean): If set, defer to the state variable for resolving unselected nodes.
exclude (string): Specify the nodes to exclude.
models (string): Specify the nodes to include.
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
selector (string): The selector name to use, as defined in selectors.yml
state (path): If set, use the given directory as the source for json files to compare with this project.
target (string): Which target to load for the given profile
threads (int): Specify number of threads to use while executing models. Overrides settings in profiles.yml.
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’

    Command: source


Command: test

defer (boolean): If set, defer to the state variable for resolving unselected nodes.
exclude (string): Specify the nodes to exclude.
fail_fast (boolean): Stop execution on first failure.
indirect_selection (choice: [‘eager’, ‘cautious’]): Select all tests that are adjacent to selected resources, even if the tests themselves have not been explicitly selected.
log_path (path): Configure the ‘log-path’. Only applies this setting for the current run. Overrides the ‘DBT_LOG_PATH’ if it is set.
models (string): Specify the nodes to include.
profile (string): Which profile to load. Overrides setting in dbt_project.yml.
profiles_dir (path): Which directory to look in for the profiles.yml file. If not set, dbt will look in the current working directory first, then HOME/.dbt/
project_dir (path): Which directory to look in for the dbt_project.yml file. Default is the current working directory and its parents.
selector (string): The selector name to use, as defined in selectors.yml
state (path): If set, use the given directory as the source for json files to compare with this project.
store_failures (boolean): Store test results (failing rows) in the database
target (string): Which target to load for the given profile
target_path (path): Configure the ‘target-path’. Only applies this setting for the current run. Overrides the ‘DBT_TARGET_PATH’ if it is set.
threads (int): Specify number of threads to use while executing models. Overrides settings in profiles.yml.
vars (YAML): Supply variables to the project. This argument overrides variables defined in your dbt_project.yml file. This argument should be a YAML string, eg. ‘{my_variable: my_value}’
version_check (boolean): Ensure dbt’s version matches the one specified in the dbt_project.yml file (‘require-dbt-version’)
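To make the eager/cautious choice in indirect_selection concrete, here is a toy model of the rule using plain sets (illustrative, not dbt's graph code): eager keeps any test touching a selected resource, while cautious keeps only tests whose parents are all selected.

selected = {"model_a"}
test_parents = {"relationships_a_to_b": {"model_a", "model_b"}}

eager = {t for t, parents in test_parents.items() if parents & selected}
cautious = {t for t, parents in test_parents.items() if parents <= selected}

assert eager == {"relationships_a_to_b"}  # touches selected model_a
assert cautious == set()                  # model_b was not selected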
    + + + + + + + \ No newline at end of file diff --git a/core/dbt/docs/build/html/objects.inv b/core/dbt/docs/build/html/objects.inv new file mode 100644 index 0000000000000000000000000000000000000000..e46f393260842c5dac58a56bf83b50a2e3e25f20 GIT binary patch literal 250 zcmY#Z2rkIT%&Sny%qvUHE6FdaR47X=D$dN$Q!wIERtPA{&q_@$u~JA$D$z~OFG>Z9 zg+b)46oBlIj8ui9)ZE0(yp+@;h5R&yl8nq^g|y6^R0SZNoS$1zlv-SznxarzoSB!d zP?eLJq{mfp%iI6#=`-G*YXda&JU5>5^zk|yc)>KpqrK}@>XawXG%A{A#Bii){yeGR z>Y)`h$*W}Y=k6eR(=$3@1w0=@7_>Cc`kvMH)H!+P{Nqz+{SPL2Ixf}oVM^-Cuk*_E u)4F1%63KLnNz2>l)8~Lv@#Sh_s_6@|S~?uRusA + + + + + + Search — dbt-core documentation + + + + + + + + + + + + + + + + + + + + + + + + + +
Search

Searching for multiple words only shows matches that contain all words.
    + + + + + + + \ No newline at end of file diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js new file mode 100644 index 00000000000..25dd9fd3af5 --- /dev/null +++ b/core/dbt/docs/build/html/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["index"], "filenames": ["index.rst"], "titles": ["dbt-core\u2019s API documentation"], "terms": {"type": 0, "boolean": 0, "If": 0, "set": 0, "variabl": 0, "resolv": 0, "unselect": 0, "node": 0, "string": 0, "specifi": 0, "stop": 0, "execut": 0, "first": 0, "failur": 0, "drop": 0, "increment": 0, "fulli": 0, "recalcul": 0, "tabl": 0, "from": 0, "definit": 0, "choic": 0, "eager": 0, "cautiou": 0, "select": 0, "all": 0, "ar": 0, "adjac": 0, "resourc": 0, "even": 0, "thei": 0, "those": 0, "have": 0, "been": 0, "explicitli": 0, "path": 0, "configur": 0, "log": 0, "onli": 0, "appli": 0, "thi": 0, "current": 0, "overrid": 0, "dbt_log_path": 0, "i": 0, "includ": 0, "which": 0, "load": 0, "dbt_project": 0, "yml": 0, "directori": 0, "look": 0, "file": 0, "work": 0, "home": 0, "default": 0, "its": 0, "parent": 0, "The": 0, "name": 0, "us": 0, "defin": 0, "sampl": 0, "data": 0, "termin": 0, "given": 0, "json": 0, "compar": 0, "project": 0, "store": 0, "result": 0, "fail": 0, "row": 0, "databas": 0, "dbt_target_path": 0, "int": 0, "number": 0, "while": 0, "yaml": 0, "suppli": 0, "argument": 0, "your": 0, "should": 0, "eg": 0, "my_vari": 0, "my_valu": 0, "ensur": 0, "version": 0, "match": 0, "one": 0, "requir": 0, "todo": 0, "No": 0, "help": 0, "text": 0, "avail": 0, "inform": 0, "skip": 0, "inter": 0, "setup": 0, "metric": 0, "analysi": 0, "exposur": 0, "macro": 0, "dictionari": 0, "map": 0, "keyword": 0}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"dbt": 0, "core": 0, "": 0, "api": 0, "document": 0, "command": 0, "build": 0, "defer": 0, "exclud": 0, "fail_fast": 0, "full_refresh": 0, "indirect_select": 0, "log_path": 0, "model": 0, "profil": 0, "profiles_dir": 0, "project_dir": 0, "selector": 0, "show": 0, "state": 0, "store_failur": 0, "target": 0, "target_path": 0, "thread": 0, "var": 0, "version_check": 0, "clean": 0, "compil": 0, "parse_onli": 0, "debug": 0, "config_dir": 0, "dep": 0, "doc": 0, "init": 0, "skip_profile_setup": 0, "list": 0, "output": 0, "output_kei": 0, "resource_typ": 0, "pars": 0, "write_manifest": 0, "run": 0, "run_oper": 0, "arg": 0, "seed": 0, "snapshot": 0, "sourc": 0, "test": 0}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx": 57}, "alltitles": {"dbt-core\u2019s API documentation": [[0, "dbt-core-s-api-documentation"]], "Command: build": [[0, "dbt-section"]], "defer": [[0, "build|defer"], [0, "compile|defer"], [0, "run|defer"], [0, "snapshot|defer"], [0, "test|defer"]], "exclude": [[0, "build|exclude"], [0, "compile|exclude"], [0, "list|exclude"], [0, "run|exclude"], [0, "seed|exclude"], [0, "snapshot|exclude"], [0, "test|exclude"]], "fail_fast": [[0, "build|fail_fast"], [0, "run|fail_fast"], [0, "test|fail_fast"]], "full_refresh": [[0, "build|full_refresh"], [0, "compile|full_refresh"], [0, "run|full_refresh"], [0, "seed|full_refresh"]], "indirect_selection": [[0, "build|indirect_selection"], [0, "list|indirect_selection"], [0, "test|indirect_selection"]], "log_path": [[0, "build|log_path"], [0, "compile|log_path"], 
[0, "parse|log_path"], [0, "run|log_path"], [0, "seed|log_path"], [0, "test|log_path"]], "models": [[0, "build|models"], [0, "compile|models"], [0, "list|models"], [0, "run|models"], [0, "seed|models"], [0, "snapshot|models"], [0, "test|models"]], "profile": [[0, "build|profile"], [0, "clean|profile"], [0, "compile|profile"], [0, "debug|profile"], [0, "deps|profile"], [0, "init|profile"], [0, "list|profile"], [0, "parse|profile"], [0, "run|profile"], [0, "run-operation|profile"], [0, "seed|profile"], [0, "snapshot|profile"], [0, "test|profile"]], "profiles_dir": [[0, "build|profiles_dir"], [0, "clean|profiles_dir"], [0, "compile|profiles_dir"], [0, "debug|profiles_dir"], [0, "deps|profiles_dir"], [0, "init|profiles_dir"], [0, "list|profiles_dir"], [0, "parse|profiles_dir"], [0, "run|profiles_dir"], [0, "run-operation|profiles_dir"], [0, "seed|profiles_dir"], [0, "snapshot|profiles_dir"], [0, "test|profiles_dir"]], "project_dir": [[0, "build|project_dir"], [0, "clean|project_dir"], [0, "compile|project_dir"], [0, "debug|project_dir"], [0, "deps|project_dir"], [0, "init|project_dir"], [0, "list|project_dir"], [0, "parse|project_dir"], [0, "run|project_dir"], [0, "run-operation|project_dir"], [0, "seed|project_dir"], [0, "snapshot|project_dir"], [0, "test|project_dir"]], "selector": [[0, "build|selector"], [0, "compile|selector"], [0, "list|selector"], [0, "run|selector"], [0, "seed|selector"], [0, "snapshot|selector"], [0, "test|selector"]], "show": [[0, "build|show"], [0, "seed|show"]], "state": [[0, "build|state"], [0, "compile|state"], [0, "list|state"], [0, "run|state"], [0, "seed|state"], [0, "snapshot|state"], [0, "test|state"]], "store_failures": [[0, "build|store_failures"], [0, "test|store_failures"]], "target": [[0, "build|target"], [0, "clean|target"], [0, "compile|target"], [0, "debug|target"], [0, "deps|target"], [0, "init|target"], [0, "list|target"], [0, "parse|target"], [0, "run|target"], [0, "run-operation|target"], [0, "seed|target"], [0, "snapshot|target"], [0, "test|target"]], "target_path": [[0, "build|target_path"], [0, "compile|target_path"], [0, "parse|target_path"], [0, "run|target_path"], [0, "seed|target_path"], [0, "test|target_path"]], "threads": [[0, "build|threads"], [0, "compile|threads"], [0, "parse|threads"], [0, "run|threads"], [0, "seed|threads"], [0, "snapshot|threads"], [0, "test|threads"]], "vars": [[0, "build|vars"], [0, "clean|vars"], [0, "compile|vars"], [0, "debug|vars"], [0, "deps|vars"], [0, "init|vars"], [0, "list|vars"], [0, "parse|vars"], [0, "run|vars"], [0, "run-operation|vars"], [0, "seed|vars"], [0, "snapshot|vars"], [0, "test|vars"]], "version_check": [[0, "build|version_check"], [0, "compile|version_check"], [0, "debug|version_check"], [0, "parse|version_check"], [0, "run|version_check"], [0, "seed|version_check"], [0, "test|version_check"]], "Command: clean": [[0, "dbt-section"]], "Command: compile": [[0, "dbt-section"]], "parse_only": [[0, "compile|parse_only"]], "Command: debug": [[0, "dbt-section"]], "config_dir": [[0, "debug|config_dir"]], "Command: deps": [[0, "dbt-section"]], "Command: docs": [[0, "dbt-section"]], "Command: init": [[0, "dbt-section"]], "skip_profile_setup": [[0, "init|skip_profile_setup"]], "Command: list": [[0, "dbt-section"]], "output": [[0, "list|output"]], "output_keys": [[0, "list|output_keys"]], "resource_type": [[0, "list|resource_type"]], "Command: parse": [[0, "dbt-section"]], "compile": [[0, "parse|compile"]], "write_manifest": [[0, "parse|write_manifest"]], "Command: run": [[0, "dbt-section"]], "Command: 
run_operation": [[0, "dbt-section"]], "args": [[0, "run-operation|args"]], "Command: seed": [[0, "dbt-section"]], "Command: snapshot": [[0, "dbt-section"]], "Command: source": [[0, "dbt-section"]], "Command: test": [[0, "dbt-section"]]}, "indexentries": {}}) \ No newline at end of file diff --git a/core/dbt/docs/source/conf.py b/core/dbt/docs/source/conf.py index 17ff44e41a0..d9962bbfc8b 100644 --- a/core/dbt/docs/source/conf.py +++ b/core/dbt/docs/source/conf.py @@ -7,7 +7,7 @@ # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html -sys.path.insert(0, os.path.abspath("../..")) +sys.path.insert(0, os.path.abspath("../../..")) sys.path.insert(0, os.path.abspath("./_ext")) # -- Project information ----------------------------------------------------- From 47c2edb42a331ab71c840dc7e2e5169b8cb6b490 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Tue, 29 Nov 2022 12:25:36 -0800 Subject: [PATCH 041/156] Ct 1518/convert 063 relation names tests (#6304) * Convert old test. Add documentation. Adapt and reenable previously skipped test. * Convert test and adapt and comment for current standards. * Remove old versions of tests. Co-authored-by: Mila Page --- ..._characters_incremental_abcdefghijklmn.sql | 9 -- ...characters_abcdefghijklmnopqrstuvwxyz0.sql | 8 -- ...abcdefghijklmnopqrstuvwxyz012345678901.sql | 8 -- ...bcdefghijklmnopqrstuvwxyz0123456789012.sql | 8 -- .../063_relation_name_tests/seeds/seed.csv | 4 - .../test_relation_name.py | 74 ----------- .../relation_names/test_relation_name.py | 124 ++++++++++++++++++ 7 files changed, 124 insertions(+), 111 deletions(-) delete mode 100644 test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql delete mode 100644 test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql delete mode 100644 test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql delete mode 100644 test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql delete mode 100644 test/integration/063_relation_name_tests/seeds/seed.csv delete mode 100644 test/integration/063_relation_name_tests/test_relation_name.py create mode 100644 tests/functional/relation_names/test_relation_name.py diff --git a/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql b/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql deleted file mode 100644 index 0f6028e5306..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_51_characters_incremental_abcdefghijklmn.sql +++ /dev/null @@ -1,9 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "unique_key": "col_A", - "materialized": "incremental" - }) -}} diff --git a/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql b/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "materialized": "table" - }) -}} diff --git 
a/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql b/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "materialized": "table" - }) -}} diff --git a/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql b/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql deleted file mode 100644 index 3f6bdab0112..00000000000 --- a/test/integration/063_relation_name_tests/models/my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012.sql +++ /dev/null @@ -1,8 +0,0 @@ - -select * from {{ this.schema }}.seed - -{{ - config({ - "materialized": "table" - }) -}} diff --git a/test/integration/063_relation_name_tests/seeds/seed.csv b/test/integration/063_relation_name_tests/seeds/seed.csv deleted file mode 100644 index d4a1e26eed2..00000000000 --- a/test/integration/063_relation_name_tests/seeds/seed.csv +++ /dev/null @@ -1,4 +0,0 @@ -col_A,col_B -1,2 -3,4 -5,6 diff --git a/test/integration/063_relation_name_tests/test_relation_name.py b/test/integration/063_relation_name_tests/test_relation_name.py deleted file mode 100644 index df81b57f69b..00000000000 --- a/test/integration/063_relation_name_tests/test_relation_name.py +++ /dev/null @@ -1,74 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -from pytest import mark - - -class TestAdapterDDL(DBTIntegrationTest): - def setUp(self): - DBTIntegrationTest.setUp(self) - self.run_dbt(["seed"]) - - @property - def schema(self): - return "adapter_ddl_063" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - "seeds": { - "quote_columns": False, - }, - } - - # 63 characters is the character limit for a table name in a postgres database - # (assuming compiled without changes from source) - @use_profile("postgres") - def test_postgres_name_longer_than_63_fails(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_64_characters_abcdefghijklmnopqrstuvwxyz0123456789012", - ], - expect_pass=False, - ) - - @mark.skip( - reason="Backup table generation currently adds 12 characters to the relation name, meaning the current name limit is 51." 
- ) - @use_profile("postgres") - def test_postgres_name_shorter_or_equal_to_63_passes(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0" - "my_name_is_63_characters_abcdefghijklmnopqrstuvwxyz012345678901", - ], - expect_pass=True, - ) - - @use_profile("postgres") - def test_postgres_long_name_passes_when_temp_tables_are_generated(self): - self.run_dbt( - [ - "run", - "-m", - "my_name_is_51_characters_incremental_abcdefghijklmn", - ], - expect_pass=True, - ) - - # Run again to trigger incremental materialization - self.run_dbt( - [ - "run", - "-m", - "my_name_is_51_characters_incremental_abcdefghijklmn", - ], - expect_pass=True, - ) diff --git a/tests/functional/relation_names/test_relation_name.py b/tests/functional/relation_names/test_relation_name.py new file mode 100644 index 00000000000..5d941d96da5 --- /dev/null +++ b/tests/functional/relation_names/test_relation_name.py @@ -0,0 +1,124 @@ +import pytest + +from dbt.contracts.results import RunStatus +from dbt.tests.util import run_dbt + +# Test coverage: A relation is a name for a database entity, i.e. a table or view. Every relation has +# a name. These tests verify the default Postgres rules for relation names are followed. Adapters +# may override connection rules and thus may have their own tests. + +seeds__seed = """col_A,col_B +1,2 +3,4 +5,6 +""" + +models__basic_incremental = """ +select * from {{ this.schema }}.seed + +{{ + config({ + "unique_key": "col_A", + "materialized": "incremental" + }) +}} +""" + +models__basic_table = """ +select * from {{ this.schema }}.seed + +{{ + config({ + "materialized": "table" + }) +}} +""" + + +class TestGeneratedDDLNameRules: + @classmethod + def setup_class(self): + self.incremental_filename = "my_name_is_51_characters_incremental_abcdefghijklmn" + # length is 63 + self.max_length_filename = "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789" + # length is 64 + self.over_max_length_filename = "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1" + + self.filename_for_backup_file = "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0" + + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project): + run_dbt(["seed"]) + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": seeds__seed} + + @pytest.fixture(scope="class") + def models(self): + return { + f"{self.incremental_filename}.sql": + models__basic_incremental, + f"{self.filename_for_backup_file}.sql": + models__basic_table, + f"{self.max_length_filename}.sql": + models__basic_table, + f"{self.over_max_length_filename}.sql": + models__basic_table, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + } + + # Backup table name generation: + # 1. for len(relation name) <= 51, backfills + # 2. 
for len(relation name) > 51 characters, overwrites + # the last 12 characters with __dbt_backup + def test_name_shorter_or_equal_to_63_passes(self, project): + run_dbt( + [ + "run", + "-s", + f"{self.max_length_filename}", + f"{self.filename_for_backup_file}", + ], + ) + + def test_long_name_passes_when_temp_tables_are_generated(self): + run_dbt( + [ + "run", + "-s", + f"{self.incremental_filename}", + ], + ) + + # Run again to trigger incremental materialization + run_dbt( + [ + "run", + "-s", + f"{self.incremental_filename}", + ], + ) + + # 63 characters is the character limit for a table name in a postgres database + # (assuming compiled without changes from source) + def test_name_longer_than_63_does_not_build(self): + err_msg = "Relation name 'my_name_is_one_over_max"\ + "_length_chats_abcdefghijklmnopqrstuvwxyz1' is longer than 63 characters" + res = run_dbt( + [ + "run", + "-s", + self.over_max_length_filename, + ], + expect_pass=False + ) + assert res[0].status == RunStatus.Error + assert err_msg in res[0].message From c3be975783e065c4dc42c10fca29aca8e5584945 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Tue, 29 Nov 2022 12:47:20 -0800 Subject: [PATCH 042/156] Ct 288/convert 070 incremental test (#6330) * Convert incremental schema tests. * Drop the old test. * Bad git add. My disappoint is immeasurable and my day has been ruined. * Adjustments for flake8. Co-authored-by: Mila Page --- .../models/incremental_append_new_columns.sql | 29 -- ...remental_append_new_columns_remove_one.sql | 28 -- ...l_append_new_columns_remove_one_target.sql | 19 - .../incremental_append_new_columns_target.sql | 19 - .../models/incremental_fail.sql | 19 - .../models/incremental_ignore.sql | 19 - .../models/incremental_ignore_target.sql | 15 - .../models/incremental_sync_all_columns.sql | 31 -- .../incremental_sync_all_columns_target.sql | 20 - .../models/incremental_sync_remove_only.sql | 29 -- .../incremental_sync_remove_only_target.sql | 17 - .../models/model_a.sql | 22 - .../models/schema.yml | 54 --- .../test_incremental_schema.py | 88 ---- .../tests/select_from_a.sql | 1 - ...ct_from_incremental_append_new_columns.sql | 1 - ..._incremental_append_new_columns_target.sql | 1 - .../tests/select_from_incremental_ignore.sql | 1 - .../select_from_incremental_ignore_target.sql | 1 - ...lect_from_incremental_sync_all_columns.sql | 1 - ...om_incremental_sync_all_columns_target.sql | 1 - .../incremental_schema_tests/fixtures.py | 395 ++++++++++++++++++ .../test_incremental_schema.py | 136 ++++++ 23 files changed, 531 insertions(+), 416 deletions(-) delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_fail.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_ignore.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql delete mode 100644 
test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/model_a.sql delete mode 100644 test/integration/070_incremental_schema_tests/models/schema.yml delete mode 100644 test/integration/070_incremental_schema_tests/test_incremental_schema.py delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_a.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql delete mode 100644 test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql create mode 100644 tests/functional/incremental_schema_tests/fixtures.py create mode 100644 tests/functional/incremental_schema_tests/test_incremental_schema.py diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql deleted file mode 100644 index f9eebdcb852..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns.sql +++ /dev/null @@ -1,29 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='append_new_columns' - ) -}} - -{% set string_type = 'varchar(10)' %} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2, - cast(field3 as {{string_type}}) as field3, - cast(field4 as {{string_type}}) as field4 -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 -FROM source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql deleted file mode 100644 index dbb4962a7e5..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one.sql +++ /dev/null @@ -1,28 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='append_new_columns' - ) -}} - -{% set string_type = 'varchar(10)' %} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field3 as {{string_type}}) as field3, - cast(field4 as {{string_type}}) as field4 -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as 
field2 -FROM source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql deleted file mode 100644 index f3a279f0285..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_remove_one_target.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config(materialized='table') -}} - -{% set string_type = 'varchar(10)' %} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id, - cast(field1 as {{string_type}}) as field1, - cast(CASE WHEN id > 3 THEN NULL ELSE field2 END as {{string_type}}) AS field2, - cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3, - cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql deleted file mode 100644 index 5ff759d7dab..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_append_new_columns_target.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config(materialized='table') -}} - -{% set string_type = 'varchar(10)' %} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id - ,cast(field1 as {{string_type}}) as field1 - ,cast(field2 as {{string_type}}) as field2 - ,cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3 - ,cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_fail.sql b/test/integration/070_incremental_schema_tests/models/incremental_fail.sql deleted file mode 100644 index 590f5b56d97..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_fail.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='fail' - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, field1, field2 FROM source_data - -{% else %} - -SELECT id, field1, field3 FROm source_data - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql b/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql deleted file mode 100644 index 51dee6022fb..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_ignore.sql +++ /dev/null @@ -1,19 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='ignore' - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% if is_incremental() %} - -SELECT id, field1, field2, field3, field4 FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -SELECT id, field1, field2 FROM source_data LIMIT 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql deleted file mode 100644 index 92d4564e0e8..00000000000 --- 
a/test/integration/070_incremental_schema_tests/models/incremental_ignore_target.sql +++ /dev/null @@ -1,15 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -select id - ,field1 - ,field2 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql deleted file mode 100644 index b742c970419..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns.sql +++ /dev/null @@ -1,31 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='sync_all_columns' - - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% set string_type = 'varchar(10)' %} - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1, - cast(field3 as {{string_type}}) as field3, -- to validate new fields - cast(field4 as {{string_type}}) AS field4 -- to validate new fields - -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -select id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 - -from source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql deleted file mode 100644 index 6cdbaba5c0d..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_all_columns_target.sql +++ /dev/null @@ -1,20 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -{% set string_type = 'varchar(10)' %} - -select id - ,cast(field1 as {{string_type}}) as field1 - --,field2 - ,cast(case when id <= 3 then null else field3 end as {{string_type}}) as field3 - ,cast(case when id <= 3 then null else field4 end as {{string_type}}) as field4 - -from source_data -order by id \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql deleted file mode 100644 index 55bae0ad17e..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only.sql +++ /dev/null @@ -1,29 +0,0 @@ -{{ - config( - materialized='incremental', - unique_key='id', - on_schema_change='sync_all_columns' - - ) -}} - -WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) - -{% set string_type = 'varchar(10)' %} - -{% if is_incremental() %} - -SELECT id, - cast(field1 as {{string_type}}) as field1 - -FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) - -{% else %} - -select id, - cast(field1 as {{string_type}}) as field1, - cast(field2 as {{string_type}}) as field2 - -from source_data where id <= 3 - -{% endif %} \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql b/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql deleted file mode 100644 index ff88512c6f5..00000000000 --- a/test/integration/070_incremental_schema_tests/models/incremental_sync_remove_only_target.sql +++ /dev/null @@ -1,17 +0,0 @@ -{{ - config(materialized='table') -}} 
- -with source_data as ( - - select * from {{ ref('model_a') }} - -) - -{% set string_type = 'varchar(10)' %} - -select id - ,cast(field1 as {{string_type}}) as field1 - -from source_data -order by id \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/model_a.sql b/test/integration/070_incremental_schema_tests/models/model_a.sql deleted file mode 100644 index 2a0b2ddaff2..00000000000 --- a/test/integration/070_incremental_schema_tests/models/model_a.sql +++ /dev/null @@ -1,22 +0,0 @@ -{{ - config(materialized='table') -}} - -with source_data as ( - - select 1 as id, 'aaa' as field1, 'bbb' as field2, 111 as field3, 'TTT' as field4 - union all select 2 as id, 'ccc' as field1, 'ddd' as field2, 222 as field3, 'UUU' as field4 - union all select 3 as id, 'eee' as field1, 'fff' as field2, 333 as field3, 'VVV' as field4 - union all select 4 as id, 'ggg' as field1, 'hhh' as field2, 444 as field3, 'WWW' as field4 - union all select 5 as id, 'iii' as field1, 'jjj' as field2, 555 as field3, 'XXX' as field4 - union all select 6 as id, 'kkk' as field1, 'lll' as field2, 666 as field3, 'YYY' as field4 - -) - -select id - ,field1 - ,field2 - ,field3 - ,field4 - -from source_data \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/models/schema.yml b/test/integration/070_incremental_schema_tests/models/schema.yml deleted file mode 100644 index 5546314e413..00000000000 --- a/test/integration/070_incremental_schema_tests/models/schema.yml +++ /dev/null @@ -1,54 +0,0 @@ -version: 2 - -models: - - name: model_a - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns_target - columns: - - name: id - tags: [column_leveL_tag] - tests: - - unique - - - \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/test_incremental_schema.py b/test/integration/070_incremental_schema_tests/test_incremental_schema.py deleted file mode 100644 index 09a494b8952..00000000000 --- a/test/integration/070_incremental_schema_tests/test_incremental_schema.py +++ /dev/null @@ -1,88 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestIncrementalSchemaChange(DBTIntegrationTest): - @property - def schema(self): - return "test_incremental_schema_070" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - "config-version": 2, - "test-paths": ["tests"] - } - - def run_twice_and_assert( - self, include, compare_source, compare_target - ): - - # dbt run (twice) - run_args = ['run'] - if include: - run_args.extend(('--models', include)) - results_one = self.run_dbt(run_args) - results_two = self.run_dbt(run_args) - - self.assertEqual(len(results_one), 3) - self.assertEqual(len(results_two), 3) - - self.assertTablesEqual(compare_source, compare_target) - - def run_incremental_ignore(self): - select = 'model_a incremental_ignore 
incremental_ignore_target' - compare_source = 'incremental_ignore' - compare_target = 'incremental_ignore_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_append_new_columns(self): - select = 'model_a incremental_append_new_columns incremental_append_new_columns_target' - compare_source = 'incremental_append_new_columns' - compare_target = 'incremental_append_new_columns_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_append_new_columns_remove_one(self): - select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target' - compare_source = 'incremental_append_new_columns_remove_one' - compare_target = 'incremental_append_new_columns_remove_one_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_sync_all_columns(self): - select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target' - compare_source = 'incremental_sync_all_columns' - compare_target = 'incremental_sync_all_columns_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_sync_remove_only(self): - select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target' - compare_source = 'incremental_sync_remove_only' - compare_target = 'incremental_sync_remove_only_target' - self.run_twice_and_assert(select, compare_source, compare_target) - - def run_incremental_fail_on_schema_change(self): - select = 'model_a incremental_fail' - results_one = self.run_dbt(['run', '--models', select, '--full-refresh']) - results_two = self.run_dbt(['run', '--models', select], expect_pass = False) - self.assertIn('Compilation Error', results_two[1].message) - - @use_profile('postgres') - def test__postgres__run_incremental_ignore(self): - self.run_incremental_ignore() - - @use_profile('postgres') - def test__postgres__run_incremental_append_new_columns(self): - self.run_incremental_append_new_columns() - self.run_incremental_append_new_columns_remove_one() - - @use_profile('postgres') - def test__postgres__run_incremental_sync_all_columns(self): - self.run_incremental_sync_all_columns() - self.run_incremental_sync_remove_only() - - @use_profile('postgres') - def test__postgres__run_incremental_fail_on_schema_change(self): - self.run_incremental_fail_on_schema_change() diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_a.sql b/test/integration/070_incremental_schema_tests/tests/select_from_a.sql deleted file mode 100644 index 3dc8f2857bd..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_a.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('model_a') }} where false diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql deleted file mode 100644 index 947e8458854..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_append_new_columns') }} where false \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql deleted file mode 100644 index 
8b86eddd71d..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_append_new_columns_target.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_append_new_columns_target') }} where false \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql deleted file mode 100644 index d565c846465..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_ignore') }} where false diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql deleted file mode 100644 index 35d535c5ca5..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_ignore_target.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_ignore_target') }} where false \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql deleted file mode 100644 index aedc9f80396..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_sync_all_columns') }} where false \ No newline at end of file diff --git a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql b/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql deleted file mode 100644 index 4b703c988bf..00000000000 --- a/test/integration/070_incremental_schema_tests/tests/select_from_incremental_sync_all_columns_target.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('incremental_sync_all_columns_target') }} where false \ No newline at end of file diff --git a/tests/functional/incremental_schema_tests/fixtures.py b/tests/functional/incremental_schema_tests/fixtures.py new file mode 100644 index 00000000000..c6eebc5e183 --- /dev/null +++ b/tests/functional/incremental_schema_tests/fixtures.py @@ -0,0 +1,395 @@ + +# +# Properties +# +_PROPERTIES__SCHEMA = """ +version: 2 + +models: + - name: model_a + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_ignore + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_ignore_target + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_append_new_columns + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_append_new_columns_target + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_sync_all_columns + columns: + - name: id + tags: [column_level_tag] + tests: + - unique + + - name: incremental_sync_all_columns_target + columns: + - name: id + tags: [column_leveL_tag] + tests: + - unique +""" + +# +# Models +# +_MODELS__INCREMENTAL_SYNC_REMOVE_ONLY = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='sync_all_columns' + + ) +}} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% set 
string_type = 'varchar(10)' %} + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1 + +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +select id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 + +from source_data where id <= 3 + +{% endif %} +""" + +_MODELS__INCREMENTAL_IGNORE = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='ignore' + ) +}} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, field1, field2, field3, field4 FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +SELECT id, field1, field2 FROM source_data LIMIT 3 + +{% endif %} +""" + +_MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +{% set string_type = 'varchar(10)' %} + +select id + ,cast(field1 as {{string_type}}) as field1 + +from source_data +order by id +""" + +_MODELS__INCREMENTAL_IGNORE_TARGET = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +select id + ,field1 + ,field2 + +from source_data +""" + +_MODELS__INCREMENTAL_FAIL = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='fail' + ) +}} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, field1, field2 FROM source_data + +{% else %} + +SELECT id, field1, field3 FROm source_data + +{% endif %} +""" + +_MODELS__INCREMENTAL_SYNC_ALL_COLUMNS = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='sync_all_columns' + + ) +}} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% set string_type = 'varchar(10)' %} + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field3 as {{string_type}}) as field3, -- to validate new fields + cast(field4 as {{string_type}}) AS field4 -- to validate new fields + +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +select id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 + +from source_data where id <= 3 + +{% endif %} +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='append_new_columns' + ) +}} + +{% set string_type = 'varchar(10)' %} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field3 as {{string_type}}) as field3, + cast(field4 as {{string_type}}) as field4 +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 +FROM source_data where id <= 3 + +{% endif %} +""" + +_MODELS__A = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select 1 as id, 'aaa' as field1, 'bbb' as field2, 111 as field3, 'TTT' as field4 + union all select 2 as id, 'ccc' as field1, 'ddd' as field2, 222 as field3, 'UUU' as field4 + union all select 3 as id, 'eee' as field1, 'fff' as field2, 333 as field3, 'VVV' as field4 + union all select 4 as id, 'ggg' as field1, 'hhh' as field2, 444 as field3, 'WWW' as field4 + union all select 5 as id, 'iii' as field1, 'jjj' as field2, 555 
as field3, 'XXX' as field4 + union all select 6 as id, 'kkk' as field1, 'lll' as field2, 666 as field3, 'YYY' as field4 + +) + +select id + ,field1 + ,field2 + ,field3 + ,field4 + +from source_data +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """ +{{ + config(materialized='table') +}} + +{% set string_type = 'varchar(10)' %} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +select id + ,cast(field1 as {{string_type}}) as field1 + ,cast(field2 as {{string_type}}) as field2 + ,cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3 + ,cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 + +from source_data +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS = """ +{{ + config( + materialized='incremental', + unique_key='id', + on_schema_change='append_new_columns' + ) +}} + +{% set string_type = 'varchar(10)' %} + +WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) + +{% if is_incremental() %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2, + cast(field3 as {{string_type}}) as field3, + cast(field4 as {{string_type}}) as field4 +FROM source_data WHERE id NOT IN (SELECT id from {{ this }} ) + +{% else %} + +SELECT id, + cast(field1 as {{string_type}}) as field1, + cast(field2 as {{string_type}}) as field2 +FROM source_data where id <= 3 + +{% endif %} +""" + +_MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """ +{{ + config(materialized='table') +}} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +{% set string_type = 'varchar(10)' %} + +select id + ,cast(field1 as {{string_type}}) as field1 + --,field2 + ,cast(case when id <= 3 then null else field3 end as {{string_type}}) as field3 + ,cast(case when id <= 3 then null else field4 end as {{string_type}}) as field4 + +from source_data +order by id +""" + +_MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET = """ +{{ + config(materialized='table') +}} + +{% set string_type = 'varchar(10)' %} + +with source_data as ( + + select * from {{ ref('model_a') }} + +) + +select id, + cast(field1 as {{string_type}}) as field1, + cast(CASE WHEN id > 3 THEN NULL ELSE field2 END as {{string_type}}) AS field2, + cast(CASE WHEN id <= 3 THEN NULL ELSE field3 END as {{string_type}}) AS field3, + cast(CASE WHEN id <= 3 THEN NULL ELSE field4 END as {{string_type}}) AS field4 + +from source_data +""" + +# +# Tests +# + +_TESTS__SELECT_FROM_INCREMENTAL_IGNORE = """ +select * from {{ ref('incremental_ignore') }} where false +""" + +_TESTS__SELECT_FROM_A = """ +select * from {{ ref('model_a') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """ +select * from {{ ref('incremental_append_new_columns_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS = """ +select * from {{ ref('incremental_sync_all_columns') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """ +select * from {{ ref('incremental_sync_all_columns_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET = """ +select * from {{ ref('incremental_ignore_target') }} where false +""" + +_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS = """ +select * from {{ ref('incremental_append_new_columns') }} where false +""" diff --git a/tests/functional/incremental_schema_tests/test_incremental_schema.py b/tests/functional/incremental_schema_tests/test_incremental_schema.py new file mode 100644 index 00000000000..3ee9e6477e4 --- 
/dev/null +++ b/tests/functional/incremental_schema_tests/test_incremental_schema.py @@ -0,0 +1,136 @@ +import pytest + +from dbt.tests.util import ( + check_relations_equal, + run_dbt, +) + +from tests.functional.incremental_schema_tests.fixtures import ( + _PROPERTIES__SCHEMA, + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, + _MODELS__INCREMENTAL_IGNORE, + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + _MODELS__INCREMENTAL_IGNORE_TARGET, + _MODELS__INCREMENTAL_FAIL, + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + _MODELS__A, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, + _TESTS__SELECT_FROM_A, + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, +) + + +class TestIncrementalSchemaChange: + @pytest.fixture(scope="class") + def properties(self): + return { + "schema.yml": _PROPERTIES__SCHEMA, + } + + @pytest.fixture(scope="class") + def models(self): + return { + "incremental_sync_remove_only.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, + "incremental_ignore.sql": _MODELS__INCREMENTAL_IGNORE, + "incremental_sync_remove_only_target.sql": + _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + "incremental_ignore_target.sql": _MODELS__INCREMENTAL_IGNORE_TARGET, + "incremental_fail.sql": _MODELS__INCREMENTAL_FAIL, + "incremental_sync_all_columns.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, + "incremental_append_new_columns_remove_one.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + "model_a.sql": _MODELS__A, + "incremental_append_new_columns_target.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "incremental_append_new_columns.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, + "incremental_sync_all_columns_target.sql": + _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "incremental_append_new_columns_remove_one_target.sql": + _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, + } + + @pytest.fixture(scope="class") + def tests(self): + return { + "select_from_incremental.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, + "select_from_a.sql": _TESTS__SELECT_FROM_A, + "select_from_incremental_append_new_columns_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "select_from_incremental_sync_all_columns.sql": + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, + "select_from_incremental_sync_all_columns_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "select_from_incremental_ignore_target.sql": + _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, + "select_from_incremental_append_new_columns.sql": + _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, + } + + def run_twice_and_assert( + self, include, compare_source, compare_target, project + ): + + # dbt run (twice) + run_args = ['run'] + if include: + run_args.extend(('--select', include)) + results_one = run_dbt(run_args) + assert len(results_one) == 3 + + results_two = run_dbt(run_args) + assert len(results_two) == 3 + + check_relations_equal(project.adapter, [compare_source, compare_target]) + + def run_incremental_append_new_columns(self, project): + select = 'model_a incremental_append_new_columns 
incremental_append_new_columns_target' + compare_source = 'incremental_append_new_columns' + compare_target = 'incremental_append_new_columns_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_append_new_columns_remove_one(self, project): + select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target' + compare_source = 'incremental_append_new_columns_remove_one' + compare_target = 'incremental_append_new_columns_remove_one_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_sync_all_columns(self, project): + select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target' + compare_source = 'incremental_sync_all_columns' + compare_target = 'incremental_sync_all_columns_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def run_incremental_sync_remove_only(self, project): + select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target' + compare_source = 'incremental_sync_remove_only' + compare_target = 'incremental_sync_remove_only_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def test_run_incremental_ignore(self, project): + select = 'model_a incremental_ignore incremental_ignore_target' + compare_source = 'incremental_ignore' + compare_target = 'incremental_ignore_target' + self.run_twice_and_assert(select, compare_source, compare_target, project) + + def test_run_incremental_append_new_columns(self, project): + self.run_incremental_append_new_columns(project) + self.run_incremental_append_new_columns_remove_one(project) + + def test_run_incremental_sync_all_columns(self, project): + self.run_incremental_sync_all_columns(project) + self.run_incremental_sync_remove_only(project) + + def test_run_incremental_fail_on_schema_change(self, project): + select = 'model_a incremental_fail' + run_dbt(['run', '--models', select, '--full-refresh']) + results_two = run_dbt(['run', '--models', select], expect_pass=False) + assert 'Compilation Error' in results_two[1].message From b5d303f12a71ba17ac0e4707419a6439a032c6c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Nov 2022 17:32:47 -0500 Subject: [PATCH 043/156] Bump mashumaro[msgpack] from 3.0.4 to 3.1 in /core (#6108) * Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core Bumps [mashumaro[msgpack]](https://github.com/Fatal1ty/mashumaro) from 3.0.4 to 3.1. - [Release notes](https://github.com/Fatal1ty/mashumaro/releases) - [Commits](https://github.com/Fatal1ty/mashumaro/compare/v3.0.4...v3.1) --- updated-dependencies: - dependency-name: mashumaro[msgpack] dependency-type: direct:production update-type: version-update:semver-minor ... 
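The pin being bumped here matters because dbt's contract dataclasses serialize through mashumaro, with the msgpack extra providing compact binary round-trips (used, for example, for the partial-parsing cache). A minimal sketch of that capability, using an illustrative dataclass rather than dbt's real contract classes:

from dataclasses import dataclass, field
from typing import List

from mashumaro.mixins.msgpack import DataClassMessagePackMixin


@dataclass
class NodeStub(DataClassMessagePackMixin):
    # Hypothetical stand-in for a dbt contract class; the mixin adds
    # to_msgpack()/from_msgpack() to any dataclass.
    unique_id: str
    depends_on: List[str] = field(default_factory=list)


packed = NodeStub(unique_id="model.test.model").to_msgpack()
assert NodeStub.from_msgpack(packed).unique_id == "model.test.model"

Pinning the exact version (==3.1.1 in setup.py below) presumably keeps the serialized format stable across installs, which is why the bump ships with its own changelog entry.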
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Github Build Bot Co-authored-by: Michelle Ark --- .changes/unreleased/Dependency-20221020-000753.yaml | 7 +++++++ core/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Dependency-20221020-000753.yaml diff --git a/.changes/unreleased/Dependency-20221020-000753.yaml b/.changes/unreleased/Dependency-20221020-000753.yaml new file mode 100644 index 00000000000..ff6e7efc48f --- /dev/null +++ b/.changes/unreleased/Dependency-20221020-000753.yaml @@ -0,0 +1,7 @@ +kind: "Dependency" +body: "Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core" +time: 2022-10-20T00:07:53.00000Z +custom: + Author: dependabot[bot] + Issue: 4904 + PR: 6108 diff --git a/core/setup.py b/core/setup.py index 61d712ed79a..013d0440c26 100644 --- a/core/setup.py +++ b/core/setup.py @@ -54,7 +54,7 @@ "hologram>=0.0.14,<=0.0.15", "isodate>=0.6,<0.7", "logbook>=1.5,<1.6", - "mashumaro[msgpack]==3.0.4", + "mashumaro[msgpack]==3.1.1", "minimal-snowplow-tracker==0.0.2", "networkx>=2.3,<2.8.1;python_version<'3.8'", "networkx>=2.3,<3;python_version>='3.8'", From 2289e45571da690d8b997a5346379443ed48a631 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Thu, 1 Dec 2022 11:01:16 -0500 Subject: [PATCH 044/156] Exposures support metrics (#6342) * exposures support metrics --- .../unreleased/Features-20221130-112913.yaml | 7 ++ core/dbt/context/providers.py | 14 ++++ core/dbt/contracts/graph/parsed.py | 1 + core/dbt/parser/manifest.py | 8 ++- core/dbt/parser/schemas.py | 2 +- schemas/dbt/manifest/v8.json | 64 +++++++++++-------- test/unit/test_contracts_graph_parsed.py | 2 + .../functional/artifacts/expected_manifest.py | 3 + tests/functional/exposures/fixtures.py | 25 ++++++++ .../exposures/test_exposure_configs.py | 8 ++- tests/functional/exposures/test_exposures.py | 15 +++++ 11 files changed, 118 insertions(+), 31 deletions(-) create mode 100644 .changes/unreleased/Features-20221130-112913.yaml diff --git a/.changes/unreleased/Features-20221130-112913.yaml b/.changes/unreleased/Features-20221130-112913.yaml new file mode 100644 index 00000000000..b640ab3e690 --- /dev/null +++ b/.changes/unreleased/Features-20221130-112913.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Exposures support metrics in lineage +time: 2022-11-30T11:29:13.256034-05:00 +custom: + Author: michelleark + Issue: "6057" + PR: "6342" diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index f8d5af889be..35afeecddf3 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -1435,6 +1435,14 @@ def __call__(self, *args) -> str: return "" +class ExposureMetricResolver(BaseResolver): + def __call__(self, *args) -> str: + if len(args) not in (1, 2): + metric_invalid_args(self.model, args) + self.model.metrics.append(list(args)) + return "" + + def generate_parse_exposure( exposure: ParsedExposure, config: RuntimeConfig, @@ -1455,6 +1463,12 @@ def generate_parse_exposure( project, manifest, ), + "metric": ExposureMetricResolver( + None, + exposure, + project, + manifest, + ), } diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/parsed.py index 3bf47b324d5..43d210bff2a 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/parsed.py @@ -764,6 +764,7 @@ class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn): depends_on: DependsOn = field(default_factory=DependsOn) refs: List[List[str]] = 
field(default_factory=list) sources: List[List[str]] = field(default_factory=list) + metrics: List[List[str]] = field(default_factory=list) created_at: float = field(default_factory=lambda: time.time()) @property diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 80fac715178..d08d08788fc 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -851,6 +851,10 @@ def process_metrics(self, config: RuntimeConfig): if metric.created_at < self.started_at: continue _process_metrics_for_node(self.manifest, current_project, metric) + for exposure in self.manifest.exposures.values(): + if exposure.created_at < self.started_at: + continue + _process_metrics_for_node(self.manifest, current_project, exposure) # nodes: node and column descriptions # sources: source and table descriptions, column descriptions @@ -1180,7 +1184,9 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P def _process_metrics_for_node( - manifest: Manifest, current_project: str, node: Union[ManifestNode, ParsedMetric] + manifest: Manifest, + current_project: str, + node: Union[ManifestNode, ParsedMetric, ParsedExposure], ): """Given a manifest and a node in that manifest, process its metrics""" for metric in node.metrics: diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 21521c85e53..4909d99f44e 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -1053,7 +1053,7 @@ def parse_exposure(self, unparsed: UnparsedExposure): ) depends_on_jinja = "\n".join("{{ " + line + "}}" for line in unparsed.depends_on) get_rendered(depends_on_jinja, ctx, parsed, capture_macros=True) - # parsed now has a populated refs/sources + # parsed now has a populated refs/sources/metrics if parsed.config.enabled: self.manifest.add_exposure(self.yaml.file, parsed) diff --git a/schemas/dbt/manifest/v8.json b/schemas/dbt/manifest/v8.json index 4442ae1d39f..9bf00fdc7f2 100644 --- a/schemas/dbt/manifest/v8.json +++ b/schemas/dbt/manifest/v8.json @@ -244,7 +244,7 @@ "generated_at": { "type": "string", "format": "date-time", - "default": "2022-11-01T18:01:47.759437Z" + "default": "2022-11-30T05:36:16.443035Z" }, "invocation_id": { "oneOf": [ @@ -255,7 +255,7 @@ "type": "null" } ], - "default": "94cf6dd0-d59b-4139-bf79-70055cb9bb34" + "default": "ff51bdcd-689d-45b3-8dbb-5a8016382eef" }, "env": { "type": "object", @@ -519,7 +519,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.764821 + "default": 1669786576.4447858 }, "config_call_dict": { "type": "object", @@ -1066,7 +1066,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.767402 + "default": 1669786576.445715 }, "config_call_dict": { "type": "object", @@ -1425,7 +1425,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.768972 + "default": 1669786576.4462662 }, "config_call_dict": { "type": "object", @@ -1672,7 +1672,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.7706041 + "default": 1669786576.446837 }, "config_call_dict": { "type": "object", @@ -1929,7 +1929,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.772256 + "default": 1669786576.447436 }, "config_call_dict": { "type": "object", @@ -2176,7 +2176,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.7739131 + "default": 1669786576.448 }, "config_call_dict": { "type": "object", @@ -2419,7 +2419,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.7757251 + "default": 1669786576.448638 }, "config_call_dict": { "type": "object", @@ 
-2714,7 +2714,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.7787 + "default": 1669786576.449645 }, "config_call_dict": { "type": "object", @@ -3136,7 +3136,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.780513 + "default": 1669786576.450196 }, "config_call_dict": { "type": "object", @@ -3379,7 +3379,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.782298 + "default": 1669786576.450721 }, "config_call_dict": { "type": "object", @@ -3583,7 +3583,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.7835789 + "default": 1669786576.451207 }, "config_call_dict": { "type": "object", @@ -3795,7 +3795,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.784904 + "default": 1669786576.451676 }, "config_call_dict": { "type": "object", @@ -4017,7 +4017,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.78629 + "default": 1669786576.452158 }, "config_call_dict": { "type": "object", @@ -4229,7 +4229,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.78762 + "default": 1669786576.452618 }, "config_call_dict": { "type": "object", @@ -4441,7 +4441,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.789003 + "default": 1669786576.453086 }, "config_call_dict": { "type": "object", @@ -4649,7 +4649,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.790516 + "default": 1669786576.4535701 }, "config_call_dict": { "type": "object", @@ -4882,7 +4882,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.792015 + "default": 1669786576.454068 }, "config_call_dict": { "type": "object", @@ -5081,7 +5081,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.794882 + "default": 1669786576.454986 }, "config_call_dict": { "type": "object", @@ -5462,7 +5462,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.797194 + "default": 1669786576.455929 } }, "additionalProperties": false, @@ -5577,7 +5577,7 @@ "generated_at": { "type": "string", "format": "date-time", - "default": "2022-11-01T18:01:47.754102Z" + "default": "2022-11-30T05:36:16.440838Z" }, "invocation_id": { "oneOf": [ @@ -5588,7 +5588,7 @@ "type": "null" } ], - "default": "94cf6dd0-d59b-4139-bf79-70055cb9bb34" + "default": "ff51bdcd-689d-45b3-8dbb-5a8016382eef" }, "env": { "type": "object", @@ -5948,7 +5948,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.798143 + "default": 1669786576.45632 }, "supported_languages": { "oneOf": [ @@ -6199,13 +6199,23 @@ }, "default": [] }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, "created_at": { "type": "number", - "default": 1667325707.799795 + "default": 1669786576.456964 } }, "additionalProperties": false, - "description": "ParsedExposure(fqn: List[str], unique_id: str, package_name: str, path: str, original_file_path: str, name: str, type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, resource_type: dbt.node_types.NodeType = , description: str = '', label: Optional[str] = None, maturity: Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.ExposureConfig = , unrendered_config: Dict[str, Any] = , url: Optional[str] = None, depends_on: dbt.contracts.graph.parsed.DependsOn = , refs: List[List[str]] = , sources: List[List[str]] = , created_at: float = )" + "description": "ParsedExposure(fqn: List[str], unique_id: 
str, package_name: str, path: str, original_file_path: str, name: str, type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, resource_type: dbt.node_types.NodeType = , description: str = '', label: Optional[str] = None, maturity: Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.ExposureConfig = , unrendered_config: Dict[str, Any] = , url: Optional[str] = None, depends_on: dbt.contracts.graph.parsed.DependsOn = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" }, "ExposureOwner": { "type": "object", @@ -6424,7 +6434,7 @@ }, "created_at": { "type": "number", - "default": 1667325707.801514 + "default": 1669786576.4576042 } }, "additionalProperties": false, diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py index 9d11b1cfbc8..1114b65c31c 100644 --- a/test/unit/test_contracts_graph_parsed.py +++ b/test/unit/test_contracts_graph_parsed.py @@ -2131,6 +2131,7 @@ def basic_parsed_exposure_dict(): }, 'refs': [], 'sources': [], + 'metrics': [], 'fqn': ['test', 'exposures', 'my_exposure'], 'unique_id': 'exposure.test.my_exposure', 'package_name': 'test', @@ -2191,6 +2192,7 @@ def complex_parsed_exposure_dict(): }, 'refs': [], 'sources': [], + 'metrics': [], 'fqn': ['test', 'exposures', 'my_exposure'], 'unique_id': 'exposure.test.my_exposure', 'package_name': 'test', diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index fabd960dd94..482e6f8672c 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -744,6 +744,7 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "fqn": ["test", "notebook_exposure"], "maturity": "medium", "meta": {"tool": "my_tool", "languages": ["python"]}, + "metrics": [], "tags": ["my_department"], "name": "notebook_exposure", "original_file_path": os.path.join("models", "schema.yml"), @@ -770,6 +771,7 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "enabled": True, }, "fqn": ["test", "simple_exposure"], + "metrics": [], "name": "simple_exposure", "original_file_path": os.path.join("models", "schema.yml"), "owner": { @@ -1185,6 +1187,7 @@ def expected_references_manifest(project): "fqn": ["test", "notebook_exposure"], "maturity": "medium", "meta": {"tool": "my_tool", "languages": ["python"]}, + "metrics": [], "tags": ["my_department"], "name": "notebook_exposure", "original_file_path": os.path.join("models", "schema.yml"), diff --git a/tests/functional/exposures/fixtures.py b/tests/functional/exposures/fixtures.py index 847a3cf5f73..1d573b1a7b6 100644 --- a/tests/functional/exposures/fixtures.py +++ b/tests/functional/exposures/fixtures.py @@ -7,6 +7,29 @@ select 1 as id """ + +source_schema_yml = """version: 2 + +sources: + - name: test_source + tables: + - name: test_table +""" + +metrics_schema_yml = """version: 2 + +metrics: + - name: metric + model: ref('model') + label: "label" + + calculation_method: count_distinct + expression: id + + timestamp: first_order + time_grains: [day] +""" + simple_exposure_yml = """ version: 2 @@ -16,6 +39,8 @@ type: dashboard depends_on: - ref('model') + - source('test_source', 'test_table') + - metric('metric') owner: email: something@example.com - name: notebook_exposure diff --git 
a/tests/functional/exposures/test_exposure_configs.py b/tests/functional/exposures/test_exposure_configs.py index ed49f565ec7..a7018204952 100644 --- a/tests/functional/exposures/test_exposure_configs.py +++ b/tests/functional/exposures/test_exposure_configs.py @@ -10,7 +10,9 @@ simple_exposure_yml, disabled_models_exposure_yml, enabled_yaml_level_exposure_yml, - invalid_config_exposure_yml + invalid_config_exposure_yml, + source_schema_yml, + metrics_schema_yml ) @@ -29,7 +31,9 @@ def models(self): return { "model.sql": models_sql, "second_model.sql": second_model_sql, - "schema.yml": simple_exposure_yml, + "exposure.yml": simple_exposure_yml, + "schema.yml": source_schema_yml, + "metrics.yml": metrics_schema_yml, } @pytest.fixture(scope="class") diff --git a/tests/functional/exposures/test_exposures.py b/tests/functional/exposures/test_exposures.py index 52ff74d4b0c..777a8e161c4 100644 --- a/tests/functional/exposures/test_exposures.py +++ b/tests/functional/exposures/test_exposures.py @@ -5,6 +5,8 @@ models_sql, second_model_sql, simple_exposure_yml, + source_schema_yml, + metrics_schema_yml ) @@ -15,6 +17,8 @@ def models(self): "exposure.yml": simple_exposure_yml, "model.sql": models_sql, "second_model.sql": second_model_sql, + "schema.yml": source_schema_yml, + "metrics.yml": metrics_schema_yml, } def test_names_with_spaces(self, project): @@ -27,3 +31,14 @@ def test_names_with_spaces(self, project): ] assert exposure_ids == expected_exposure_ids assert manifest.exposures["exposure.test.simple_exposure"].label == "simple exposure label" + + def test_depends_on(self, project): + run_dbt(["run"]) + manifest = get_manifest(project.project_root) + exposure_depends_on = manifest.exposures["exposure.test.simple_exposure"].depends_on.nodes + expected_exposure_depends_on = [ + 'source.test.test_source.test_table', + 'model.test.model', + 'metric.test.metric' + ] + assert sorted(exposure_depends_on) == sorted(expected_exposure_depends_on) From 481235a943c7f87035327ef22cfdc7d0b43558be Mon Sep 17 00:00:00 2001 From: justbldwn <91483530+justbldwn@users.noreply.github.com> Date: Thu, 1 Dec 2022 14:43:36 -0500 Subject: [PATCH 045/156] clarify error log for number of allowed models in a Python file (#6251) --- .changes/unreleased/Fixes-20221115-081021.yaml | 7 +++++++ core/dbt/parser/models.py | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Fixes-20221115-081021.yaml diff --git a/.changes/unreleased/Fixes-20221115-081021.yaml b/.changes/unreleased/Fixes-20221115-081021.yaml new file mode 100644 index 00000000000..d995a2c4fa7 --- /dev/null +++ b/.changes/unreleased/Fixes-20221115-081021.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Clarify Error Message for how many models are allowed in a Python file +time: 2022-11-15T08:10:21.527884-05:00 +custom: + Author: justbldwn + Issue: "6245" + PR: "6251" diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index aaf6a0d016e..0a3f87018d9 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -61,7 +61,11 @@ def visit_FunctionDef(self, node: ast.FunctionDef) -> None: def check_error(self, node): if self.num_model_def != 1: - raise ParsingException("dbt only allow one model defined per python file", node=node) + raise ParsingException( + f"dbt allows exactly one model defined per python file, found {self.num_model_def}", + node=node, + ) + if len(self.dbt_errors) != 0: raise ParsingException("\n".join(self.dbt_errors), node=node) From 1fbcaa44841029364042a1bbd89035f2d97e2f18 Mon Sep 
17 00:00:00 2001 From: Matthew McKnight <91097623+McKnight-42@users.noreply.github.com> Date: Thu, 1 Dec 2022 16:54:58 -0600 Subject: [PATCH 046/156] reformatting of test after some spike investigation (#6314) * reformatting of test after some spike investigation * reformat code to pull tests back into base class definition, move a test to a more appropriate spot --- .../dbt/tests/adapter/aliases/test_aliases.py | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py index d9ff6b5b28f..a9f846e2ca4 100644 --- a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py +++ b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py @@ -50,7 +50,10 @@ def models(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } def test_alias_model_name(self, project): results = run_dbt(["run"]) @@ -68,7 +71,10 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): @@ -94,7 +100,10 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): @@ -121,14 +130,19 @@ def project_config_update(self, unique_schema): "models": { "test": { "alias": "duped_alias", - "model_b": {"schema": unique_schema + "_alt"}, + "model_b": { + "schema": unique_schema + "_alt" + }, }, }, } @pytest.fixture(scope="class") def macros(self): - return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} + return { + "cast.sql": MACROS__CAST_SQL, + "expect_value.sql": MACROS__EXPECT_VALUE_SQL + } @pytest.fixture(scope="class") def models(self): From 85d0b5afc77b5ef11b0e89d29bb39a9ea0d728e6 Mon Sep 17 00:00:00 2001 From: leahwicz <60146280+leahwicz@users.noreply.github.com> Date: Fri, 2 Dec 2022 12:09:46 -0500 Subject: [PATCH 047/156] Reverting back to older ubuntu image (#6363) * Reverting back to older ubuntu image * Updating the structured logging workflow as well --- .github/workflows/main.yml | 2 +- .github/workflows/structured-logging-schema-check.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4de07d83c07..257935419c8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -119,7 +119,7 @@ jobs: fail-fast: false matrix: python-version: ["3.7", "3.8", "3.9", "3.10"] - os: [ubuntu-latest] + os: [ubuntu-20.04] include: - python-version: 3.8 os: windows-latest diff --git a/.github/workflows/structured-logging-schema-check.yml b/.github/workflows/structured-logging-schema-check.yml index c99a7107c14..d8dda921ef8 100644 --- a/.github/workflows/structured-logging-schema-check.yml +++ b/.github/workflows/structured-logging-schema-check.yml @@ -22,7 +22,7 @@ jobs: # run the performance measurements on the current or default branch test-schema: name: Test Log Schema - runs-on: ubuntu-latest
runs-on: ubuntu-20.04 env: # turns warnings into errors RUSTFLAGS: "-D warnings" From 67a8138b65e1e74d6922f3f2766e001565cc76c4 Mon Sep 17 00:00:00 2001 From: Alexander Butler <41213451+z3z1ma@users.noreply.github.com> Date: Fri, 2 Dec 2022 15:23:37 -0700 Subject: [PATCH 048/156] [fix] Fix the partial parse write path (#6081) * Fix the partial parse path Partial parse should use the project root, or it does not resolve to the correct path. E.g. `target-path: ../some/dir/target`, if not run from the project root, creates an erroneous folder. * Run pre-commit * Changie Co-authored-by: Gerda Shank --- .changes/unreleased/Fixes-20221202-164859.yaml | 7 +++++++ core/dbt/parser/manifest.py | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Fixes-20221202-164859.yaml diff --git a/.changes/unreleased/Fixes-20221202-164859.yaml b/.changes/unreleased/Fixes-20221202-164859.yaml new file mode 100644 index 00000000000..65d17625ac0 --- /dev/null +++ b/.changes/unreleased/Fixes-20221202-164859.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Use full path for writing manifest +time: 2022-12-02T16:48:59.029519-05:00 +custom: + Author: gshank + Issue: "6055" + PR: "6081" diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index d08d08788fc..bbfe9714697 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -537,7 +537,9 @@ def macro_depends_on(self): macro.depends_on.add_macro(dep_macro_id) # will check for dupes def write_manifest_for_partial_parse(self): - path = os.path.join(self.root_project.target_path, PARTIAL_PARSE_FILE_NAME) + path = os.path.join( + self.root_project.project_root, self.root_project.target_path, PARTIAL_PARSE_FILE_NAME + ) try: # This shouldn't be necessary, but we have gotten bug reports (#3757) of the # saved manifest not matching the code version. From ebfcf2a9ef6b691beda49ba5619239dd4f4a0b11 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Fri, 2 Dec 2022 15:45:53 -0700 Subject: [PATCH 049/156] Update `core/dbt/README.md` to match current (#6371) * Update core/dbt/README.md to match current Add missing files/folders and alphabetize * Changelog entry --- .changes/unreleased/Docs-20221202-150523.yaml | 7 +++ core/dbt/README.md | 49 +++++++++++-------- 2 files changed, 36 insertions(+), 20 deletions(-) create mode 100644 .changes/unreleased/Docs-20221202-150523.yaml diff --git a/.changes/unreleased/Docs-20221202-150523.yaml b/.changes/unreleased/Docs-20221202-150523.yaml new file mode 100644 index 00000000000..cf0b4edb2d8 --- /dev/null +++ b/.changes/unreleased/Docs-20221202-150523.yaml @@ -0,0 +1,7 @@ +kind: Docs +body: Alphabetize `core/dbt/README.md` +time: 2022-12-02T15:05:23.695333-07:00 +custom: + Author: dbeatty10 + Issue: "6368" + PR: "6371" diff --git a/core/dbt/README.md b/core/dbt/README.md index 5886bf37525..79123a95f47 100644 --- a/core/dbt/README.md +++ b/core/dbt/README.md @@ -2,50 +2,59 @@ ## The following are individual files in this directory.
-### deprecations.py - -### flags.py +### compilation.py -### main.py +### constants.py -### tracking.py +### dataclass_schema.py -### version.py +### deprecations.py -### lib.py +### exceptions.py -### node_types.py +### flags.py ### helper_types.py +### hooks.py + +### lib.py + ### links.py -### semver.py +### logger.py -### ui.py +### main.py -### compilation.py +### node_types.py -### dataclass_schema.py +### profiler.py -### exceptions.py +### selected_resources.py -### hooks.py +### semver.py -### logger.py +### tracking.py -### profiler.py +### ui.py ### utils.py +### version.py + ## The subdirectories will be documented in a README in the subdirectory -* config -* include * adapters +* cli +* clients +* config * context +* contracts * deps +* docs +* events * graph +* include +* parser * task -* clients -* events +* tests From 16f529e1d4e067bdbb6a659a622bead442f24b4e Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Fri, 2 Dec 2022 19:29:25 -0500 Subject: [PATCH 050/156] CT 1477 enrich logging events with data similar to legacy logger (#6325) --- .../unreleased/Features-20220817-154857.yaml | 2 +- core/dbt/adapters/base/connections.py | 23 ++-- core/dbt/adapters/sql/connections.py | 21 +++- core/dbt/compilation.py | 7 +- core/dbt/context/base.py | 5 +- core/dbt/contracts/connection.py | 5 +- core/dbt/contracts/graph/parsed.py | 9 ++ core/dbt/contracts/results.py | 27 +++-- core/dbt/events/adapter_endpoint.py | 25 +++-- core/dbt/events/contextvars.py | 84 +++++++++++++++ core/dbt/events/functions.py | 5 +- core/dbt/events/proto_types.py | 101 ++++++++++-------- core/dbt/events/types.proto | 99 +++++++++-------- core/dbt/events/types.py | 24 ++--- core/dbt/exceptions.py | 6 +- core/dbt/lib.py | 6 +- core/dbt/logger.py | 5 - core/dbt/parser/generic_test.py | 4 +- core/dbt/parser/macros.py | 4 +- core/dbt/task/base.py | 13 ++- core/dbt/task/run.py | 13 ++- core/dbt/task/runnable.py | 26 +++-- tests/functional/logging/test_logging.py | 51 +++++++++ tests/unit/test_events.py | 13 +-- tox.ini | 2 +- 25 files changed, 400 insertions(+), 180 deletions(-) create mode 100644 core/dbt/events/contextvars.py create mode 100644 tests/functional/logging/test_logging.py diff --git a/.changes/unreleased/Features-20220817-154857.yaml b/.changes/unreleased/Features-20220817-154857.yaml index c8c0cd9c036..f22e48b91a0 100644 --- a/.changes/unreleased/Features-20220817-154857.yaml +++ b/.changes/unreleased/Features-20220817-154857.yaml @@ -1,5 +1,5 @@ kind: Features -body: Proto logging messages +body: Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. 
time: 2022-08-17T15:48:57.225267-04:00 custom: Author: gshank diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py index ea7150a1b80..577cdf6d9a6 100644 --- a/core/dbt/adapters/base/connections.py +++ b/core/dbt/adapters/base/connections.py @@ -48,6 +48,7 @@ Rollback, RollbackFailed, ) +from dbt.events.contextvars import get_node_info from dbt import flags from dbt.utils import cast_to_str @@ -169,7 +170,9 @@ def set_connection_name(self, name: Optional[str] = None) -> Connection: if conn.name == conn_name and conn.state == "open": return conn - fire_event(NewConnection(conn_name=conn_name, conn_type=self.TYPE)) + fire_event( + NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info()) + ) if conn.state == "open": fire_event(ConnectionReused(conn_name=conn_name)) @@ -336,7 +339,9 @@ def _rollback_handle(cls, connection: Connection) -> None: except Exception: fire_event( RollbackFailed( - conn_name=cast_to_str(connection.name), exc_info=traceback.format_exc() + conn_name=cast_to_str(connection.name), + exc_info=traceback.format_exc(), + node_info=get_node_info(), ) ) @@ -345,10 +350,16 @@ def _close_handle(cls, connection: Connection) -> None: """Perform the actual close operation.""" # On windows, sometimes connection handles don't have a close() attr. if hasattr(connection.handle, "close"): - fire_event(ConnectionClosed(conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionClosed(conn_name=cast_to_str(connection.name), node_info=get_node_info()) + ) connection.handle.close() else: - fire_event(ConnectionLeftOpen(conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionLeftOpen( + conn_name=cast_to_str(connection.name), node_info=get_node_info() + ) + ) @classmethod def _rollback(cls, connection: Connection) -> None: @@ -359,7 +370,7 @@ def _rollback(cls, connection: Connection) -> None: f'"{connection.name}", but it does not have one open!' 
) - fire_event(Rollback(conn_name=cast_to_str(connection.name))) + fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info())) cls._rollback_handle(connection) connection.transaction_open = False @@ -371,7 +382,7 @@ def close(cls, connection: Connection) -> Connection: return connection if connection.transaction_open and connection.handle: - fire_event(Rollback(conn_name=cast_to_str(connection.name))) + fire_event(Rollback(conn_name=cast_to_str(connection.name), node_info=get_node_info())) cls._rollback_handle(connection) connection.transaction_open = False diff --git a/core/dbt/adapters/sql/connections.py b/core/dbt/adapters/sql/connections.py index f8928a37651..bc1a562ad86 100644 --- a/core/dbt/adapters/sql/connections.py +++ b/core/dbt/adapters/sql/connections.py @@ -10,6 +10,7 @@ from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse from dbt.events.functions import fire_event from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus +from dbt.events.contextvars import get_node_info from dbt.utils import cast_to_str @@ -56,7 +57,13 @@ def add_query( connection = self.get_thread_connection() if auto_begin and connection.transaction_open is False: self.begin() - fire_event(ConnectionUsed(conn_type=self.TYPE, conn_name=cast_to_str(connection.name))) + fire_event( + ConnectionUsed( + conn_type=self.TYPE, + conn_name=cast_to_str(connection.name), + node_info=get_node_info(), + ) + ) with self.exception_handler(sql): if abridge_sql_log: @@ -64,7 +71,11 @@ def add_query( else: log_sql = sql - fire_event(SQLQuery(conn_name=cast_to_str(connection.name), sql=log_sql)) + fire_event( + SQLQuery( + conn_name=cast_to_str(connection.name), sql=log_sql, node_info=get_node_info() + ) + ) pre = time.time() cursor = connection.handle.cursor() @@ -72,7 +83,9 @@ def add_query( fire_event( SQLQueryStatus( - status=str(self.get_response(cursor)), elapsed=round((time.time() - pre), 2) + status=str(self.get_response(cursor)), + elapsed=round((time.time() - pre)), + node_info=get_node_info(), ) ) @@ -156,7 +169,7 @@ def commit(self): "it does not have one open!".format(connection.name) ) - fire_event(SQLCommit(conn_name=connection.name)) + fire_event(SQLCommit(conn_name=connection.name, node_info=get_node_info())) self.add_commit_query() connection.transaction_open = False diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 7163b669001..0afd82c0d42 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -28,7 +28,8 @@ ) from dbt.graph import Graph from dbt.events.functions import fire_event -from dbt.events.types import FoundStats, CompilingNode, WritingInjectedSQLForNode +from dbt.events.types import FoundStats, WritingInjectedSQLForNode +from dbt.events.contextvars import get_node_info from dbt.node_types import NodeType, ModelLanguage from dbt.events.format import pluralize import dbt.tracking @@ -356,8 +357,6 @@ def _compile_node( if extra_context is None: extra_context = {} - fire_event(CompilingNode(unique_id=node.unique_id)) - data = node.to_dict(omit_none=True) data.update( { @@ -511,7 +510,7 @@ def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph def _write_node(self, node: NonSourceCompiledNode) -> ManifestNode: if not node.extra_ctes_injected or node.resource_type == NodeType.Snapshot: return node - fire_event(WritingInjectedSQLForNode(unique_id=node.unique_id)) + fire_event(WritingInjectedSQLForNode(node_info=get_node_info())) if node.compiled_code: 
node.compiled_path = node.write_node( diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index 2fe56de0200..813a7a32b5e 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -18,6 +18,7 @@ ) from dbt.events.functions import fire_event, get_invocation_id from dbt.events.types import JinjaLogInfo, JinjaLogDebug +from dbt.events.contextvars import get_node_info from dbt.version import __version__ as dbt_version # These modules are added to the context. Consider alternative @@ -558,9 +559,9 @@ def log(msg: str, info: bool = False) -> str: {% endmacro %}" """ if info: - fire_event(JinjaLogInfo(msg=msg)) + fire_event(JinjaLogInfo(msg=msg, node_info=get_node_info())) else: - fire_event(JinjaLogDebug(msg=msg)) + fire_event(JinjaLogDebug(msg=msg, node_info=get_node_info())) return "" @contextproperty diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py index a32bb443099..fe4ae912229 100644 --- a/core/dbt/contracts/connection.py +++ b/core/dbt/contracts/connection.py @@ -16,6 +16,7 @@ from dbt.utils import translate_aliases from dbt.events.functions import fire_event from dbt.events.types import NewConnectionOpening +from dbt.events.contextvars import get_node_info from typing_extensions import Protocol from dbt.dataclass_schema import ( dbtClassMixin, @@ -112,7 +113,9 @@ def __init__(self, opener: Callable[[Connection], Connection]): self.opener = opener def resolve(self, connection: Connection) -> Connection: - fire_event(NewConnectionOpening(connection_state=connection.state)) + fire_event( + NewConnectionOpening(connection_state=connection.state, node_info=get_node_info()) + ) return self.opener(connection) diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/parsed.py index 43d210bff2a..8fc4ca0c3ed 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/parsed.py @@ -48,6 +48,7 @@ SeedExceedsLimitAndPathChanged, SeedExceedsLimitChecksumChanged, ) +from dbt.events.contextvars import set_contextvars from dbt import flags from dbt.node_types import ModelLanguage, NodeType @@ -202,6 +203,14 @@ def node_info(self): node_info_msg = NodeInfo(**node_info) return node_info_msg + def update_event_status(self, **kwargs): + for k, v in kwargs.items(): + self._event_status[k] = v + set_contextvars(node_info=self.node_info) + + def clear_event_status(self): + self._event_status = dict() + @dataclass class ParsedNodeDefaults(NodeInfoMixin, ParsedNodeMandatory): diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index 91eb22a2f85..4adba9860b0 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -11,11 +11,9 @@ from dbt.exceptions import InternalException from dbt.events.functions import fire_event from dbt.events.types import TimingInfoCollected -from dbt.events.proto_types import RunResultMsg -from dbt.logger import ( - TimingProcessor, - JsonOnly, -) +from dbt.events.proto_types import RunResultMsg, TimingInfoMsg +from dbt.events.contextvars import get_node_info +from dbt.logger import TimingProcessor from dbt.utils import lowercase, cast_to_str, cast_to_int from dbt.dataclass_schema import dbtClassMixin, StrEnum @@ -48,7 +46,14 @@ def begin(self): def end(self): self.completed_at = datetime.utcnow() + def to_msg(self): + timsg = TimingInfoMsg( + name=self.name, started_at=self.started_at, completed_at=self.completed_at + ) + return timsg + +# This is a context manager class collect_timing_info: def __init__(self, name: str): self.timing_info = 
TimingInfo(name=name) @@ -59,8 +64,13 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): self.timing_info.end() - with JsonOnly(), TimingProcessor(self.timing_info): - fire_event(TimingInfoCollected()) + # Note: when legacy logger is removed, we can remove the following line + with TimingProcessor(self.timing_info): + fire_event( + TimingInfoCollected( + timing_info=self.timing_info.to_msg(), node_info=get_node_info() + ) + ) class RunningStatus(StrEnum): @@ -128,7 +138,8 @@ def to_msg(self): msg.thread = self.thread_id msg.execution_time = self.execution_time msg.num_failures = cast_to_int(self.failures) - # timing_info, adapter_response, message + msg.timing_info = [ti.to_msg() for ti in self.timing] + # adapter_response return msg diff --git a/core/dbt/events/adapter_endpoint.py b/core/dbt/events/adapter_endpoint.py index 68a73d8aecb..c26ac376437 100644 --- a/core/dbt/events/adapter_endpoint.py +++ b/core/dbt/events/adapter_endpoint.py @@ -1,6 +1,7 @@ import traceback from dataclasses import dataclass from dbt.events.functions import fire_event +from dbt.events.contextvars import get_node_info from dbt.events.types import ( AdapterEventDebug, AdapterEventInfo, @@ -15,27 +16,39 @@ class AdapterLogger: name: str def debug(self, msg, *args): - event = AdapterEventDebug(name=self.name, base_msg=msg, args=args) + event = AdapterEventDebug( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def info(self, msg, *args): - event = AdapterEventInfo(name=self.name, base_msg=msg, args=args) + event = AdapterEventInfo( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def warning(self, msg, *args): - event = AdapterEventWarning(name=self.name, base_msg=msg, args=args) + event = AdapterEventWarning( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) def error(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) # The default exc_info=True is what makes this method different def exception(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) event.exc_info = traceback.format_exc() fire_event(event) def critical(self, msg, *args): - event = AdapterEventError(name=self.name, base_msg=msg, args=args) + event = AdapterEventError( + name=self.name, base_msg=msg, args=args, node_info=get_node_info() + ) fire_event(event) diff --git a/core/dbt/events/contextvars.py b/core/dbt/events/contextvars.py new file mode 100644 index 00000000000..4aa507eb29b --- /dev/null +++ b/core/dbt/events/contextvars.py @@ -0,0 +1,84 @@ +import contextlib +import contextvars + +from typing import Any, Generator, Mapping, Dict +from dbt.events.proto_types import NodeInfo + + +LOG_PREFIX = "log_" +LOG_PREFIX_LEN = len(LOG_PREFIX) + +_log_context_vars: Dict[str, contextvars.ContextVar] = {} + + +def get_contextvars() -> Dict[str, Any]: + rv = {} + ctx = contextvars.copy_context() + + for k in ctx: + if k.name.startswith(LOG_PREFIX) and ctx[k] is not Ellipsis: + rv[k.name[LOG_PREFIX_LEN:]] = ctx[k] + + return rv + + +def get_node_info(): + cvars = get_contextvars() + if "node_info" in cvars: + return cvars["node_info"] + else: + return NodeInfo() + + +def clear_contextvars() -> None: + ctx = 
contextvars.copy_context() + for k in ctx: + if k.name.startswith(LOG_PREFIX): + k.set(Ellipsis) + + +# put keys and values into context. Returns the contextvar.Token mapping +# Save and pass to reset_contextvars +def set_contextvars(**kwargs: Any) -> Mapping[str, contextvars.Token]: + cvar_tokens = {} + for k, v in kwargs.items(): + log_key = f"{LOG_PREFIX}{k}" + try: + var = _log_context_vars[log_key] + except KeyError: + var = contextvars.ContextVar(log_key, default=Ellipsis) + _log_context_vars[log_key] = var + + cvar_tokens[k] = var.set(v) + + return cvar_tokens + + +# reset by Tokens +def reset_contextvars(**kwargs: contextvars.Token) -> None: + for k, v in kwargs.items(): + log_key = f"{LOG_PREFIX}{k}" + var = _log_context_vars[log_key] + var.reset(v) + + +# remove from contextvars +def unset_contextvars(*keys: str) -> None: + for k in keys: + if k in _log_context_vars: + log_key = f"{LOG_PREFIX}{k}" + _log_context_vars[log_key].set(Ellipsis) + + +# Context manager or decorator to set and unset the context vars +@contextlib.contextmanager +def log_contextvars(**kwargs: Any) -> Generator[None, None, None]: + context = get_contextvars() + saved = {k: context[k] for k in context.keys() & kwargs.keys()} + + set_contextvars(**kwargs) + try: + yield + finally: + unset_contextvars(*kwargs.keys()) + set_contextvars(**saved) diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index d69fa63eb6f..bd98f4932a8 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -126,12 +126,13 @@ def event_to_json( def event_to_dict(event: BaseEvent) -> dict: event_dict = dict() try: - # We could use to_json here, but it wouldn't sort the keys. - # The 'to_json' method just does json.dumps on the dict anyway. event_dict = event.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True) # type: ignore except AttributeError as exc: event_type = type(event).__name__ raise Exception(f"type {event_type} is not serializable. 
{str(exc)}") + # We don't want an empty NodeInfo in output + if "node_info" in event_dict and event_dict["node_info"]["node_name"] == "": + del event_dict["node_info"] return event_dict diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index c4d195e8777..17eeca3e4b3 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -345,9 +345,10 @@ class AdapterEventDebug(betterproto.Message): """E001""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -355,9 +356,10 @@ class AdapterEventInfo(betterproto.Message): """E002""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -365,9 +367,10 @@ class AdapterEventWarning(betterproto.Message): """E003""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) @dataclass @@ -375,10 +378,11 @@ class AdapterEventError(betterproto.Message): """E004""" info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - base_msg: str = betterproto.string_field(3) - args: List[str] = betterproto.string_field(4) - exc_info: str = betterproto.string_field(5) + node_info: "NodeInfo" = betterproto.message_field(2) + name: str = betterproto.string_field(3) + base_msg: str = betterproto.string_field(4) + args: List[str] = betterproto.string_field(5) + exc_info: str = betterproto.string_field(6) @dataclass @@ -386,8 +390,9 @@ class NewConnection(betterproto.Message): """E005""" info: "EventInfo" = betterproto.message_field(1) - conn_type: str = betterproto.string_field(2) - conn_name: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_type: str = betterproto.string_field(3) + conn_name: str = betterproto.string_field(4) @dataclass @@ -419,8 +424,9 @@ class RollbackFailed(betterproto.Message): """E009""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) - exc_info: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) + exc_info: str = betterproto.string_field(4) @dataclass @@ -428,7 +434,8 @@ class ConnectionClosed(betterproto.Message): """E010""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -436,7 +443,8 @@ class ConnectionLeftOpen(betterproto.Message): """E011""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: 
"NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -444,7 +452,8 @@ class Rollback(betterproto.Message): """E012""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -472,8 +481,9 @@ class ConnectionUsed(betterproto.Message): """E015""" info: "EventInfo" = betterproto.message_field(1) - conn_type: str = betterproto.string_field(2) - conn_name: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_type: str = betterproto.string_field(3) + conn_name: str = betterproto.string_field(4) @dataclass @@ -481,8 +491,9 @@ class SQLQuery(betterproto.Message): """E016""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) - sql: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) + sql: str = betterproto.string_field(4) @dataclass @@ -490,8 +501,9 @@ class SQLQueryStatus(betterproto.Message): """E017""" info: "EventInfo" = betterproto.message_field(1) - status: str = betterproto.string_field(2) - elapsed: float = betterproto.float_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + status: str = betterproto.string_field(3) + elapsed: float = betterproto.float_field(4) @dataclass @@ -499,7 +511,8 @@ class SQLCommit(betterproto.Message): """E018""" info: "EventInfo" = betterproto.message_field(1) - conn_name: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + conn_name: str = betterproto.string_field(3) @dataclass @@ -667,7 +680,8 @@ class NewConnectionOpening(betterproto.Message): """E037""" info: "EventInfo" = betterproto.message_field(1) - connection_state: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + connection_state: str = betterproto.string_field(3) @dataclass @@ -1247,7 +1261,8 @@ class JinjaLogWarning(betterproto.Message): """I061""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass @@ -1339,7 +1354,8 @@ class JinjaLogInfo(betterproto.Message): """M011""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass @@ -1347,7 +1363,8 @@ class JinjaLogDebug(betterproto.Message): """M012""" info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) + msg: str = betterproto.string_field(3) @dataclass @@ -1647,7 +1664,6 @@ class NodeStart(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass @@ -1656,7 +1672,6 @@ class NodeFinished(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) run_result: "RunResultMsg" = betterproto.message_field(4) @@ -1675,14 +1690,7 @@ class ConcurrencyLine(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) num_threads: int = betterproto.int32_field(2) target_name: str = 
betterproto.string_field(3) - - -@dataclass -class CompilingNode(betterproto.Message): - """Q028""" - - info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) + node_count: int = betterproto.int32_field(4) @dataclass @@ -1690,7 +1698,7 @@ class WritingInjectedSQLForNode(betterproto.Message): """Q029""" info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) + node_info: "NodeInfo" = betterproto.message_field(2) @dataclass @@ -1699,7 +1707,6 @@ class NodeCompiling(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass @@ -1708,7 +1715,6 @@ class NodeExecuting(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) node_info: "NodeInfo" = betterproto.message_field(2) - unique_id: str = betterproto.string_field(3) @dataclass @@ -1786,8 +1792,9 @@ class CatchableExceptionOnRun(betterproto.Message): """W002""" info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) - exc_info: str = betterproto.string_field(3) + node_info: "NodeInfo" = betterproto.message_field(2) + exc: str = betterproto.string_field(3) + exc_info: str = betterproto.string_field(4) @dataclass @@ -1905,6 +1912,8 @@ class TimingInfoCollected(betterproto.Message): """Z010""" info: "EventInfo" = betterproto.message_field(1) + node_info: "NodeInfo" = betterproto.message_field(2) + timing_info: "TimingInfoMsg" = betterproto.message_field(3) @dataclass diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 2666a3565e0..bc16433e1c0 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -265,41 +265,46 @@ message ExposureNameDeprecation { // E001 message AdapterEventDebug { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E002 message AdapterEventInfo { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E003 message AdapterEventWarning { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; } // E004 message AdapterEventError { EventInfo info = 1; - string name = 2; - string base_msg = 3; - repeated string args = 4; - string exc_info = 5; + NodeInfo node_info = 2; + string name = 3; + string base_msg = 4; + repeated string args = 5; + string exc_info = 6; } // E005 message NewConnection { EventInfo info = 1; - string conn_type = 2; - string conn_name = 3; + NodeInfo node_info = 2; + string conn_type = 3; + string conn_name = 4; } // E006 @@ -323,26 +328,30 @@ message ConnectionClosedInCleanup { // E009 message RollbackFailed { EventInfo info = 1; - string conn_name = 2; - string exc_info = 3; + NodeInfo node_info = 2; + string conn_name = 3; + string exc_info = 4; } // E010 message ConnectionClosed { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E011 message ConnectionLeftOpen { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E012 message Rollback { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string 
conn_name = 3; } // E013 @@ -364,28 +373,32 @@ message ListRelations { // E015 message ConnectionUsed { EventInfo info = 1; - string conn_type = 2; - string conn_name = 3; + NodeInfo node_info = 2; + string conn_type = 3; + string conn_name = 4; } // E016 message SQLQuery { EventInfo info = 1; - string conn_name = 2; - string sql = 3; + NodeInfo node_info = 2; + string conn_name = 3; + string sql = 4; } // E017 message SQLQueryStatus { EventInfo info = 1; - string status = 2; - float elapsed = 3; + NodeInfo node_info = 2; + string status = 3; + float elapsed = 4; } // E018 message SQLCommit { EventInfo info = 1; - string conn_name = 2; + NodeInfo node_info = 2; + string conn_name = 3; } // E019 @@ -507,7 +520,8 @@ message PluginLoadError { // E037 message NewConnectionOpening { EventInfo info = 1; - string connection_state = 2; + NodeInfo node_info = 2; + string connection_state = 3; } // E038 @@ -946,7 +960,8 @@ message NodeNotFoundOrDisabled { // I061 message JinjaLogWarning { EventInfo info = 1; - string msg = 2; + NodeInfo node_info = 2; + string msg = 3; } // M - Deps generation @@ -1018,13 +1033,15 @@ message SelectorReportInvalidSelector { // M011 message JinjaLogInfo { EventInfo info = 1; - string msg = 2; + NodeInfo node_info = 2; + string msg = 3; } // M012 message JinjaLogDebug { EventInfo info = 1; - string msg = 2; + NodeInfo node_info = 2; + string msg = 3; } // M013 @@ -1270,14 +1287,12 @@ message DefaultSelector { message NodeStart { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q025 message NodeFinished { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; RunResultMsg run_result = 4; } @@ -1292,32 +1307,27 @@ message ConcurrencyLine { EventInfo info = 1; int32 num_threads = 2; string target_name = 3; + int32 node_count = 4; } -// Q028 -message CompilingNode { - EventInfo info = 1; - string unique_id = 2; -} +// Skipped Q028 // Q029 message WritingInjectedSQLForNode { EventInfo info = 1; - string unique_id = 2; + NodeInfo node_info = 2; } // Q030 message NodeCompiling { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q031 message NodeExecuting { EventInfo info = 1; NodeInfo node_info = 2; - string unique_id = 3; } // Q032 @@ -1383,8 +1393,9 @@ message NoNodesSelected { // W002 message CatchableExceptionOnRun { EventInfo info = 1; - string exc = 2; - string exc_info = 3; + NodeInfo node_info = 2; + string exc = 3; + string exc_info = 4; } // W003 @@ -1476,6 +1487,8 @@ message SystemReportReturnCode { // Z010 message TimingInfoCollected { EventInfo info = 1; + NodeInfo node_info = 2; + TimingInfoMsg timing_info = 3; } // Z011 diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 55439e0ec15..2ba224eded7 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -14,9 +14,9 @@ ) from dbt.events.format import format_fancy_output_line, pluralize -# The generated classes quote the included message classes, requiring the following line +# The generated classes quote the included message classes, requiring the following lines from dbt.events.proto_types import EventInfo, RunResultMsg, ListOfStrings # noqa -from dbt.events.proto_types import NodeInfo, ReferenceKeyMsg # noqa +from dbt.events.proto_types import NodeInfo, ReferenceKeyMsg, TimingInfoMsg # noqa from dbt.events import proto_types as pt from dbt.node_types import NodeType @@ -2083,7 +2083,7 @@ def code(self): return "Q024" def message(self) -> str: - return f"Began running node {self.unique_id}" + return f"Began running node 
{self.node_info.unique_id}" @dataclass @@ -2092,7 +2092,7 @@ def code(self): return "Q025" def message(self) -> str: - return f"Finished running node {self.unique_id}" + return f"Finished running node {self.node_info.unique_id}" @dataclass @@ -2118,13 +2118,7 @@ def message(self) -> str: return f"Concurrency: {self.num_threads} threads (target='{self.target_name}')" -@dataclass -class CompilingNode(DebugLevel, pt.CompilingNode): - def code(self): - return "Q028" - - def message(self) -> str: - return f"Compiling {self.unique_id}" +# Skipped Q028 @dataclass @@ -2133,7 +2127,7 @@ def code(self): return "Q029" def message(self) -> str: - return f'Writing injected SQL for node "{self.unique_id}"' + return f'Writing injected SQL for node "{self.node_info.unique_id}"' @dataclass @@ -2142,7 +2136,7 @@ def code(self): return "Q030" def message(self) -> str: - return f"Began compiling node {self.unique_id}" + return f"Began compiling node {self.node_info.unique_id}" @dataclass @@ -2151,7 +2145,7 @@ def code(self): return "Q031" def message(self) -> str: - return f"Began executing node {self.unique_id}" + return f"Began executing node {self.node_info.unique_id}" @dataclass @@ -2393,7 +2387,7 @@ def code(self): return "Z010" def message(self) -> str: - return "finished collecting timing info" + return f"Timing info for {self.node_info.unique_id} ({self.timing_info.name}): {self.timing_info.started_at} => {self.timing_info.completed_at}" # This prints the stack trace at the debug level while allowing just the nice exception message diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index 05f3debafe6..32aa8b477a9 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -4,6 +4,7 @@ from dbt.events.helpers import env_secrets, scrub_secrets from dbt.events.types import JinjaLogWarning +from dbt.events.contextvars import get_node_info from dbt.node_types import NodeType import dbt.dataclass_schema @@ -996,7 +997,10 @@ def raise_duplicate_alias( def warn(msg, node=None): - dbt.events.functions.warn_or_error(JinjaLogWarning(msg=msg), node=node) + dbt.events.functions.warn_or_error( + JinjaLogWarning(msg=msg, node_info=get_node_info()), + node=node, + ) return "" diff --git a/core/dbt/lib.py b/core/dbt/lib.py index 5b2ee2ea29f..f4b9ab5be0e 100644 --- a/core/dbt/lib.py +++ b/core/dbt/lib.py @@ -31,11 +31,10 @@ def compile_and_execute(self, manifest, ctx): method. Once conditional credential usage is enabled, this should be removed. 
""" result = None - ctx.node._event_status["node_status"] = RunningStatus.Compiling + ctx.node.update_event_status(node_status=RunningStatus.Compiling) fire_event( NodeCompiling( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("compile") as timing_info: @@ -47,11 +46,10 @@ def compile_and_execute(self, manifest, ctx): # for ephemeral nodes, we only want to compile, not run if not ctx.node.is_ephemeral_model: - ctx.node._event_status["node_status"] = RunningStatus.Executing + ctx.node.update_event_status(node_status=RunningStatus.Executing) fire_event( NodeExecuting( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("execute") as timing_info: diff --git a/core/dbt/logger.py b/core/dbt/logger.py index 4bbcfca4c06..0c7ba2fe8f2 100644 --- a/core/dbt/logger.py +++ b/core/dbt/logger.py @@ -191,11 +191,6 @@ def process(self, record): record.level = self.target_level -class JsonOnly(logbook.Processor): - def process(self, record): - record.extra["json_only"] = True - - class TextOnly(logbook.Processor): def process(self, record): record.extra["text_only"] = True diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py index 3a7d49c0cf3..b69ca20ef6d 100644 --- a/core/dbt/parser/generic_test.py +++ b/core/dbt/parser/generic_test.py @@ -14,6 +14,7 @@ from dbt.parser.base import BaseParser from dbt.parser.search import FileBlock from dbt.utils import MACRO_PREFIX +from dbt import flags class GenericTestParser(BaseParser[ParsedGenericTestNode]): @@ -87,7 +88,8 @@ def parse_file(self, block: FileBlock): source_file = block.file assert isinstance(source_file.contents, str) original_file_path = source_file.path.original_file_path - fire_event(GenericTestFileParse(path=original_file_path)) + if flags.MACRO_DEBUGGING: + fire_event(GenericTestFileParse(path=original_file_path)) # this is really only used for error messages base_node = UnparsedMacro( diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py index 396d39f57cc..7f99753ad2c 100644 --- a/core/dbt/parser/macros.py +++ b/core/dbt/parser/macros.py @@ -13,6 +13,7 @@ from dbt.parser.base import BaseParser from dbt.parser.search import FileBlock, filesystem_search from dbt.utils import MACRO_PREFIX +from dbt import flags class MacroParser(BaseParser[ParsedMacro]): @@ -94,7 +95,8 @@ def parse_file(self, block: FileBlock): source_file = block.file assert isinstance(source_file.contents, str) original_file_path = source_file.path.original_file_path - fire_event(MacroFileParse(path=original_file_path)) + if flags.MACRO_DEBUGGING: + fire_event(MacroFileParse(path=original_file_path)) # this is really only used for error messages base_node = UnparsedMacro( diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index 90f53c1f3f2..dbe495840e0 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -43,6 +43,7 @@ NodeCompiling, NodeExecuting, ) +from dbt.events.contextvars import get_node_info from .printer import print_run_result_error from dbt.adapters.factory import register_adapter @@ -312,11 +313,10 @@ def skip_result(self, node, message): def compile_and_execute(self, manifest, ctx): result = None with self.adapter.connection_for(self.node): - ctx.node._event_status["node_status"] = RunningStatus.Compiling + ctx.node.update_event_status(node_status=RunningStatus.Compiling) fire_event( NodeCompiling( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("compile") as timing_info: @@ -328,11 
+328,10 @@ def compile_and_execute(self, manifest, ctx): # for ephemeral nodes, we only want to compile, not run if not ctx.node.is_ephemeral_model: - ctx.node._event_status["node_status"] = RunningStatus.Executing + ctx.node.update_event_status(node_status=RunningStatus.Executing) fire_event( NodeExecuting( node_info=ctx.node.node_info, - unique_id=ctx.node.unique_id, ) ) with collect_timing_info("execute") as timing_info: @@ -347,7 +346,11 @@ def _handle_catchable_exception(self, e, ctx): if e.node is None: e.add_node(ctx.node) - fire_event(CatchableExceptionOnRun(exc=str(e), exc_info=traceback.format_exc())) + fire_event( + CatchableExceptionOnRun( + exc=str(e), exc_info=traceback.format_exc(), node_info=get_node_info() + ) + ) return str(e) def _handle_internal_exception(self, e, ctx): diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 4f6bf037d6c..39776b58e87 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -340,8 +340,9 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): finishctx = TimestampNamed("node_finished_at") for idx, hook in enumerate(ordered_hooks, start=1): - hook._event_status["started_at"] = datetime.utcnow().isoformat() - hook._event_status["node_status"] = RunningStatus.Started + hook.update_event_status( + started_at=datetime.utcnow().isoformat(), node_status=RunningStatus.Started + ) sql = self.get_hook_sql(adapter, hook, idx, num_hooks, extra_context) hook_text = "{}.{}.{}".format(hook.package_name, hook_type, hook.index) @@ -365,9 +366,9 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): status = "OK" self.ran_hooks.append(hook) - hook._event_status["finished_at"] = datetime.utcnow().isoformat() + hook.update_event_status(finished_at=datetime.utcnow().isoformat()) with finishctx, DbtModelState({"node_status": "passed"}): - hook._event_status["node_status"] = RunStatus.Success + hook.update_event_status(node_status=RunStatus.Success) fire_event( LogHookEndLine( statement=hook_text, @@ -380,9 +381,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): ) # `_event_status` dict is only used for logging. 
Make sure # it gets deleted when we're done with it - del hook._event_status["started_at"] - del hook._event_status["finished_at"] - del hook._event_status["node_status"] + hook.clear_event_status() self._total_executed += len(ordered_hooks) diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 0e3e8328b11..279baffc448 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -38,6 +38,7 @@ EndRunResult, NothingToDo, ) +from dbt.events.contextvars import log_contextvars from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.parsed import ParsedSourceDefinition @@ -205,41 +206,39 @@ def get_runner(self, node): def call_runner(self, runner): uid_context = UniqueID(runner.node.unique_id) - with RUNNING_STATE, uid_context: + with RUNNING_STATE, uid_context, log_contextvars(node_info=runner.node.node_info): startctx = TimestampNamed("node_started_at") index = self.index_offset(runner.node_index) - runner.node._event_status["started_at"] = datetime.utcnow().isoformat() - runner.node._event_status["node_status"] = RunningStatus.Started + runner.node.update_event_status( + started_at=datetime.utcnow().isoformat(), node_status=RunningStatus.Started + ) extended_metadata = ModelMetadata(runner.node, index) with startctx, extended_metadata: fire_event( NodeStart( node_info=runner.node.node_info, - unique_id=runner.node.unique_id, ) ) status: Dict[str, str] = {} try: result = runner.run_with_hooks(self.manifest) status = runner.get_result_status(result) - runner.node._event_status["node_status"] = result.status - runner.node._event_status["finished_at"] = datetime.utcnow().isoformat() + runner.node.update_event_status( + node_status=result.status, finished_at=datetime.utcnow().isoformat() + ) finally: finishctx = TimestampNamed("finished_at") with finishctx, DbtModelState(status): fire_event( NodeFinished( node_info=runner.node.node_info, - unique_id=runner.node.unique_id, run_result=result.to_msg(), ) ) # `_event_status` dict is only used for logging. 
Make sure # it gets deleted when we're done with it - del runner.node._event_status["started_at"] - del runner.node._event_status["finished_at"] - del runner.node._event_status["node_status"] + runner.node.clear_event_status() fail_fast = flags.FAIL_FAST @@ -371,8 +370,13 @@ def execute_nodes(self): num_threads = self.config.threads target_name = self.config.target_name + # following line can be removed when legacy logger is removed with NodeCount(self.num_nodes): - fire_event(ConcurrencyLine(num_threads=num_threads, target_name=target_name)) + fire_event( + ConcurrencyLine( + num_threads=num_threads, target_name=target_name, node_count=self.num_nodes + ) + ) with TextOnly(): fire_event(EmptyLine()) diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py new file mode 100644 index 00000000000..b0feea50809 --- /dev/null +++ b/tests/functional/logging/test_logging.py @@ -0,0 +1,51 @@ +import pytest +from dbt.tests.util import run_dbt, get_manifest, read_file +import json + + +my_model_sql = """ + select 1 as fun +""" + + +@pytest.fixture(scope="class") +def models(): + return {"my_model.sql": my_model_sql} + + +# This test checks that various events contain node_info, +# which is supplied by the log_contextvars context manager +def test_basic(project, logs_dir): + results = run_dbt(["--log-format=json", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + assert "model.test.my_model" in manifest.nodes + + # get log file + log_file = read_file(logs_dir, "dbt.log") + assert log_file + node_start = False + node_finished = False + for log_line in log_file.split('\n'): + # skip empty lines + if len(log_line) == 0: + continue + # The adapter logging also shows up, so skip non-json lines + if "[debug]" in log_line: + continue + log_dct = json.loads(log_line) + log_event = log_dct['info']['name'] + if log_event == "NodeStart": + node_start = True + if log_event == "NodeFinished": + node_finished = True + if node_start and not node_finished: + if log_event == 'NodeExecuting': + assert "node_info" in log_dct + if log_event == "JinjaLogDebug": + assert "node_info" in log_dct + if log_event == "SQLQuery": + assert "node_info" in log_dct + if log_event == "TimingInfoCollected": + assert "node_info" in log_dct + assert "timing_info" in log_dct diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index eb4a87f495e..f41c9b49033 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -421,14 +421,13 @@ def MockNode(): ), LogCancelLine(conn_name=""), DefaultSelector(name=""), - NodeStart(unique_id=""), - NodeFinished(unique_id=""), + NodeStart(node_info=NodeInfo()), + NodeFinished(node_info=NodeInfo()), QueryCancelationUnsupported(type=""), ConcurrencyLine(num_threads=0, target_name=""), - CompilingNode(unique_id=""), - WritingInjectedSQLForNode(unique_id=""), - NodeCompiling(unique_id=""), - NodeExecuting(unique_id=""), + WritingInjectedSQLForNode(node_info=NodeInfo()), + NodeCompiling(node_info=NodeInfo()), + NodeExecuting(node_info=NodeInfo()), LogHookStartLine( statement="", index=0, @@ -519,6 +518,8 @@ def MockNode(): ] + + class TestEventJSONSerialization: # attempts to test that every event is serializable to json. 
diff --git a/tox.ini b/tox.ini index 109e8b4f62f..41cfc795ca2 100644 --- a/tox.ini +++ b/tox.ini @@ -15,7 +15,7 @@ deps = -reditable-requirements.txt [testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py-integration}] -description = adapter plugin integration testing +description = functional testing download = true skip_install = true passenv = DBT_* POSTGRES_TEST_* PYTEST_ADDOPTS From 540c3b79aaa361a249ff308401548318bb1f4b51 Mon Sep 17 00:00:00 2001 From: Stu Kilgore Date: Tue, 6 Dec 2022 11:14:20 -0600 Subject: [PATCH 051/156] Prevent docs gen workflow on forks (#6390) --- .changes/unreleased/Under the Hood-20221206-094015.yaml | 7 +++++++ .github/workflows/generate-cli-api-docs.yml | 3 +-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221206-094015.yaml diff --git a/.changes/unreleased/Under the Hood-20221206-094015.yaml b/.changes/unreleased/Under the Hood-20221206-094015.yaml new file mode 100644 index 00000000000..ebcb9999430 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221206-094015.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Prevent doc gen workflow from running on forks +time: 2022-12-06T09:40:15.301984-06:00 +custom: + Author: stu-k + Issue: "6386" + PR: "6390" diff --git a/.github/workflows/generate-cli-api-docs.yml b/.github/workflows/generate-cli-api-docs.yml index 2364ea99fd6..bc079499b83 100644 --- a/.github/workflows/generate-cli-api-docs.yml +++ b/.github/workflows/generate-cli-api-docs.yml @@ -34,6 +34,7 @@ jobs: check_gen: name: check if generation needed runs-on: ubuntu-latest + if: ${{ github.event.pull_request.head.repo.fork == false }} outputs: cli_dir_changed: ${{ steps.check_cli.outputs.cli_dir_changed }} docs_dir_changed: ${{ steps.check_docs.outputs.docs_dir_changed }} @@ -44,8 +45,6 @@ jobs: echo "env.CLI_DIR: ${{ env.CLI_DIR }}" echo "env.DOCS_BUILD_DIR: ${{ env.DOCS_BUILD_DIR }}" echo "env.DOCS_DIR: ${{ env.DOCS_DIR }}" - echo ">>>>> git log" - git log --pretty=oneline | head -5 - name: git checkout uses: actions/checkout@v3 From 60f80056b1b09b35a22ac91c93319827bdac0e0e Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Tue, 6 Dec 2022 15:51:52 -0500 Subject: [PATCH 052/156] CT-1405: Refactor event logging code (#6291) * CT-1405: Refactor event logging code * CT-1405: Add changelog entry * CT-1405: Add code to protect against using closed streams from past tests. 
* CT-1405: Restore unit test which was only failing locally * CT-1405: Document a hack with issue # to resolve it in the future * CT-1405: Make black happy * CT-1405: Get rid of confusing factory function and duplicated function * CT-1405: Remove unused event from types.proto and auto-gen'd file --- .../Under the Hood-20221118-145717.yaml | 8 + core/dbt/cli/main.py | 1 - core/dbt/cli/params.py | 8 - core/dbt/contracts/project.py | 1 - core/dbt/events/base_types.py | 41 +- core/dbt/events/eventmgr.py | 182 +++++++++ core/dbt/events/functions.py | 357 ++++++------------ core/dbt/events/proto_types.py | 7 - core/dbt/events/types.proto | 5 +- core/dbt/events/types.py | 12 - core/dbt/flags.py | 9 +- core/dbt/main.py | 8 - core/dbt/task/base.py | 7 - core/dbt/task/list.py | 6 +- core/dbt/tests/fixtures/project.py | 15 +- core/dbt/tests/util.py | 4 +- test/integration/base.py | 13 +- test/unit/test_context.py | 1 - test/unit/test_flags.py | 15 - test/unit/test_manifest.py | 6 +- .../context_methods/test_builtin_functions.py | 12 +- tests/unit/test_events.py | 25 +- 22 files changed, 370 insertions(+), 373 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221118-145717.yaml create mode 100644 core/dbt/events/eventmgr.py diff --git a/.changes/unreleased/Under the Hood-20221118-145717.yaml b/.changes/unreleased/Under the Hood-20221118-145717.yaml new file mode 100644 index 00000000000..934cd9dd5cb --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221118-145717.yaml @@ -0,0 +1,8 @@ +kind: Under the Hood +body: Functionality-neutral refactor of event logging system to improve encapsulation + and modularity. +time: 2022-11-18T14:57:17.792622-05:00 +custom: + Author: peterallenwebb + Issue: "6139" + PR: "6291" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 3f3b94ea9e3..6f0a153c923 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -31,7 +31,6 @@ def cli_runner(): @p.cache_selected_only @p.debug @p.enable_legacy_logger -@p.event_buffer_size @p.fail_fast @p.log_cache_events @p.log_format diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index 1661e6e8c55..5045d04cc18 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -80,14 +80,6 @@ hidden=True, ) -event_buffer_size = click.option( - "--event-buffer-size", - envvar="DBT_EVENT_BUFFER_SIZE", - help="Sets the max number of events to buffer in EVENT_HISTORY.", - default=100000, - type=click.INT, -) - exclude = click.option("--exclude", envvar=None, help="Specify the nodes to exclude.") fail_fast = click.option( diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index 17523a40bdb..ea50d654f6c 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -251,7 +251,6 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract): static_parser: Optional[bool] = None indirect_selection: Optional[str] = None cache_selected_only: Optional[bool] = None - event_buffer_size: Optional[int] = None @dataclass diff --git a/core/dbt/events/base_types.py b/core/dbt/events/base_types.py index de010c89ceb..db74016099a 100644 --- a/core/dbt/events/base_types.py +++ b/core/dbt/events/base_types.py @@ -1,4 +1,5 @@ from dataclasses import dataclass +from enum import Enum import os import threading from datetime import datetime @@ -43,6 +44,16 @@ def get_thread_name() -> str: return threading.current_thread().name +# EventLevel is an Enum, but mixing in the 'str' type is suggested in the Python +# documentation, and provides support for 
json conversion, which fails otherwise. +class EventLevel(str, Enum): + DEBUG = "debug" + TEST = "test" + INFO = "info" + WARN = "warn" + ERROR = "error" + + @dataclass class BaseEvent: """BaseEvent for proto message generated python events""" @@ -62,15 +73,15 @@ def __post_init__(self): self.info.code = self.code() self.info.name = type(self).__name__ - def level_tag(self) -> str: - return "debug" - # This is here because although we know that info should always # exist, mypy doesn't. - def log_level(self) -> str: + def log_level(self) -> EventLevel: return self.info.level # type: ignore - def message(self): + def level_tag(self) -> EventLevel: + return EventLevel.DEBUG + + def message(self) -> str: raise Exception("message() not implemented for event") @@ -85,32 +96,32 @@ class DynamicLevel(BaseEvent): class TestLevel(BaseEvent): __test__ = False - def level_tag(self) -> str: - return "test" + def level_tag(self) -> EventLevel: + return EventLevel.TEST @dataclass # type: ignore[misc] class DebugLevel(BaseEvent): - def level_tag(self) -> str: - return "debug" + def level_tag(self) -> EventLevel: + return EventLevel.DEBUG @dataclass # type: ignore[misc] class InfoLevel(BaseEvent): - def level_tag(self) -> str: - return "info" + def level_tag(self) -> EventLevel: + return EventLevel.INFO @dataclass # type: ignore[misc] class WarnLevel(BaseEvent): - def level_tag(self) -> str: - return "warn" + def level_tag(self) -> EventLevel: + return EventLevel.WARN @dataclass # type: ignore[misc] class ErrorLevel(BaseEvent): - def level_tag(self) -> str: - return "error" + def level_tag(self) -> EventLevel: + return EventLevel.ERROR # Included to ensure classes with str-type message members are initialized correctly. diff --git a/core/dbt/events/eventmgr.py b/core/dbt/events/eventmgr.py new file mode 100644 index 00000000000..4d0ddeb06ef --- /dev/null +++ b/core/dbt/events/eventmgr.py @@ -0,0 +1,182 @@ +from colorama import Style +from dataclasses import dataclass +from datetime import datetime +from enum import Enum +import json +import logging +from logging.handlers import RotatingFileHandler +import threading +from typing import Any, Callable, List, Optional, TextIO +from uuid import uuid4 + +from dbt.events.base_types import BaseEvent, EventLevel + + +# A Filter is a function which takes a BaseEvent and returns True if the event +# should be logged, False otherwise. +Filter = Callable[[BaseEvent], bool] + + +# Default filter which logs every event +def NoFilter(_: BaseEvent) -> bool: + return True + + +# A Scrubber removes secrets from an input string, returning a sanitized string. 
+Scrubber = Callable[[str], str] + + +# Provide a pass-through scrubber implementation, also used as a default +def NoScrubber(s: str) -> str: + return s + + +class LineFormat(Enum): + PlainText = 1 + DebugText = 2 + Json = 3 + + +# Map from dbt event levels to python log levels +_log_level_map = { + EventLevel.DEBUG: 10, + EventLevel.TEST: 10, + EventLevel.INFO: 20, + EventLevel.WARN: 30, + EventLevel.ERROR: 40, +} + + +@dataclass +class LoggerConfig: + name: str + filter: Filter = NoFilter + scrubber: Scrubber = NoScrubber + line_format: LineFormat = LineFormat.PlainText + level: EventLevel = EventLevel.WARN + use_colors: bool = False + output_stream: Optional[TextIO] = None + output_file_name: Optional[str] = None + logger: Optional[Any] = None + + +class _Logger: + def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: + self.name: str = config.name + self.filter: Filter = config.filter + self.scrubber: Scrubber = config.scrubber + self.level: EventLevel = config.level + self.event_manager: EventManager = event_manager + self._python_logger: Optional[logging.Logger] = config.logger + self._stream: Optional[TextIO] = config.output_stream + + if config.output_file_name: + log = logging.getLogger(config.name) + log.setLevel(_log_level_map[config.level]) + handler = RotatingFileHandler( + filename=str(config.output_file_name), + encoding="utf8", + maxBytes=10 * 1024 * 1024, # 10 mb + backupCount=5, + ) + + handler.setFormatter(logging.Formatter(fmt="%(message)s")) + log.handlers.clear() + log.addHandler(handler) + + self._python_logger = log + + def create_line(self, e: BaseEvent) -> str: + raise NotImplementedError() + + def write_line(self, e: BaseEvent): + line = self.create_line(e) + python_level = _log_level_map[e.log_level()] + if self._python_logger is not None: + self._python_logger.log(python_level, line) + elif self._stream is not None and _log_level_map[self.level] <= python_level: + self._stream.write(line + "\n") + + def flush(self): + if self._python_logger is not None: + for handler in self._python_logger.handlers: + handler.flush() + elif self._stream is not None: + self._stream.flush() + + +class _TextLogger(_Logger): + def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: + super().__init__(event_manager, config) + self.use_colors = config.use_colors + self.use_debug_format = config.line_format == LineFormat.DebugText + + def create_line(self, e: BaseEvent) -> str: + return self.create_debug_line(e) if self.use_debug_format else self.create_info_line(e) + + def create_info_line(self, e: BaseEvent) -> str: + ts: str = datetime.utcnow().strftime("%H:%M:%S") + scrubbed_msg: str = self.scrubber(e.message()) # type: ignore + return f"{self._get_color_tag()}{ts} {scrubbed_msg}" + + def create_debug_line(self, e: BaseEvent) -> str: + log_line: str = "" + # Create a separator if this is the beginning of an invocation + # TODO: This is an ugly hack, get rid of it if we can + if type(e).__name__ == "MainReportVersion": + separator = 30 * "=" + log_line = f"\n\n{separator} {datetime.utcnow()} | {self.event_manager.invocation_id} {separator}\n" + ts: str = datetime.utcnow().strftime("%H:%M:%S.%f") + scrubbed_msg: str = self.scrubber(e.message()) # type: ignore + log_line += f"{self._get_color_tag()}{ts} [{e.log_level():<5}]{self._get_thread_name()} {scrubbed_msg}" + return log_line + + def _get_color_tag(self) -> str: + return "" if not self.use_colors else Style.RESET_ALL + + def _get_thread_name(self) -> str: + thread_name = "" + 
if threading.current_thread().name: + thread_name = threading.current_thread().name + thread_name = thread_name[:10] + thread_name = thread_name.ljust(10, " ") + thread_name = f" [{thread_name}]:" + return thread_name + + +class _JsonLogger(_Logger): + def create_line(self, e: BaseEvent) -> str: + from dbt.events.functions import event_to_dict + + event_dict = event_to_dict(e) + raw_log_line = json.dumps(event_dict, sort_keys=True) + line = self.scrubber(raw_log_line) # type: ignore + return line + + +class EventManager: + def __init__(self) -> None: + self.loggers: List[_Logger] = [] + self.callbacks: List[Callable[[BaseEvent], None]] = [] + self.invocation_id: str = str(uuid4()) + + def fire_event(self, e: BaseEvent) -> None: + for logger in self.loggers: + if logger.filter(e): # type: ignore + logger.write_line(e) + + for callback in self.callbacks: + callback(e) + + def add_logger(self, config: LoggerConfig): + logger = ( + _JsonLogger(self, config) + if config.line_format == LineFormat.Json + else _TextLogger(self, config) + ) + logger.event_manager = self + self.loggers.append(logger) + + def flush(self): + for logger in self.loggers: + logger.flush() diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index bd98f4932a8..c9d82b9036a 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -1,123 +1,139 @@ import betterproto -from colorama import Style - -from dbt.events.base_types import NoStdOut, BaseEvent, NoFile, Cache -from dbt.events.types import EventBufferFull, MainReportVersion, EmptyLine -from dbt.events.proto_types import EventInfo +from dbt.constants import METADATA_ENV_PREFIX +from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut +from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter from dbt.events.helpers import env_secrets, scrub_secrets +from dbt.events.proto_types import EventInfo +from dbt.events.types import EmptyLine import dbt.flags as flags - -from dbt.constants import METADATA_ENV_PREFIX - -from dbt.logger import make_log_dir_if_missing, GLOBAL_LOGGER -from datetime import datetime +from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing +from functools import partial import json -import io -from io import StringIO, TextIOWrapper -import logbook -import logging -from logging import Logger -import sys -from logging.handlers import RotatingFileHandler import os +import sys +from typing import Callable, Dict, Optional, TextIO import uuid -import threading -from typing import Optional, Union, Callable, Dict -from collections import deque LOG_VERSION = 3 -EVENT_HISTORY = None - -# create the global file logger with no configuration -FILE_LOG = logging.getLogger("default_file") -null_handler = logging.NullHandler() -FILE_LOG.addHandler(null_handler) - -# set up logger to go to stdout with defaults -# setup_event_logger will be called once args have been parsed -STDOUT_LOG = logging.getLogger("default_stdout") -STDOUT_LOG.setLevel(logging.INFO) -stdout_handler = logging.StreamHandler(sys.stdout) -stdout_handler.setLevel(logging.INFO) -STDOUT_LOG.addHandler(stdout_handler) - -format_color = True -format_json = False -invocation_id: Optional[str] = None metadata_vars: Optional[Dict[str, str]] = None +# The default event manager will not log anything, but some tests run code that +# generates events, without configuring the event manager. 
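+# setup_event_logger(), defined below, replaces that silent default for real
+# invocations by attaching stdout (or legacy logbook) and file loggers to it.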
+EVENT_MANAGER: EventManager = EventManager()


-def setup_event_logger(log_path, level_override=None):
-    global format_json, format_color, STDOUT_LOG, FILE_LOG
+
+def setup_event_logger(log_path: str, level_override: Optional[EventLevel] = None):
+    cleanup_event_logger()
     make_log_dir_if_missing(log_path)
 
+    if flags.ENABLE_LEGACY_LOGGER:
+        EVENT_MANAGER.add_logger(_get_logbook_log_config(level_override))
+    else:
+        EVENT_MANAGER.add_logger(_get_stdout_config(level_override))
+
+    if _CAPTURE_STREAM:
+        # Create a second stdout logger to support tests which want to know what's
+        # being sent to stdout.
+        capture_config = _get_stdout_config(level_override)
+        capture_config.output_stream = _CAPTURE_STREAM
+        EVENT_MANAGER.add_logger(capture_config)
+
+    # create and add the file logger to the event manager
+    EVENT_MANAGER.add_logger(_get_logfile_config(os.path.join(log_path, "dbt.log")))
+
+
+def _get_stdout_config(level: Optional[EventLevel]) -> LoggerConfig:
+    fmt = LineFormat.PlainText
+    if flags.LOG_FORMAT == "json":
+        fmt = LineFormat.Json
+    elif flags.DEBUG:
+        fmt = LineFormat.DebugText
+
+    return LoggerConfig(
+        name="stdout_log",
+        level=level or (EventLevel.DEBUG if flags.DEBUG else EventLevel.INFO),
+        use_colors=bool(flags.USE_COLORS),
+        line_format=fmt,
+        scrubber=env_scrubber,
+        filter=partial(
+            _stdout_filter, bool(flags.LOG_CACHE_EVENTS), bool(flags.DEBUG), bool(flags.QUIET)
+        ),
+        output_stream=sys.stdout,
+    )
+

-    format_json = flags.LOG_FORMAT == "json"
-    # USE_COLORS can be None if the app just started and the cli flags
-    # havent been applied yet
-    format_color = True if flags.USE_COLORS else False
-    # TODO this default should live somewhere better
-    log_dest = os.path.join(log_path, "dbt.log")
-    level = level_override or (logging.DEBUG if flags.DEBUG else logging.INFO)
-
-    # overwrite the STDOUT_LOG logger with the configured one
-    STDOUT_LOG = logging.getLogger("configured_std_out")
-    STDOUT_LOG.setLevel(level)
-
-    FORMAT = "%(message)s"
-    stdout_passthrough_formatter = logging.Formatter(fmt=FORMAT)
-
-    stdout_handler = logging.StreamHandler(sys.stdout)
-    stdout_handler.setFormatter(stdout_passthrough_formatter)
-    stdout_handler.setLevel(level)
-    # clear existing stdout TextIOWrapper stream handlers
-    STDOUT_LOG.handlers = [
-        h
-        for h in STDOUT_LOG.handlers
-        if not (hasattr(h, "stream") and isinstance(h.stream, TextIOWrapper))  # type: ignore
-    ]
-    STDOUT_LOG.addHandler(stdout_handler)
-
-    # overwrite the FILE_LOG logger with the configured one
-    FILE_LOG = logging.getLogger("configured_file")
-    FILE_LOG.setLevel(logging.DEBUG)  # always debug regardless of user input
-
-    file_passthrough_formatter = logging.Formatter(fmt=FORMAT)
-
-    file_handler = RotatingFileHandler(
-        filename=log_dest, encoding="utf8", maxBytes=10 * 1024 * 1024, backupCount=5  # 10 mb
+def _stdout_filter(
+    log_cache_events: bool, debug_mode: bool, quiet_mode: bool, evt: BaseEvent
+) -> bool:
+    return (
+        not isinstance(evt, NoStdOut)
+        and (not isinstance(evt, Cache) or log_cache_events)
+        and (evt.log_level() != EventLevel.DEBUG or debug_mode)
+        and (evt.log_level() == EventLevel.ERROR or not quiet_mode)
+        and not (flags.LOG_FORMAT == "json" and type(evt) == EmptyLine)
     )
-    file_handler.setFormatter(file_passthrough_formatter)
-    file_handler.setLevel(logging.DEBUG)  # always debug regardless of user input
-    FILE_LOG.handlers.clear()
-    FILE_LOG.addHandler(file_handler)


-# used for integration tests
-def capture_stdout_logs() -> StringIO:
-    global STDOUT_LOG
-    capture_buf = io.StringIO()
-    stdout_capture_handler
= logging.StreamHandler(capture_buf) - stdout_handler.setLevel(logging.DEBUG) - STDOUT_LOG.addHandler(stdout_capture_handler) - return capture_buf +def _get_logfile_config(log_path: str) -> LoggerConfig: + return LoggerConfig( + name="file_log", + line_format=LineFormat.Json if flags.LOG_FORMAT == "json" else LineFormat.DebugText, + use_colors=bool(flags.USE_COLORS), + level=EventLevel.DEBUG, # File log is *always* debug level + scrubber=env_scrubber, + filter=partial(_logfile_filter, bool(flags.LOG_CACHE_EVENTS)), + output_file_name=log_path, + ) + + +def _logfile_filter(log_cache_events: bool, evt: BaseEvent) -> bool: + return ( + not isinstance(evt, NoFile) + and not (isinstance(evt, Cache) and not log_cache_events) + and not (flags.LOG_FORMAT == "json" and type(evt) == EmptyLine) + ) + + +def _get_logbook_log_config(level: Optional[EventLevel]) -> LoggerConfig: + config = _get_stdout_config(level) + config.name = "logbook_log" + config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e, Cache) + config.logger = GLOBAL_LOGGER + return config + + +def env_scrubber(msg: str) -> str: + return scrub_secrets(msg, env_secrets()) + + +def cleanup_event_logger(): + # Reset to a no-op manager to release streams associated with logs. This is + # especially important for tests, since pytest replaces the stdout stream + # during test runs, and closes the stream after the test is over. + EVENT_MANAGER.loggers.clear() + EVENT_MANAGER.callbacks.clear() + + +# This global, and the following two functions for capturing stdout logs are +# an unpleasant hack we intend to remove as part of API-ification. The GitHub +# issue #6350 was opened for that work. +_CAPTURE_STREAM: Optional[TextIO] = None # used for integration tests -def stop_capture_stdout_logs() -> None: - global STDOUT_LOG - STDOUT_LOG.handlers = [ - h - for h in STDOUT_LOG.handlers - if not (hasattr(h, "stream") and isinstance(h.stream, StringIO)) # type: ignore - ] +def capture_stdout_logs(stream: TextIO): + global _CAPTURE_STREAM + _CAPTURE_STREAM = stream + + +def stop_capture_stdout_logs(): + global _CAPTURE_STREAM + _CAPTURE_STREAM = None # returns a dictionary representation of the event fields. # the message may contain secrets which must be scrubbed at the usage site. -def event_to_json( - event: BaseEvent, -) -> str: +def event_to_json(event: BaseEvent) -> str: event_dict = event_to_dict(event) raw_log_line = json.dumps(event_dict, sort_keys=True) return raw_log_line @@ -136,83 +152,6 @@ def event_to_dict(event: BaseEvent) -> dict: return event_dict -# translates an Event to a completely formatted text-based log line -# type hinting everything as strings so we don't get any unintentional string conversions via str() -def reset_color() -> str: - global format_color - return "" if not format_color else Style.RESET_ALL - - -def create_info_text_log_line(e: BaseEvent) -> str: - color_tag: str = reset_color() - ts: str = get_ts().strftime("%H:%M:%S") # TODO: get this from the event.ts? 
- scrubbed_msg: str = scrub_secrets(e.message(), env_secrets()) - log_line: str = f"{color_tag}{ts} {scrubbed_msg}" - return log_line - - -def create_debug_text_log_line(e: BaseEvent) -> str: - log_line: str = "" - # Create a separator if this is the beginning of an invocation - if type(e) == MainReportVersion: - separator = 30 * "=" - log_line = f"\n\n{separator} {get_ts()} | {get_invocation_id()} {separator}\n" - color_tag: str = reset_color() - ts: str = get_ts().strftime("%H:%M:%S.%f") - scrubbed_msg: str = scrub_secrets(e.message(), env_secrets()) - # Make the levels all 5 characters so they line up - level: str = f"{e.log_level():<5}" - thread = "" - if threading.current_thread().name: - thread_name = threading.current_thread().name - thread_name = thread_name[:10] - thread_name = thread_name.ljust(10, " ") - thread = f" [{thread_name}]:" - log_line = log_line + f"{color_tag}{ts} [{level}]{thread} {scrubbed_msg}" - return log_line - - -# translates an Event to a completely formatted json log line -def create_json_log_line(e: BaseEvent) -> Optional[str]: - if type(e) == EmptyLine: - return None # will not be sent to logger - raw_log_line = event_to_json(e) - return scrub_secrets(raw_log_line, env_secrets()) - - -# calls create_stdout_text_log_line() or create_json_log_line() according to logger config -def create_log_line(e: BaseEvent, file_output=False) -> Optional[str]: - global format_json - if format_json: - return create_json_log_line(e) # json output, both console and file - elif file_output is True or flags.DEBUG: - return create_debug_text_log_line(e) # default file output - else: - return create_info_text_log_line(e) # console output - - -# allows for reuse of this obnoxious if else tree. -# do not use for exceptions, it doesn't pass along exc_info, stack_info, or extra -def send_to_logger(l: Union[Logger, logbook.Logger], level: str, log_line: str): - if not log_line: - return - if level == "test": - # TODO after implmenting #3977 send to new test level - l.debug(log_line) - elif level == "debug": - l.debug(log_line) - elif level == "info": - l.info(log_line) - elif level == "warn": - l.warning(log_line) - elif level == "error": - l.error(log_line) - else: - raise AssertionError( - f"While attempting to log {log_line}, encountered the unhandled level: {level}" - ) - - def warn_or_error(event, node=None): if flags.WARN_ERROR: from dbt.exceptions import raise_compiler_error @@ -234,39 +173,7 @@ def fire_event_if(conditional: bool, lazy_e: Callable[[], BaseEvent]) -> None: # (i.e. - mutating the event history, printing to stdout, logging # to files, etc.) 
def fire_event(e: BaseEvent) -> None: - # skip logs when `--log-cache-events` is not passed - if isinstance(e, Cache) and not flags.LOG_CACHE_EVENTS: - return - - add_to_event_history(e) - - # backwards compatibility for plugins that require old logger (dbt-rpc) - if flags.ENABLE_LEGACY_LOGGER: - # using Event::message because the legacy logger didn't differentiate messages by - # destination - log_line = create_log_line(e) - if log_line: - send_to_logger(GLOBAL_LOGGER, level=e.log_level(), log_line=log_line) - return # exit the function to avoid using the current logger as well - - # always logs debug level regardless of user input - if not isinstance(e, NoFile): - log_line = create_log_line(e, file_output=True) - # doesn't send exceptions to exception logger - if log_line: - send_to_logger(FILE_LOG, level=e.log_level(), log_line=log_line) - - if not isinstance(e, NoStdOut): - # explicitly checking the debug flag here so that potentially expensive-to-construct - # log messages are not constructed if debug messages are never shown. - if e.log_level() == "debug" and not flags.DEBUG: - return # eat the message in case it was one of the expensive ones - if e.log_level() != "error" and flags.QUIET: - return # eat all non-exception messages in quiet mode - - log_line = create_log_line(e) - if log_line: - send_to_logger(STDOUT_LOG, level=e.log_level(), log_line=log_line) + EVENT_MANAGER.fire_event(e) def get_metadata_vars() -> Dict[str, str]: @@ -286,47 +193,13 @@ def reset_metadata_vars() -> None: def get_invocation_id() -> str: - global invocation_id - if invocation_id is None: - invocation_id = str(uuid.uuid4()) - return invocation_id + return EVENT_MANAGER.invocation_id def set_invocation_id() -> None: # This is primarily for setting the invocation_id for separate # commands in the dbt servers. It shouldn't be necessary for the CLI. 
- global invocation_id - invocation_id = str(uuid.uuid4()) - - -# exactly one time stamp per concrete event -def get_ts() -> datetime: - ts = datetime.utcnow() - return ts - - -# preformatted time stamp -def get_ts_rfc3339() -> str: - ts = get_ts() - ts_rfc3339 = ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - return ts_rfc3339 - - -def add_to_event_history(event): - if flags.EVENT_BUFFER_SIZE == 0: - return - global EVENT_HISTORY - if EVENT_HISTORY is None: - reset_event_history() - EVENT_HISTORY.append(event) - # We only set the EventBufferFull message for event buffers >= 10,000 - if flags.EVENT_BUFFER_SIZE >= 10000 and len(EVENT_HISTORY) == (flags.EVENT_BUFFER_SIZE - 1): - fire_event(EventBufferFull()) - - -def reset_event_history(): - global EVENT_HISTORY - EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) + EVENT_MANAGER.invocation_id = str(uuid.uuid4()) # Currently used to set the level in EventInfo, so logging events can diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 17eeca3e4b3..5ee384643d3 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -2159,13 +2159,6 @@ class TrackingInitializeFailure(betterproto.Message): exc_info: str = betterproto.string_field(2) -@dataclass -class EventBufferFull(betterproto.Message): - """Z045""" - - info: "EventInfo" = betterproto.message_field(1) - - @dataclass class RunResultWarningMessage(betterproto.Message): """Z046""" diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index bc16433e1c0..1c330106d92 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -1674,10 +1674,7 @@ message TrackingInitializeFailure { string exc_info = 2; } -// Z045 -message EventBufferFull { - EventInfo info = 1; -} +// Skipped Z045 // Z046 message RunResultWarningMessage { diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 2ba224eded7..843ef020bbd 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -2704,18 +2704,6 @@ def message(self) -> str: return "Got an exception trying to initialize tracking" -@dataclass -class EventBufferFull(WarnLevel, pt.EventBufferFull): - def code(self): - return "Z045" - - def message(self) -> str: - return ( - "Internal logging/event buffer full." - "Earliest logs/events will be dropped as new ones are fired (FIFO)." 
- ) - - # this is the message from the result object @dataclass class RunResultWarningMessage(WarnLevel, EventStringFunctor, pt.RunResultWarningMessage): diff --git a/core/dbt/flags.py b/core/dbt/flags.py index 367286ccb8c..484071aa609 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -39,7 +39,6 @@ WHICH = None INDIRECT_SELECTION = None LOG_CACHE_EVENTS = None -EVENT_BUFFER_SIZE = 100000 QUIET = None NO_PRINT = None CACHE_SELECTED_ONLY = None @@ -51,7 +50,6 @@ "PRINTER_WIDTH", "PROFILES_DIR", "INDIRECT_SELECTION", - "EVENT_BUFFER_SIZE", "TARGET_PATH", "LOG_PATH", ] @@ -78,7 +76,6 @@ "PRINTER_WIDTH": 80, "INDIRECT_SELECTION": "eager", "LOG_CACHE_EVENTS": False, - "EVENT_BUFFER_SIZE": 100000, "QUIET": False, "NO_PRINT": False, "CACHE_SELECTED_ONLY": False, @@ -134,7 +131,7 @@ def set_from_args(args, user_config): global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS - global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, EVENT_BUFFER_SIZE, QUIET, NO_PRINT, CACHE_SELECTED_ONLY + global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, QUIET, NO_PRINT, CACHE_SELECTED_ONLY global TARGET_PATH, LOG_PATH STRICT_MODE = False # backwards compatibility @@ -159,7 +156,6 @@ def set_from_args(args, user_config): PRINTER_WIDTH = get_flag_value("PRINTER_WIDTH", args, user_config) INDIRECT_SELECTION = get_flag_value("INDIRECT_SELECTION", args, user_config) LOG_CACHE_EVENTS = get_flag_value("LOG_CACHE_EVENTS", args, user_config) - EVENT_BUFFER_SIZE = get_flag_value("EVENT_BUFFER_SIZE", args, user_config) QUIET = get_flag_value("QUIET", args, user_config) NO_PRINT = get_flag_value("NO_PRINT", args, user_config) CACHE_SELECTED_ONLY = get_flag_value("CACHE_SELECTED_ONLY", args, user_config) @@ -182,7 +178,7 @@ def _set_overrides_from_env(): def get_flag_value(flag, args, user_config): flag_value = _load_flag_value(flag, args, user_config) - if flag in ["PRINTER_WIDTH", "EVENT_BUFFER_SIZE"]: # must be ints + if flag == "PRINTER_WIDTH": # must be ints flag_value = int(flag_value) if flag == "PROFILES_DIR": flag_value = os.path.abspath(flag_value) @@ -243,7 +239,6 @@ def get_flag_dict(): "printer_width": PRINTER_WIDTH, "indirect_selection": INDIRECT_SELECTION, "log_cache_events": LOG_CACHE_EVENTS, - "event_buffer_size": EVENT_BUFFER_SIZE, "quiet": QUIET, "no_print": NO_PRINT, } diff --git a/core/dbt/main.py b/core/dbt/main.py index 153c120a6e0..55920e8a5cc 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -1105,14 +1105,6 @@ def parse_args(args, cls=DBTArgumentParser): """, ) - p.add_argument( - "--event-buffer-size", - dest="event_buffer_size", - help=""" - Sets the max number of events to buffer in EVENT_HISTORY - """, - ) - p.add_argument( "-q", "--quiet", diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index dbe495840e0..e448a15c1d2 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -22,7 +22,6 @@ InternalException, ) from dbt.logger import log_manager -import dbt.events.functions as event_logger from dbt.events.functions import fire_event from dbt.events.types import ( DbtProjectError, @@ -86,9 +85,6 @@ def pre_init_hook(cls, args): """A hook called before the task is initialized.""" if args.log_format == "json": log_manager.format_json() - # we're mutating the initialized, but not-yet-configured event logger - # because it's being configured too late -- bad! TODO refactor! 
- event_logger.format_json = True else: log_manager.format_text() @@ -96,9 +92,6 @@ def pre_init_hook(cls, args): def set_log_format(cls): if flags.LOG_FORMAT == "json": log_manager.format_json() - # we're mutating the initialized, but not-yet-configured event logger - # because it's being configured too late -- bad! TODO refactor! - event_logger.format_json = True else: log_manager.format_text() diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index 43cd8e3f8fe..d2a33dec184 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -9,8 +9,7 @@ from dbt.events.types import NoNodesSelected from dbt.exceptions import RuntimeException, InternalException from dbt.logger import log_manager -import logging -import dbt.events.functions as event_logger +from dbt.events.eventmgr import EventLevel class ListTask(GraphRunnableTask): @@ -62,9 +61,8 @@ def pre_init_hook(cls, args): # - mutating the initialized, not-yet-configured STDOUT event logger # because it's being configured too late -- bad! TODO refactor! log_manager.stderr_console() - event_logger.STDOUT_LOG.level = logging.WARN super().pre_init_hook(args) - return logging.WARN + return EventLevel.WARN def _iterate_selected_nodes(self): selector = self.get_node_selector() diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index 5da885edf9b..993c89b3544 100644 --- a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -10,7 +10,7 @@ import dbt.flags as flags from dbt.config.runtime import RuntimeConfig from dbt.adapters.factory import get_adapter, register_adapter, reset_adapters, get_adapter_by_type -from dbt.events.functions import setup_event_logger +from dbt.events.functions import setup_event_logger, cleanup_event_logger from dbt.tests.util import ( write_file, run_sql_with_adapter, @@ -229,6 +229,15 @@ def selectors_yml(project_root, selectors): write_file(data, project_root, "selectors.yml") +# This fixture ensures that the logging infrastructure does not accidentally +# reuse streams configured on previous test runs, which might now be closed. +# It should be run before (and so included as a parameter by) any other fixture +# which runs dbt-core functions that might fire events. +@pytest.fixture(scope="class") +def clean_up_logging(): + cleanup_event_logger() + + # This creates an adapter that is used for running test setup, such as creating # the test schema, and sql commands that are run in tests prior to the first # dbt command. After a dbt command is run, the project.adapter property will @@ -240,7 +249,7 @@ def selectors_yml(project_root, selectors): # otherwise this will fail. So to test errors in those areas, you need to copy the files # into the project in the tests instead of putting them in the fixtures. @pytest.fixture(scope="class") -def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml): +def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging): # The profiles.yml and dbt_project.yml should already be written out args = Namespace( profiles_dir=str(profiles_root), project_dir=str(project_root), target=None, profile=None @@ -438,6 +447,7 @@ def get_tables_in_schema(self): # to pull in the other fixtures individually to access their information. 
@pytest.fixture(scope="class") def project( + clean_up_logging, project_root, profiles_root, request, @@ -490,3 +500,4 @@ def project( except (KeyError, AttributeError, CompilationException): pass os.chdir(orig_cwd) + cleanup_event_logger() diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py index af837c18b17..bb8b03131b5 100644 --- a/core/dbt/tests/util.py +++ b/core/dbt/tests/util.py @@ -1,3 +1,4 @@ +from io import StringIO import os import shutil import yaml @@ -87,7 +88,8 @@ def run_dbt(args: List[str] = None, expect_pass=True): # will turn the logs into json, so you have to be prepared for that. def run_dbt_and_capture(args: List[str] = None, expect_pass=True): try: - stringbuf = capture_stdout_logs() + stringbuf = StringIO() + capture_stdout_logs(stringbuf) res = run_dbt(args, expect_pass=expect_pass) stdout = stringbuf.getvalue() diff --git a/test/integration/base.py b/test/integration/base.py index b2e55159d6b..ae1dfc6480e 100644 --- a/test/integration/base.py +++ b/test/integration/base.py @@ -1,6 +1,6 @@ +from io import StringIO import json import os -import io import random import shutil import sys @@ -26,7 +26,7 @@ from dbt.context import providers from dbt.logger import log_manager from dbt.events.functions import ( - capture_stdout_logs, fire_event, setup_event_logger, stop_capture_stdout_logs + capture_stdout_logs, fire_event, setup_event_logger, cleanup_event_logger, stop_capture_stdout_logs ) from dbt.events.test_types import ( IntegrationTestInfo, @@ -440,6 +440,8 @@ def tearDown(self): except EnvironmentError: msg = f"Could not clean up after test - {self.test_root_dir} not removable" fire_event(IntegrationTestException(msg=msg)) + + cleanup_event_logger() def _get_schema_fqn(self, database, schema): schema_fqn = self.quote_as_configured(schema, 'schema') @@ -524,7 +526,8 @@ def run_dbt(self, args=None, expect_pass=True, profiles_dir=True): def run_dbt_and_capture(self, *args, **kwargs): try: - stringbuf = capture_stdout_logs() + stringbuf = StringIO() + capture_stdout_logs(stringbuf) res = self.run_dbt(*args, **kwargs) stdout = stringbuf.getvalue() @@ -548,8 +551,8 @@ def run_dbt_and_check(self, args=None, profiles_dir=True): if profiles_dir: final_args.extend(['--profiles-dir', self.test_root_dir]) final_args.append('--log-cache-events') - msg = f"Invoking dbt with {final_args}" - fire_event(IntegrationTestInfo(msg=msg)) + # msg = f"Invoking dbt with {final_args}" + # fire_event(IntegrationTestInfo(msg=msg)) return dbt.handle_and_check(final_args) def run_sql_file(self, path, kwargs=None): diff --git a/test/unit/test_context.py b/test/unit/test_context.py index 1aaf3711909..c25729f0afb 100644 --- a/test/unit/test_context.py +++ b/test/unit/test_context.py @@ -430,7 +430,6 @@ def test_invocation_args_to_dict_in_macro_runtime_context( ) # Comes from dbt/flags.py as they are the only values set that aren't None at default - assert ctx["invocation_args_dict"]["event_buffer_size"] == 100000 assert ctx["invocation_args_dict"]["printer_width"] == 80 # Comes from unit/utils.py config_from_parts_or_dicts method diff --git a/test/unit/test_flags.py b/test/unit/test_flags.py index fc4455f5d1b..4be866338a2 100644 --- a/test/unit/test_flags.py +++ b/test/unit/test_flags.py @@ -261,18 +261,3 @@ def test__flags(self): # cleanup os.environ.pop('DBT_LOG_PATH') delattr(self.args, 'log_path') - - # event_buffer_size - self.user_config.event_buffer_size = 100 - flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.EVENT_BUFFER_SIZE, 100) - 
os.environ['DBT_EVENT_BUFFER_SIZE'] = '80' - flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.EVENT_BUFFER_SIZE, 80) - setattr(self.args, 'event_buffer_size', '120') - flags.set_from_args(self.args, self.user_config) - self.assertEqual(flags.EVENT_BUFFER_SIZE, 120) - # cleanup - os.environ.pop('DBT_EVENT_BUFFER_SIZE') - delattr(self.args, 'event_buffer_size') - self.user_config.event_buffer_size = None diff --git a/test/unit/test_manifest.py b/test/unit/test_manifest.py index 3e18c555b9c..c701aef5b32 100644 --- a/test/unit/test_manifest.py +++ b/test/unit/test_manifest.py @@ -308,7 +308,7 @@ def test__no_nodes(self): metadata=ManifestMetadata(generated_at=datetime.utcnow()), ) - invocation_id = dbt.events.functions.invocation_id + invocation_id = dbt.events.functions.EVENT_MANAGER.invocation_id self.assertEqual( manifest.writable_manifest().to_dict(omit_none=True), { @@ -425,7 +425,7 @@ def test__build_flat_graph(self): @mock.patch.object(tracking, 'active_user') def test_metadata(self, mock_user): mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf' - dbt.events.functions.invocation_id = '01234567-0123-0123-0123-0123456789ab' + dbt.events.functions.EVENT_MANAGER.invocation_id = '01234567-0123-0123-0123-0123456789ab' dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False now = datetime.utcnow() self.assertEqual( @@ -448,7 +448,7 @@ def test_metadata(self, mock_user): @freezegun.freeze_time('2018-02-14T09:15:13Z') def test_no_nodes_with_metadata(self, mock_user): mock_user.id = 'cfc9500f-dc7f-4c83-9ea7-2c581c1b38cf' - dbt.events.functions.invocation_id = '01234567-0123-0123-0123-0123456789ab' + dbt.events.functions.EVENT_MANAGER.invocation_id = '01234567-0123-0123-0123-0123456789ab' dbt.flags.SEND_ANONYMOUS_USAGE_STATS = False metadata = ManifestMetadata( project_id='098f6bcd4621d373cade4e832627b4f6', diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py index 83043b15a10..3a5dff3f2f7 100644 --- a/tests/functional/context_methods/test_builtin_functions.py +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -112,15 +112,15 @@ def test_builtin_invocation_args_dict_function(self, project): expected = "invocation_result: {'debug': True, 'log_format': 'json', 'write_json': True, 'use_colors': True, 'printer_width': 80, 'version_check': True, 'partial_parse': True, 'static_parser': True, 'profiles_dir': " assert expected in str(result) - expected = "'send_anonymous_usage_stats': False, 'event_buffer_size': 100000, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'indirect_selection': 'eager'}" + expected = "'send_anonymous_usage_stats': False, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'indirect_selection': 'eager'}" assert expected in str(result) def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch): envs = { - "DBT_ENV_CUSTOM_ENV_RUN_ID": 1234, - "DBT_ENV_CUSTOM_ENV_JOB_ID": 5678, - "DBT_ENV_RUN_ID": 91011, - "RANDOM_ENV": 121314, + "DBT_ENV_CUSTOM_ENV_RUN_ID": "1234", + "DBT_ENV_CUSTOM_ENV_JOB_ID": "5678", + "DBT_ENV_RUN_ID": "91011", + "RANDOM_ENV": "121314", } monkeypatch.setattr(os, "environ", envs) @@ -133,7 +133,7 @@ def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch): assert result - expected = 
"dbt_metadata_envs_result:{'RUN_ID': 1234, 'JOB_ID': 5678}" + expected = "dbt_metadata_envs_result:{'RUN_ID': '1234', 'JOB_ID': '5678'}" assert expected in str(result) diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index f41c9b49033..711328f32f3 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -1,7 +1,7 @@ # flake8: noqa from dbt.events.test_types import UnitTestInfo from dbt.events import AdapterLogger -from dbt.events.functions import event_to_json, LOG_VERSION, reset_event_history, event_to_dict +from dbt.events.functions import event_to_json, LOG_VERSION, event_to_dict from dbt.events.types import * from dbt.events.test_types import * @@ -102,28 +102,6 @@ def test_event_codes(self): all_codes.add(code) -class TestEventBuffer: - def setUp(self) -> None: - flags.EVENT_BUFFER_SIZE = 10 - reload(event_funcs) - - # ensure events are populated to the buffer exactly once - def test_buffer_populates(self): - self.setUp() - event_funcs.fire_event(UnitTestInfo(msg="Test Event 1")) - event_funcs.fire_event(UnitTestInfo(msg="Test Event 2")) - event1 = event_funcs.EVENT_HISTORY[-2] - assert event_funcs.EVENT_HISTORY.count(event1) == 1 - - # ensure events drop from the front of the buffer when buffer maxsize is reached - def test_buffer_FIFOs(self): - reset_event_history() - event_funcs.EVENT_HISTORY.clear() - for n in range(1, (flags.EVENT_BUFFER_SIZE + 2)): - event_funcs.fire_event(UnitTestInfo(msg=f"Test Event {n}")) - assert event_funcs.EVENT_HISTORY.count(UnitTestInfo(msg="Test Event 1")) == 0 - - def MockNode(): return ParsedModelNode( alias="model_one", @@ -504,7 +482,6 @@ def MockNode(): FlushEvents(), FlushEventsFailure(), TrackingInitializeFailure(), - EventBufferFull(), RunResultWarningMessage(), # T - tests ====================== From b9a35da1182571e667bce5cda4e717c3fc032637 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Tue, 6 Dec 2022 18:34:39 -0700 Subject: [PATCH 053/156] Fix intermittent database connection failure in Windows CI test (#6395) * Fix intermittent database connection failure in Windows CI test * Changelog entry --- .changes/unreleased/Under the Hood-20221206-113053.yaml | 7 +++++++ core/dbt/tests/fixtures/project.py | 6 +++--- 2 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221206-113053.yaml diff --git a/.changes/unreleased/Under the Hood-20221206-113053.yaml b/.changes/unreleased/Under the Hood-20221206-113053.yaml new file mode 100644 index 00000000000..a1f94f68f43 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221206-113053.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Fix intermittent database connection failure in Windows CI test +time: 2022-12-06T11:30:53.166009-07:00 +custom: + Author: MichelleArk dbeatty10 + Issue: "6394" + PR: "6395" diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index 993c89b3544..2d7ae5ded67 100644 --- a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -6,7 +6,7 @@ import warnings import yaml -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationException, DatabaseException import dbt.flags as flags from dbt.config.runtime import RuntimeConfig from dbt.adapters.factory import get_adapter, register_adapter, reset_adapters, get_adapter_by_type @@ -494,10 +494,10 @@ def project( # a `load_dependencies` method. 
# Macros gets executed as part of drop_scheme in core/dbt/adapters/sql/impl.py. When # the macros have errors (which is what we're actually testing for...) they end up - # throwing CompilationExceptions + # throwing CompilationExceptions or DatabaseExceptions try: project.drop_test_schema() - except (KeyError, AttributeError, CompilationException): + except (KeyError, AttributeError, CompilationException, DatabaseException): pass os.chdir(orig_cwd) cleanup_event_logger() From 0721f2c1b70eca233326cdd7d3de755924415b20 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Dec 2022 10:16:31 -0500 Subject: [PATCH 054/156] Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core (#6375) * Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core Bumps [mashumaro[msgpack]](https://github.com/Fatal1ty/mashumaro) from 3.1.1 to 3.2. - [Release notes](https://github.com/Fatal1ty/mashumaro/releases) - [Commits](https://github.com/Fatal1ty/mashumaro/compare/v3.1.1...v3.2) --- updated-dependencies: - dependency-name: mashumaro[msgpack] dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Add automated changelog yaml from template for bot PR Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Github Build Bot --- .changes/unreleased/Dependency-20221205-002118.yaml | 7 +++++++ core/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Dependency-20221205-002118.yaml diff --git a/.changes/unreleased/Dependency-20221205-002118.yaml b/.changes/unreleased/Dependency-20221205-002118.yaml new file mode 100644 index 00000000000..b1e1ae1a6cd --- /dev/null +++ b/.changes/unreleased/Dependency-20221205-002118.yaml @@ -0,0 +1,7 @@ +kind: "Dependency" +body: "Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core" +time: 2022-12-05T00:21:18.00000Z +custom: + Author: dependabot[bot] + Issue: 4904 + PR: 6375 diff --git a/core/setup.py b/core/setup.py index 013d0440c26..96bec2e96bf 100644 --- a/core/setup.py +++ b/core/setup.py @@ -54,7 +54,7 @@ "hologram>=0.0.14,<=0.0.15", "isodate>=0.6,<0.7", "logbook>=1.5,<1.6", - "mashumaro[msgpack]==3.1.1", + "mashumaro[msgpack]==3.2", "minimal-snowplow-tracker==0.0.2", "networkx>=2.3,<2.8.1;python_version<'3.8'", "networkx>=2.3,<3;python_version>='3.8'", From 1b6fed2ffd8d02e767e1afefd68e8336e27bc23e Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Wed, 7 Dec 2022 15:21:05 -0500 Subject: [PATCH 055/156] CT 1604 remove compiled classes (#6384) * removed Compiled versions of nodes * Remove compiled fields from dictionary if not compiled * check compiled is False instead of attribute existence in env_var processing * Update artifacts test (CompiledSnapshotNode did not have SnapshotConfig) * Changie * more complicated 'compiling' check in env_var * Update test_exit_codes.py --- .../Under the Hood-20221205-164948.yaml | 7 + core/dbt/adapters/base/impl.py | 15 +- core/dbt/adapters/base/query_headers.py | 4 +- core/dbt/adapters/base/relation.py | 22 +- core/dbt/adapters/protocol.py | 7 +- core/dbt/clients/jinja.py | 5 +- core/dbt/compilation.py | 55 ++-- core/dbt/context/base.py | 6 +- core/dbt/context/docs.py | 7 +- core/dbt/context/macro_resolver.py | 10 +- core/dbt/context/macros.py | 12 +- core/dbt/context/providers.py | 47 ++-- core/dbt/contracts/graph/compiled.py | 236 ------------------ core/dbt/contracts/graph/manifest.py | 129 +++++----- 
core/dbt/contracts/graph/metrics.py | 2 +- .../contracts/graph/{parsed.py => nodes.py} | 201 +++++++++------ core/dbt/contracts/results.py | 7 +- core/dbt/contracts/sql.py | 4 +- core/dbt/graph/queue.py | 10 +- core/dbt/graph/selector.py | 2 +- core/dbt/graph/selector_methods.py | 44 ++-- core/dbt/parser/README.md | 22 +- core/dbt/parser/analysis.py | 10 +- core/dbt/parser/base.py | 6 +- core/dbt/parser/docs.py | 8 +- core/dbt/parser/generic_test.py | 11 +- core/dbt/parser/generic_test_builders.py | 2 +- core/dbt/parser/hooks.py | 12 +- core/dbt/parser/macros.py | 12 +- core/dbt/parser/manifest.py | 48 ++-- core/dbt/parser/models.py | 40 ++- core/dbt/parser/partial.py | 2 +- core/dbt/parser/schemas.py | 34 +-- core/dbt/parser/seeds.py | 12 +- core/dbt/parser/singular_test.py | 10 +- core/dbt/parser/snapshots.py | 8 +- core/dbt/parser/sources.py | 34 ++- core/dbt/parser/sql.py | 14 +- core/dbt/task/freshness.py | 4 +- core/dbt/task/generate.py | 4 +- core/dbt/task/list.py | 8 +- core/dbt/task/run.py | 17 +- core/dbt/task/runnable.py | 7 +- core/dbt/task/test.py | 11 +- test/unit/test_compiler.py | 33 ++- test/unit/test_context.py | 14 +- test/unit/test_contracts_graph_compiled.py | 34 +-- test/unit/test_contracts_graph_parsed.py | 108 ++++---- test/unit/test_docs_blocks.py | 8 +- test/unit/test_graph_selector_methods.py | 50 ++-- test/unit/test_macro_resolver.py | 6 +- test/unit/test_manifest.py | 47 ++-- test/unit/test_parser.py | 38 +-- test/unit/test_partial_parsing.py | 6 +- test/unit/utils.py | 22 +- .../functional/artifacts/expected_manifest.py | 2 + .../functional/exit_codes/test_exit_codes.py | 1 + tests/unit/test_events.py | 4 +- 58 files changed, 658 insertions(+), 883 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221205-164948.yaml delete mode 100644 core/dbt/contracts/graph/compiled.py rename core/dbt/contracts/graph/{parsed.py => nodes.py} (85%) diff --git a/.changes/unreleased/Under the Hood-20221205-164948.yaml b/.changes/unreleased/Under the Hood-20221205-164948.yaml new file mode 100644 index 00000000000..579f973955b --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221205-164948.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Consolidate ParsedNode and CompiledNode classes +time: 2022-12-05T16:49:48.563583-05:00 +custom: + Author: gshank + Issue: "6383" + PR: "6384" diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 33b7c45a3c4..bbac18cb16b 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -15,7 +15,6 @@ List, Mapping, Iterator, - Union, Set, ) @@ -38,9 +37,8 @@ ) from dbt.clients.agate_helper import empty_table, merge_tables, table_from_rows from dbt.clients.jinja import MacroGenerator -from dbt.contracts.graph.compiled import CompileResultNode, CompiledSeedNode from dbt.contracts.graph.manifest import Manifest, MacroManifest -from dbt.contracts.graph.parsed import ParsedSeedNode +from dbt.contracts.graph.nodes import ResultNode from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( CacheMiss, @@ -64,9 +62,6 @@ from dbt.adapters.cache import RelationsCache, _make_ref_key_msg -SeedModel = Union[ParsedSeedNode, CompiledSeedNode] - - GET_CATALOG_MACRO_NAME = "get_catalog" FRESHNESS_MACRO_NAME = "collect_freshness" @@ -243,9 +238,7 @@ def nice_connection_name(self) -> str: return conn.name @contextmanager - def connection_named( - self, name: str, node: Optional[CompileResultNode] = None - ) -> Iterator[None]: + def connection_named(self, name: 
str, node: Optional[ResultNode] = None) -> Iterator[None]: try: if self.connections.query_header is not None: self.connections.query_header.set(name, node) @@ -257,7 +250,7 @@ def connection_named( self.connections.query_header.reset() @contextmanager - def connection_for(self, node: CompileResultNode) -> Iterator[None]: + def connection_for(self, node: ResultNode) -> Iterator[None]: with self.connection_named(node.unique_id, node): yield @@ -372,7 +365,7 @@ def _get_catalog_schemas(self, manifest: Manifest) -> SchemaSearchMap: lowercase strings. """ info_schema_name_map = SchemaSearchMap() - nodes: Iterator[CompileResultNode] = chain( + nodes: Iterator[ResultNode] = chain( [ node for node in manifest.nodes.values() diff --git a/core/dbt/adapters/base/query_headers.py b/core/dbt/adapters/base/query_headers.py index 26f34be9c93..dd88fdb2d41 100644 --- a/core/dbt/adapters/base/query_headers.py +++ b/core/dbt/adapters/base/query_headers.py @@ -5,7 +5,7 @@ from dbt.context.manifest import generate_query_header_context from dbt.contracts.connection import AdapterRequiredConfig, QueryComment -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest from dbt.exceptions import RuntimeException @@ -90,7 +90,7 @@ def add(self, sql: str) -> str: def reset(self): self.set("master", None) - def set(self, name: str, node: Optional[CompileResultNode]): + def set(self, name: str, node: Optional[ResultNode]): wrapped: Optional[NodeWrapper] = None if node is not None: wrapped = NodeWrapper(node) diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index 3124384975a..55182396ef4 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -2,8 +2,7 @@ from dataclasses import dataclass from typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set -from dbt.contracts.graph.compiled import CompiledNode -from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode +from dbt.contracts.graph.nodes import SourceDefinition, ParsedNode from dbt.contracts.relation import ( RelationType, ComponentName, @@ -184,7 +183,7 @@ def quoted(self, identifier): ) @classmethod - def create_from_source(cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any) -> Self: + def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self: source_quoting = source.quoting.to_dict(omit_none=True) source_quoting.pop("column", None) quote_policy = deep_merge( @@ -209,7 +208,7 @@ def add_ephemeral_prefix(name: str): def create_ephemeral_from_node( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, CompiledNode], + node: ParsedNode, ) -> Self: # Note that ephemeral models are based on the name. 
identifier = cls.add_ephemeral_prefix(node.name) @@ -222,7 +221,7 @@ def create_ephemeral_from_node( def create_from_node( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, CompiledNode], + node: ParsedNode, quote_policy: Optional[Dict[str, bool]] = None, **kwargs: Any, ) -> Self: @@ -243,21 +242,18 @@ def create_from_node( def create_from( cls: Type[Self], config: HasQuoting, - node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition], + node: Union[ParsedNode, SourceDefinition], **kwargs: Any, ) -> Self: if node.resource_type == NodeType.Source: - if not isinstance(node, ParsedSourceDefinition): + if not isinstance(node, SourceDefinition): raise InternalException( - "type mismatch, expected ParsedSourceDefinition but got {}".format(type(node)) + "type mismatch, expected SourceDefinition but got {}".format(type(node)) ) return cls.create_from_source(node, **kwargs) else: - if not isinstance(node, (ParsedNode, CompiledNode)): - raise InternalException( - "type mismatch, expected ParsedNode or CompiledNode but " - "got {}".format(type(node)) - ) + if not isinstance(node, (ParsedNode)): + raise InternalException(f"type mismatch, expected ParsedNode but got {type(node)}") return cls.create_from_node(config, node, **kwargs) @classmethod diff --git a/core/dbt/adapters/protocol.py b/core/dbt/adapters/protocol.py index f17c2bd6f45..0cc3b3c96ce 100644 --- a/core/dbt/adapters/protocol.py +++ b/core/dbt/adapters/protocol.py @@ -17,8 +17,7 @@ import agate from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse -from dbt.contracts.graph.compiled import CompiledNode, ManifestNode, NonSourceCompiledNode -from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition +from dbt.contracts.graph.nodes import ParsedNode, SourceDefinition, ManifestNode from dbt.contracts.graph.model_config import BaseConfig from dbt.contracts.graph.manifest import Manifest from dbt.contracts.relation import Policy, HasQuoting @@ -51,7 +50,7 @@ def get_default_quote_policy(cls) -> Policy: def create_from( cls: Type[Self], config: HasQuoting, - node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition], + node: Union[ParsedNode, SourceDefinition], ) -> Self: ... @@ -65,7 +64,7 @@ def compile_node( node: ManifestNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, - ) -> NonSourceCompiledNode: + ) -> ManifestNode: ... 
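(The protocol change above is easiest to read against a sketch of what this patch does overall: the Parsed*/Compiled* class pairs are collapsed into one node class per resource type, and compilation now mutates that single object in place. The snippet below is illustrative only; CompiledModelNode stands in for any of the removed COMPILED_TYPES, while compiled, compiled_code, and raw_code are the attribute names used in the compilation.py changes later in this patch.)

    # Pre-patch: compilation state was encoded in the type system.
    #     if isinstance(node, CompiledModelNode):
    #         sql = node.compiled_code
    # Post-patch: one class per resource type; _compile_node() sets
    # compiled=True and fills in compiled_code (and extra_ctes) in place.
    sql = node.compiled_code if node.compiled else node.raw_code

(This is also why _recursively_prepend_ctes, in the compilation.py hunks that follow, can drop its isinstance assertion and cast() through COMPILED_TYPES and keep only the getattr(cte_model, "compiled", False) check.)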
diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py
index 5e9835952a8..ac04bb86cb4 100644
--- a/core/dbt/clients/jinja.py
+++ b/core/dbt/clients/jinja.py
@@ -25,8 +25,7 @@
 )

 from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag
-from dbt.contracts.graph.compiled import CompiledGenericTestNode
-from dbt.contracts.graph.parsed import ParsedGenericTestNode
+from dbt.contracts.graph.nodes import GenericTestNode

 from dbt.exceptions import (
     InternalException,
@@ -620,7 +619,7 @@ def extract_toplevel_blocks(

 def add_rendered_test_kwargs(
     context: Dict[str, Any],
-    node: Union[ParsedGenericTestNode, CompiledGenericTestNode],
+    node: GenericTestNode,
     capture_macros: bool = False,
 ) -> None:
     """Render each of the test kwargs in the given context using the native
diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py
index 0afd82c0d42..7cd6f49a5e6 100644
--- a/core/dbt/compilation.py
+++ b/core/dbt/compilation.py
@@ -1,6 +1,6 @@
 import os
 from collections import defaultdict
-from typing import List, Dict, Any, Tuple, cast, Optional
+from typing import List, Dict, Any, Tuple, Optional

 import networkx as nx  # type: ignore
 import pickle
@@ -12,15 +12,13 @@
 from dbt.clients.system import make_directory
 from dbt.context.providers import generate_runtime_model_context
 from dbt.contracts.graph.manifest import Manifest, UniqueID
-from dbt.contracts.graph.compiled import (
-    COMPILED_TYPES,
-    CompiledGenericTestNode,
+from dbt.contracts.graph.nodes import (
+    ParsedNode,
+    ManifestNode,
+    GenericTestNode,
     GraphMemberNode,
     InjectedCTE,
-    ManifestNode,
-    NonSourceCompiledNode,
 )
-from dbt.contracts.graph.parsed import ParsedNode
 from dbt.exceptions import (
     dependency_not_found,
     InternalException,
@@ -37,14 +35,6 @@
 graph_file_name = "graph.gpickle"


-def _compiled_type_for(model: ParsedNode):
-    if type(model) not in COMPILED_TYPES:
-        raise InternalException(
-            f"Asked to compile {type(model)} node, but it has no compiled form"
-        )
-    return COMPILED_TYPES[type(model)]
-
-
 def print_compile_stats(stats):
     names = {
         NodeType.Model: "model",
@@ -177,7 +167,7 @@ def initialize(self):
     # a dict for jinja rendering of SQL
     def _create_node_context(
         self,
-        node: NonSourceCompiledNode,
+        node: ManifestNode,
         manifest: Manifest,
         extra_context: Dict[str, Any],
     ) -> Dict[str, Any]:
@@ -185,7 +175,7 @@ def _create_node_context(
         context = generate_runtime_model_context(node, self.config, manifest)
         context.update(extra_context)

-        if isinstance(node, CompiledGenericTestNode):
+        if isinstance(node, GenericTestNode):
             # for test nodes, add a special keyword args value to the context
             jinja.add_rendered_test_kwargs(context, node)
@@ -262,10 +252,10 @@ def _inject_ctes_into_sql(self, sql: str, ctes: List[InjectedCTE]) -> str:

     def _recursively_prepend_ctes(
         self,
-        model: NonSourceCompiledNode,
+        model: ManifestNode,
         manifest: Manifest,
         extra_context: Optional[Dict[str, Any]],
-    ) -> Tuple[NonSourceCompiledNode, List[InjectedCTE]]:
+    ) -> Tuple[ManifestNode, List[InjectedCTE]]:
         """This method is called by the 'compile_node' method. Starting
         from the node that it is passed in, it will recursively call itself
         using the 'extra_ctes'.  The 'ephemeral' models do
@@ -306,8 +296,6 @@ def _recursively_prepend_ctes(
             # This model has already been compiled, so it's been
             # through here before
             if getattr(cte_model, "compiled", False):
-                assert isinstance(cte_model, tuple(COMPILED_TYPES.values()))
-                cte_model = cast(NonSourceCompiledNode, cte_model)
                 new_prepended_ctes = cte_model.extra_ctes

             # if the cte_model isn't compiled, i.e. first time here
@@ -344,7 +332,7 @@ def _recursively_prepend_ctes(

         return model, prepended_ctes

-    # creates a compiled_node from the ManifestNode passed in,
+    # Sets compiled fields in the ManifestNode passed in,
     # creates a "context" dictionary for jinja rendering,
     # and then renders the "compiled_code" using the node, the
     # raw_code and the context.
     def _compile_node(
         self,
         node: ManifestNode,
         manifest: Manifest,
         extra_context: Optional[Dict[str, Any]] = None,
-    ) -> NonSourceCompiledNode:
+    ) -> ManifestNode:
         if extra_context is None:
             extra_context = {}
@@ -366,9 +354,8 @@ def _compile_node(
                 "extra_ctes": [],
             }
         )
-        compiled_node = _compiled_type_for(node).from_dict(data)

-        if compiled_node.language == ModelLanguage.python:
+        if node.language == ModelLanguage.python:
             # TODO could we also 'minify' this code at all? just aesthetic, not functional
             # quoating seems like something very specific to sql so far
@@ -376,7 +363,7 @@
             # TODO try to find better way to do this, given that
             original_quoting = self.config.quoting
             self.config.quoting = {key: False for key in original_quoting.keys()}
-            context = self._create_node_context(compiled_node, manifest, extra_context)
+            context = self._create_node_context(node, manifest, extra_context)

             postfix = jinja.get_rendered(
                 "{{ py_script_postfix(model) }}",
@@ -384,23 +371,23 @@ def _compile_node(
                 node,
             )
             # we should NOT jinja render the python model's 'raw code'
-            compiled_node.compiled_code = f"{node.raw_code}\n\n{postfix}"
+            node.compiled_code = f"{node.raw_code}\n\n{postfix}"

             # restore quoting settings in the end since context is lazy evaluated
             self.config.quoting = original_quoting

         else:
-            context = self._create_node_context(compiled_node, manifest, extra_context)
-            compiled_node.compiled_code = jinja.get_rendered(
+            context = self._create_node_context(node, manifest, extra_context)
+            node.compiled_code = jinja.get_rendered(
                 node.raw_code,
                 context,
                 node,
             )

-        compiled_node.relation_name = self._get_relation_name(node)
+        node.relation_name = self._get_relation_name(node)

-        compiled_node.compiled = True
+        node.compiled = True

-        return compiled_node
+        return node

     def write_graph_file(self, linker: Linker, manifest: Manifest):
         filename = graph_file_name
@@ -507,7 +494,7 @@ def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph
         return Graph(linker.graph)

     # writes the "compiled_code" into the target/compiled directory
-    def _write_node(self, node: NonSourceCompiledNode) -> ManifestNode:
+    def _write_node(self, node: ManifestNode) -> ManifestNode:
         if not node.extra_ctes_injected or node.resource_type == NodeType.Snapshot:
             return node
         fire_event(WritingInjectedSQLForNode(node_info=get_node_info()))
@@ -524,7 +511,7 @@ def compile_node(
         manifest: Manifest,
         extra_context: Optional[Dict[str, Any]] = None,
         write: bool = True,
-    ) -> NonSourceCompiledNode:
+    ) -> ManifestNode:
         """This is the main entry point into this code. It's called by
         CompileRunner.compile, GenericRPCRunner.compile, and
         RunTask.get_hook_sql. It calls '_compile_node' to convert
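
The hunks above change _compile_node from constructing a separate Compiled* object (via
_compiled_type_for and from_dict) to filling in compiled_code, relation_name, and compiled
on the node it was given. A minimal sketch of that in-place style, where Node and render()
are simplified placeholders rather than the real dbt types:

    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class Node:  # placeholder: the unified node already carries compiled fields
        raw_code: str
        compiled: bool = False
        compiled_code: Optional[str] = None


    def render(sql: str, context: dict) -> str:
        # stand-in for jinja.get_rendered
        return sql.format(**context)


    def compile_node(node: Node, context: dict) -> Node:
        # mutate the parsed node instead of building a second compiled node
        node.compiled_code = render(node.raw_code, context)
        node.compiled = True
        return node


    node = compile_node(Node(raw_code="select * from {schema}.events"), {"schema": "raw"})
    assert node.compiled and node.compiled_code == "select * from raw.events"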
diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py
index 813a7a32b5e..e57c3edac56 100644
--- a/core/dbt/context/base.py
+++ b/core/dbt/context/base.py
@@ -8,7 +8,7 @@
 from dbt.clients.jinja import get_rendered
 from dbt.clients.yaml_helper import yaml, safe_load, SafeLoader, Loader, Dumper  # noqa: F401
 from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER
-from dbt.contracts.graph.compiled import CompiledResource
+from dbt.contracts.graph.nodes import Resource
 from dbt.exceptions import (
     CompilationException,
     MacroReturn,
@@ -135,11 +135,11 @@ def __init__(
         self,
         context: Mapping[str, Any],
         cli_vars: Mapping[str, Any],
-        node: Optional[CompiledResource] = None,
+        node: Optional[Resource] = None,
     ) -> None:
         self._context: Mapping[str, Any] = context
         self._cli_vars: Mapping[str, Any] = cli_vars
-        self._node: Optional[CompiledResource] = node
+        self._node: Optional[Resource] = node
         self._merged: Mapping[str, Any] = self._generate_merged()

     def _generate_merged(self) -> Mapping[str, Any]:
diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py
index 26096caa108..4908829d414 100644
--- a/core/dbt/context/docs.py
+++ b/core/dbt/context/docs.py
@@ -5,9 +5,8 @@
     doc_target_not_found,
 )
 from dbt.config.runtime import RuntimeConfig
-from dbt.contracts.graph.compiled import CompileResultNode
 from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.graph.parsed import ParsedMacro
+from dbt.contracts.graph.nodes import Macro, ResultNode
 from dbt.context.base import contextmember
 from dbt.context.configured import SchemaYamlContext

@@ -17,7 +16,7 @@ class DocsRuntimeContext(SchemaYamlContext):
     def __init__(
         self,
         config: RuntimeConfig,
-        node: Union[ParsedMacro, CompileResultNode],
+        node: Union[Macro, ResultNode],
         manifest: Manifest,
         current_project: str,
     ) -> None:
@@ -55,7 +54,7 @@ def doc(self, *args: str) -> str:
         else:
             doc_invalid_args(self.node, args)

-        # ParsedDocumentation
+        # Documentation
         target_doc = self.manifest.resolve_doc(
             doc_name,
             doc_package_name,
diff --git a/core/dbt/context/macro_resolver.py b/core/dbt/context/macro_resolver.py
index 2766dc4130c..a108a1889b9 100644
--- a/core/dbt/context/macro_resolver.py
+++ b/core/dbt/context/macro_resolver.py
@@ -1,10 +1,10 @@
 from typing import Dict, MutableMapping, Optional
-from dbt.contracts.graph.parsed import ParsedMacro
+from dbt.contracts.graph.nodes import Macro
 from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error
 from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
 from dbt.clients.jinja import MacroGenerator

-MacroNamespace = Dict[str, ParsedMacro]
+MacroNamespace = Dict[str, Macro]


 # This class builds the MacroResolver by adding macros
@@ -21,7 +21,7 @@ class MacroResolver:
     def __init__(
         self,
-        macros: MutableMapping[str, ParsedMacro],
+        macros: MutableMapping[str, Macro],
         root_project_name: str,
         internal_package_names,
     ) -> None:
@@ -77,7 +77,7 @@ def _build_macros_by_name(self):
     def _add_macro_to(
         self,
         package_namespaces: Dict[str, MacroNamespace],
-        macro: ParsedMacro,
+        macro: Macro,
     ):
         if macro.package_name in package_namespaces:
             namespace = package_namespaces[macro.package_name]
@@ -89,7 +89,7 @@ def _add_macro_to(
             raise_duplicate_macro_name(macro, macro, macro.package_name)
         package_namespaces[macro.package_name][macro.name] = macro

-    def add_macro(self, macro: ParsedMacro):
+    def add_macro(self, macro: Macro):
         macro_name: str = macro.name

         # internal macros (from plugins) will be processed separately from
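
For reference, the package_namespaces structure that _add_macro_to maintains is just a
two-level dict keyed by package name and then macro name. A small sketch under that
assumption (Macro here is a simplified placeholder, not the real class):

    from dataclasses import dataclass
    from typing import Dict


    @dataclass
    class Macro:
        name: str
        package_name: str


    MacroNamespace = Dict[str, Macro]


    def add_macro_to(package_namespaces: Dict[str, MacroNamespace], macro: Macro) -> None:
        # create the per-package namespace on first use, then refuse duplicates
        namespace = package_namespaces.setdefault(macro.package_name, {})
        if macro.name in namespace:
            raise ValueError(f"duplicate macro {macro.name} in {macro.package_name}")
        namespace[macro.name] = macro


    namespaces: Dict[str, MacroNamespace] = {}
    add_macro_to(namespaces, Macro("generate_schema_name", "dbt"))
    assert "generate_schema_name" in namespaces["dbt"]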
diff --git a/core/dbt/context/macros.py b/core/dbt/context/macros.py
index dccd376b876..700109b8081 100644
--- a/core/dbt/context/macros.py
+++ b/core/dbt/context/macros.py
@@ -1,7 +1,7 @@
 from typing import Any, Dict, Iterable, Union, Optional, List, Iterator, Mapping, Set

 from dbt.clients.jinja import MacroGenerator, MacroStack
-from dbt.contracts.graph.parsed import ParsedMacro
+from dbt.contracts.graph.nodes import Macro
 from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME
 from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error

@@ -112,7 +112,7 @@ def __init__(
     def _add_macro_to(
         self,
         hierarchy: Dict[str, FlatNamespace],
-        macro: ParsedMacro,
+        macro: Macro,
         macro_func: MacroGenerator,
     ):
         if macro.package_name in hierarchy:
@@ -125,7 +125,7 @@ def _add_macro_to(
             raise_duplicate_macro_name(macro_func.macro, macro, macro.package_name)
         hierarchy[macro.package_name][macro.name] = macro_func

-    def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]):
+    def add_macro(self, macro: Macro, ctx: Dict[str, Any]):
         macro_name: str = macro.name

         # MacroGenerator is in clients/jinja.py
@@ -147,13 +147,11 @@ def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]):
         elif macro.package_name == self.root_package:
             self.globals[macro_name] = macro_func

-    def add_macros(self, macros: Iterable[ParsedMacro], ctx: Dict[str, Any]):
+    def add_macros(self, macros: Iterable[Macro], ctx: Dict[str, Any]):
         for macro in macros:
             self.add_macro(macro, ctx)

-    def build_namespace(
-        self, macros: Iterable[ParsedMacro], ctx: Dict[str, Any]
-    ) -> MacroNamespace:
+    def build_namespace(self, macros: Iterable[Macro], ctx: Dict[str, Any]) -> MacroNamespace:
         self.add_macros(macros, ctx)

         # Iterate in reverse-order and overwrite: the packages that are first
diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py
index 35afeecddf3..06642810730 100644
--- a/core/dbt/context/providers.py
+++ b/core/dbt/context/providers.py
@@ -28,18 +28,15 @@
 from .manifest import ManifestContext
 from dbt.contracts.connection import AdapterResponse
 from dbt.contracts.graph.manifest import Manifest, Disabled
-from dbt.contracts.graph.compiled import (
-    CompiledResource,
-    CompiledSeedNode,
+from dbt.contracts.graph.nodes import (
+    Macro,
+    Exposure,
+    Metric,
+    SeedNode,
+    SourceDefinition,
+    Resource,
     ManifestNode,
 )
-from dbt.contracts.graph.parsed import (
-    ParsedMacro,
-    ParsedExposure,
-    ParsedMetric,
-    ParsedSeedNode,
-    ParsedSourceDefinition,
-)
 from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference
 from dbt.events.functions import get_metadata_vars
 from dbt.exceptions import (
@@ -512,7 +509,7 @@ def validate(
     def create_relation(self, target_model: ManifestNode, name: str) -> RelationProxy:
         if target_model.is_ephemeral_model:
             # In operations, we can't ref() ephemeral nodes, because
-            # ParsedMacros do not support set_cte
+            # Macros do not support set_cte
             raise_compiler_error(
                 "Operations can not ref() ephemeral nodes, but {} is ephemeral".format(
                     target_model.name
@@ -584,9 +581,9 @@ def __init__(
         self,
         context: Dict[str, Any],
         config: RuntimeConfig,
-        node: CompiledResource,
+        node: Resource,
     ) -> None:
-        self._node: CompiledResource
+        self._node: Resource
         self._config: RuntimeConfig = config
         super().__init__(context, config.cli_vars, node=node)
@@ -690,7 +687,7 @@ def __init__(
             raise InternalException(f"Invalid provider given to context: {provider}")
         # mypy appeasement - we know it'll be a RuntimeConfig
         self.config: RuntimeConfig
-        self.model: Union[ParsedMacro, ManifestNode] = model
+        self.model: Union[Macro, ManifestNode] = model
         super().__init__(config, manifest, model.package_name)
         self.sql_results: Dict[str, AttrDict] = {}
         self.context_config: Optional[ContextConfig] = context_config
@@ -779,7 +776,7 @@ def inner(value: T) -> None:
     @contextmember
     def write(self, payload: str) -> str:
         # macros/source defs aren't 'writeable'.
-        if isinstance(self.model, (ParsedMacro, ParsedSourceDefinition)):
+        if isinstance(self.model, (Macro, SourceDefinition)):
             raise_compiler_error('cannot "write" macros or sources')
         self.model.build_path = self.model.write_node(self.config.target_path, "run", payload)
         return ""
@@ -799,7 +796,7 @@ def try_or_compiler_error(

     @contextmember
     def load_agate_table(self) -> agate.Table:
-        if not isinstance(self.model, (ParsedSeedNode, CompiledSeedNode)):
+        if not isinstance(self.model, SeedNode):
             raise_compiler_error(
                 "can only load_agate_table for seeds (got a {})".format(self.model.resource_type)
             )
@@ -1220,7 +1217,8 @@ def env_var(self, var: str, default: Optional[str] = None) -> str:
         if return_value is not None:
             # Save the env_var value in the manifest and the var name in the source_file.
             # If this is compiling, do not save because it's irrelevant to parsing.
-            if self.model and not hasattr(self.model, "compiled"):
+            compiling = getattr(self.model, "compiled", False) is True
+            if self.model and not compiling:
                 # If the environment variable is set from a default, store a string indicating
                 # that so we can skip partial parsing. Otherwise the file will be scheduled for
                 # reparsing. If the default changes, the file will have been updated and therefore
@@ -1275,7 +1278,7 @@ class MacroContext(ProviderContext):

     def __init__(
         self,
-        model: ParsedMacro,
+        model: Macro,
         config: RuntimeConfig,
         manifest: Manifest,
         provider: Provider,
@@ -1390,7 +1393,7 @@ def generate_parser_model_context(

 def generate_generate_name_macro_context(
-    macro: ParsedMacro,
+    macro: Macro,
     config: RuntimeConfig,
     manifest: Manifest,
 ) -> Dict[str, Any]:
@@ -1408,7 +1411,7 @@ def generate_runtime_model_context(

 def generate_runtime_macro_context(
-    macro: ParsedMacro,
+    macro: Macro,
     config: RuntimeConfig,
     manifest: Manifest,
     package_name: Optional[str],
@@ -1444,7 +1447,7 @@ def __call__(self, *args) -> str:

 def generate_parse_exposure(
-    exposure: ParsedExposure,
+    exposure: Exposure,
     config: RuntimeConfig,
     manifest: Manifest,
     package_name: str,
@@ -1494,7 +1497,7 @@ def validate_args(self, name, package):

 def generate_parse_metrics(
-    metric: ParsedMetric,
+    metric: Metric,
     config: RuntimeConfig,
     manifest: Manifest,
     package_name: str,
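
The env_var hunk above keys off the unified node's compiled flag: during parsing the flag
is still False, so the variable is recorded in the manifest; at compile time it is skipped.
A hedged sketch of that guard, with Model and recorded as stand-ins for the real context
model and manifest bookkeeping:

    class Model:
        compiled = False  # the unified node carries this flag from parse time


    recorded = {}


    def env_var_guard(model, name: str, value: str) -> str:
        # getattr covers macros/sources, which have no 'compiled' attribute at all
        compiling = getattr(model, "compiled", False) is True
        if model is not None and not compiling:
            recorded[name] = value  # only persist while parsing
        return value


    env_var_guard(Model(), "DBT_TARGET", "dev")
    assert recorded == {"DBT_TARGET": "dev"}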
diff --git a/core/dbt/contracts/graph/compiled.py b/core/dbt/contracts/graph/compiled.py
deleted file mode 100644
index 28930932299..00000000000
--- a/core/dbt/contracts/graph/compiled.py
+++ /dev/null
@@ -1,236 +0,0 @@
-from dbt.contracts.graph.parsed import (
-    HasTestMetadata,
-    ParsedNode,
-    ParsedAnalysisNode,
-    ParsedSingularTestNode,
-    ParsedHookNode,
-    ParsedModelNode,
-    ParsedExposure,
-    ParsedMetric,
-    ParsedResource,
-    ParsedRPCNode,
-    ParsedSqlNode,
-    ParsedGenericTestNode,
-    ParsedSeedNode,
-    ParsedSnapshotNode,
-    ParsedSourceDefinition,
-    SeedConfig,
-    TestConfig,
-    same_seeds,
-)
-from dbt.node_types import NodeType
-from dbt.contracts.util import Replaceable
-
-from dbt.dataclass_schema import dbtClassMixin
-from dataclasses import dataclass, field
-from typing import Optional, List, Union, Dict, Type
-
-
-@dataclass
-class InjectedCTE(dbtClassMixin, Replaceable):
-    id: str
-    sql: str
-
-
-@dataclass
-class CompiledNodeMixin(dbtClassMixin):
-    # this is a special mixin class to provide a required argument. If a node
-    # is missing a `compiled` flag entirely, it must not be a CompiledNode.
-    compiled: bool
-
-
-@dataclass
-class CompiledNode(ParsedNode, CompiledNodeMixin):
-    compiled_code: Optional[str] = None
-    extra_ctes_injected: bool = False
-    extra_ctes: List[InjectedCTE] = field(default_factory=list)
-    relation_name: Optional[str] = None
-    _pre_injected_sql: Optional[str] = None
-
-    def set_cte(self, cte_id: str, sql: str):
-        """This is the equivalent of what self.extra_ctes[cte_id] = sql would
-        do if extra_ctes were an OrderedDict
-        """
-        for cte in self.extra_ctes:
-            if cte.id == cte_id:
-                cte.sql = sql
-                break
-        else:
-            self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql))
-
-    def __post_serialize__(self, dct):
-        dct = super().__post_serialize__(dct)
-        if "_pre_injected_sql" in dct:
-            del dct["_pre_injected_sql"]
-        return dct
-
-
-@dataclass
-class CompiledAnalysisNode(CompiledNode):
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]})
-
-
-@dataclass
-class CompiledHookNode(CompiledNode):
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]})
-    index: Optional[int] = None
-
-
-@dataclass
-class CompiledModelNode(CompiledNode):
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]})
-
-
-# TODO: rm?
-@dataclass
-class CompiledRPCNode(CompiledNode):
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]})
-
-
-@dataclass
-class CompiledSqlNode(CompiledNode):
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]})
-
-
-@dataclass
-class CompiledSeedNode(CompiledNode):
-    # keep this in sync with ParsedSeedNode!
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]})
-    config: SeedConfig = field(default_factory=SeedConfig)
-    root_path: Optional[str] = None
-
-    @property
-    def empty(self):
-        """Seeds are never empty"""
-        return False
-
-    def same_body(self, other) -> bool:
-        return same_seeds(self, other)
-
-
-@dataclass
-class CompiledSnapshotNode(CompiledNode):
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]})
-
-
-@dataclass
-class CompiledSingularTestNode(CompiledNode):
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]})
-    # Was not able to make mypy happy and keep the code working. We need to
-    # refactor the various configs.
-    config: TestConfig = field(default_factory=TestConfig)  # type:ignore
-
-
-@dataclass
-class CompiledGenericTestNode(CompiledNode, HasTestMetadata):
-    # keep this in sync with ParsedGenericTestNode!
-    resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]})
-    column_name: Optional[str] = None
-    file_key_name: Optional[str] = None
-    # Was not able to make mypy happy and keep the code working. We need to
-    # refactor the various configs.
-    config: TestConfig = field(default_factory=TestConfig)  # type:ignore
-
-    def same_contents(self, other) -> bool:
-        if other is None:
-            return False
-
-        return self.same_config(other) and self.same_fqn(other) and True
-
-
-CompiledTestNode = Union[CompiledSingularTestNode, CompiledGenericTestNode]
-
-
-PARSED_TYPES: Dict[Type[CompiledNode], Type[ParsedResource]] = {
-    CompiledAnalysisNode: ParsedAnalysisNode,
-    CompiledModelNode: ParsedModelNode,
-    CompiledHookNode: ParsedHookNode,
-    CompiledRPCNode: ParsedRPCNode,
-    CompiledSqlNode: ParsedSqlNode,
-    CompiledSeedNode: ParsedSeedNode,
-    CompiledSnapshotNode: ParsedSnapshotNode,
-    CompiledSingularTestNode: ParsedSingularTestNode,
-    CompiledGenericTestNode: ParsedGenericTestNode,
-}
-
-
-COMPILED_TYPES: Dict[Type[ParsedResource], Type[CompiledNode]] = {
-    ParsedAnalysisNode: CompiledAnalysisNode,
-    ParsedModelNode: CompiledModelNode,
-    ParsedHookNode: CompiledHookNode,
-    ParsedRPCNode: CompiledRPCNode,
-    ParsedSqlNode: CompiledSqlNode,
-    ParsedSeedNode: CompiledSeedNode,
-    ParsedSnapshotNode: CompiledSnapshotNode,
-    ParsedSingularTestNode: CompiledSingularTestNode,
-    ParsedGenericTestNode: CompiledGenericTestNode,
-}
-
-
-# for some types, the compiled type is the parsed type, so make this easy
-CompiledType = Union[Type[CompiledNode], Type[ParsedResource]]
-CompiledResource = Union[ParsedResource, CompiledNode]
-
-
-def compiled_type_for(parsed: ParsedNode) -> CompiledType:
-    if type(parsed) in COMPILED_TYPES:
-        return COMPILED_TYPES[type(parsed)]
-    else:
-        return type(parsed)
-
-
-def parsed_instance_for(compiled: CompiledNode) -> ParsedResource:
-    cls = PARSED_TYPES.get(type(compiled))
-    if cls is None:
-        # how???
-        raise ValueError("invalid resource_type: {}".format(compiled.resource_type))
-
-    return cls.from_dict(compiled.to_dict(omit_none=True))
-
-
-NonSourceCompiledNode = Union[
-    CompiledAnalysisNode,
-    CompiledSingularTestNode,
-    CompiledModelNode,
-    CompiledHookNode,
-    CompiledRPCNode,
-    CompiledSqlNode,
-    CompiledGenericTestNode,
-    CompiledSeedNode,
-    CompiledSnapshotNode,
-]
-
-NonSourceParsedNode = Union[
-    ParsedAnalysisNode,
-    ParsedSingularTestNode,
-    ParsedHookNode,
-    ParsedModelNode,
-    ParsedRPCNode,
-    ParsedSqlNode,
-    ParsedGenericTestNode,
-    ParsedSeedNode,
-    ParsedSnapshotNode,
-]
-
-
-# This is anything that can be in manifest.nodes.
-ManifestNode = Union[
-    NonSourceCompiledNode,
-    NonSourceParsedNode,
-]
-
-# We allow either parsed or compiled nodes, or parsed sources, as some
-# 'compile()' calls in the runner actually just return the original parsed
-# node they were given.
-CompileResultNode = Union[
-    ManifestNode,
-    ParsedSourceDefinition,
-]
-
-# anything that participates in the graph: sources, exposures, metrics,
-# or manifest nodes
-GraphMemberNode = Union[
-    CompileResultNode,
-    ParsedExposure,
-    ParsedMetric,
-]
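
The set_cte helper deleted here survives unchanged on the unified node class (see the
nodes.py hunk further below). Its contract is upsert-by-id over a list, as this standalone
sketch shows, with InjectedCTE simplified to a bare dataclass:

    from dataclasses import dataclass
    from typing import List


    @dataclass
    class InjectedCTE:
        id: str
        sql: str


    def set_cte(extra_ctes: List[InjectedCTE], cte_id: str, sql: str) -> None:
        # update an existing entry in place, else append - the list behaves
        # like an ordered mapping keyed by cte_id
        for cte in extra_ctes:
            if cte.id == cte_id:
                cte.sql = sql
                break
        else:
            extra_ctes.append(InjectedCTE(id=cte_id, sql=sql))


    ctes: List[InjectedCTE] = []
    set_cte(ctes, "model.a", "select 1")
    set_cte(ctes, "model.a", "select 2")  # overwrites, does not duplicate
    assert len(ctes) == 1 and ctes[0].sql == "select 2"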
diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py
index 73034ec80f2..05c856f461f 100644
--- a/core/dbt/contracts/graph/manifest.py
+++ b/core/dbt/contracts/graph/manifest.py
@@ -16,29 +16,24 @@
     TypeVar,
     Callable,
     Generic,
-    cast,
     AbstractSet,
     ClassVar,
 )
 from typing_extensions import Protocol
 from uuid import UUID

-from dbt.contracts.graph.compiled import (
-    CompileResultNode,
-    ManifestNode,
-    NonSourceCompiledNode,
-    GraphMemberNode,
-)
-from dbt.contracts.graph.parsed import (
-    ParsedMacro,
-    ParsedDocumentation,
-    ParsedSourceDefinition,
-    ParsedGenericTestNode,
-    ParsedExposure,
-    ParsedMetric,
+from dbt.contracts.graph.nodes import (
+    Macro,
+    Documentation,
+    SourceDefinition,
+    GenericTestNode,
+    Exposure,
+    Metric,
     HasUniqueID,
     UnpatchedSourceDefinition,
-    ManifestNodes,
+    ManifestNode,
+    GraphMemberNode,
+    ResultNode,
 )
 from dbt.contracts.graph.unparsed import SourcePatch
 from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile
@@ -96,7 +91,7 @@ def find(self, key, package: Optional[PackageName], manifest: "Manifest"):
             return self.perform_lookup(unique_id, manifest)
         return None

-    def add_doc(self, doc: ParsedDocumentation):
+    def add_doc(self, doc: Documentation):
         if doc.name not in self.storage:
             self.storage[doc.name] = {}
         self.storage[doc.name][doc.package_name] = doc.unique_id
@@ -105,7 +100,7 @@ def populate(self, manifest):
         for doc in manifest.docs.values():
             self.add_doc(doc)

-    def perform_lookup(self, unique_id: UniqueID, manifest) -> ParsedDocumentation:
+    def perform_lookup(self, unique_id: UniqueID, manifest) -> Documentation:
         if unique_id not in manifest.docs:
             raise dbt.exceptions.InternalException(
                 f"Doc {unique_id} found in cache but not found in manifest"
@@ -127,7 +122,7 @@ def find(self, search_name, package: Optional[PackageName], manifest: "Manifest"
             return self.perform_lookup(unique_id, manifest)
         return None

-    def add_source(self, source: ParsedSourceDefinition):
+    def add_source(self, source: SourceDefinition):
         if source.search_name not in self.storage:
             self.storage[source.search_name] = {}

@@ -138,7 +133,7 @@ def populate(self, manifest):
             if hasattr(source, "source_name"):
                 self.add_source(source)

-    def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedSourceDefinition:
+    def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> SourceDefinition:
         if unique_id not in manifest.sources:
             raise dbt.exceptions.InternalException(
                 f"Source {unique_id} found in cache but not found in manifest"
@@ -198,7 +193,7 @@ def find(self, search_name, package: Optional[PackageName], manifest: "Manifest"
             return self.perform_lookup(unique_id, manifest)
         return None

-    def add_metric(self, metric: ParsedMetric):
+    def add_metric(self, metric: Metric):
         if metric.search_name not in self.storage:
             self.storage[metric.search_name] = {}

@@ -209,7 +204,7 @@ def populate(self, manifest):
             if hasattr(metric, "name"):
                 self.add_metric(metric)

-    def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedMetric:
+    def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> Metric:
         if unique_id not in manifest.metrics:
             raise dbt.exceptions.InternalException(
                 f"Metric {unique_id} found in cache but not found in manifest"
@@ -365,7 +360,7 @@ class Locality(enum.IntEnum):
 @dataclass
 class MacroCandidate:
     locality: Locality
-    macro: ParsedMacro
+    macro: Macro

     def __eq__(self, other: object) -> bool:
         if not isinstance(other, MacroCandidate):
@@ -430,16 +425,14 @@ def __lt__(self, other: object) -> bool:


 class CandidateList(List[M]):
-    def last(self) -> Optional[ParsedMacro]:
+    def last(self) -> Optional[Macro]:
         if not self:
             return None
         self.sort()
         return self[-1].macro


-def _get_locality(
-    macro: ParsedMacro, root_project_name: str, internal_packages: Set[str]
-) -> Locality:
+def _get_locality(macro: Macro, root_project_name: str, internal_packages: Set[str]) -> Locality:
     if macro.package_name == root_project_name:
         return Locality.Root
     elif macro.package_name in internal_packages:
@@ -465,16 +458,16 @@ class Disabled(Generic[D]):
     target: D


-MaybeMetricNode = Optional[Union[ParsedMetric, Disabled[ParsedMetric]]]
+MaybeMetricNode = Optional[Union[Metric, Disabled[Metric]]]


-MaybeDocumentation = Optional[ParsedDocumentation]
+MaybeDocumentation = Optional[Documentation]


 MaybeParsedSource = Optional[
     Union[
-        ParsedSourceDefinition,
-        Disabled[ParsedSourceDefinition],
+        SourceDefinition,
+        Disabled[SourceDefinition],
     ]
 ]

@@ -514,7 +507,7 @@ def __init__(self):

     def find_macro_by_name(
         self, name: str, root_project_name: str, package: Optional[str]
-    ) -> Optional[ParsedMacro]:
+    ) -> Optional[Macro]:
         """Find a macro in the graph by its name and package name, or None for
         any package. The root project name is used to determine priority:
         - locally defined macros come first
@@ -537,7 +530,7 @@ def filter(candidate: MacroCandidate) -> bool:

     def find_generate_macro_by_name(
         self, component: str, root_project_name: str
-    ) -> Optional[ParsedMacro]:
+    ) -> Optional[Macro]:
         """
         The `generate_X_name` macros are similar to regular ones, but ignore
         imported packages.
@@ -606,11 +599,11 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
     # is added it must all be added in the __reduce_ex__ method in the
     # args tuple in the right position.
     nodes: MutableMapping[str, ManifestNode] = field(default_factory=dict)
-    sources: MutableMapping[str, ParsedSourceDefinition] = field(default_factory=dict)
-    macros: MutableMapping[str, ParsedMacro] = field(default_factory=dict)
-    docs: MutableMapping[str, ParsedDocumentation] = field(default_factory=dict)
-    exposures: MutableMapping[str, ParsedExposure] = field(default_factory=dict)
-    metrics: MutableMapping[str, ParsedMetric] = field(default_factory=dict)
+    sources: MutableMapping[str, SourceDefinition] = field(default_factory=dict)
+    macros: MutableMapping[str, Macro] = field(default_factory=dict)
+    docs: MutableMapping[str, Documentation] = field(default_factory=dict)
+    exposures: MutableMapping[str, Exposure] = field(default_factory=dict)
+    metrics: MutableMapping[str, Metric] = field(default_factory=dict)
     selectors: MutableMapping[str, Any] = field(default_factory=dict)
     files: MutableMapping[str, AnySourceFile] = field(default_factory=dict)
     metadata: ManifestMetadata = field(default_factory=ManifestMetadata)
@@ -658,7 +651,7 @@ def __post_deserialize__(cls, obj):
         obj._lock = flags.MP_CONTEXT.Lock()
         return obj

-    def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiledNode:
+    def sync_update_node(self, new_node: ManifestNode) -> ManifestNode:
         """update the node with a lock. The only time we should want to lock is
         when compiling an ephemeral ancestor of a node at runtime, because multiple
         threads could be just-in-time compiling the same ephemeral
@@ -671,21 +664,21 @@ def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiled
         with self._lock:
             existing = self.nodes[new_node.unique_id]
             if getattr(existing, "compiled", False):
-                # already compiled -> must be a NonSourceCompiledNode
-                return cast(NonSourceCompiledNode, existing)
+                # already compiled
+                return existing
             _update_into(self.nodes, new_node)
         return new_node

-    def update_exposure(self, new_exposure: ParsedExposure):
+    def update_exposure(self, new_exposure: Exposure):
         _update_into(self.exposures, new_exposure)

-    def update_metric(self, new_metric: ParsedMetric):
+    def update_metric(self, new_metric: Metric):
         _update_into(self.metrics, new_metric)

     def update_node(self, new_node: ManifestNode):
         _update_into(self.nodes, new_node)

-    def update_source(self, new_source: ParsedSourceDefinition):
+    def update_source(self, new_source: SourceDefinition):
         _update_into(self.sources, new_source)

     def build_flat_graph(self):
@@ -738,7 +731,7 @@ def _materialization_candidates_for(

     def find_materialization_macro_by_name(
         self, project_name: str, materialization_name: str, adapter_type: str
-    ) -> Optional[ParsedMacro]:
+    ) -> Optional[Macro]:
         candidates: CandidateList = CandidateList(
             chain.from_iterable(
                 self._materialization_candidates_for(
@@ -943,8 +936,8 @@ def resolve_source(
         search_name = f"{target_source_name}.{target_table_name}"
         candidates = _search_packages(current_project, node_package)

-        source: Optional[ParsedSourceDefinition] = None
-        disabled: Optional[List[ParsedSourceDefinition]] = None
+        source: Optional[SourceDefinition] = None
+        disabled: Optional[List[SourceDefinition]] = None

         for pkg in candidates:
             source = self.source_lookup.find(search_name, pkg, self)
@@ -968,8 +961,8 @@ def resolve_metric(
         node_package: str,
     ) -> MaybeMetricNode:

-        metric: Optional[ParsedMetric] = None
-        disabled: Optional[List[ParsedMetric]] = None
+        metric: Optional[Metric] = None
+        disabled: Optional[List[Metric]] = None

         candidates = _search_packages(current_project, node_package, target_metric_package)
         for pkg in candidates:
@@ -992,7 +985,7 @@ def resolve_doc(
         package: Optional[str],
         current_project: str,
         node_package: str,
-    ) -> Optional[ParsedDocumentation]:
+    ) -> Optional[Documentation]:
         """Resolve the given documentation. This follows the same algorithm as
         resolve_ref except the is_enabled checks are unnecessary as docs are
         always enabled.
@@ -1044,7 +1037,7 @@ def merge_from_artifact(

     # Methods that were formerly in ParseResult

-    def add_macro(self, source_file: SourceFile, macro: ParsedMacro):
+    def add_macro(self, source_file: SourceFile, macro: Macro):
         if macro.unique_id in self.macros:
             # detect that the macro exists and emit an error
             other_path = self.macros[macro.unique_id].original_file_path
@@ -1086,30 +1079,30 @@ def add_source(self, source_file: SchemaSourceFile, source: UnpatchedSourceDefin
         self.sources[source.unique_id] = source  # type: ignore
         source_file.sources.append(source.unique_id)

-    def add_node_nofile(self, node: ManifestNodes):
+    def add_node_nofile(self, node: ManifestNode):
         # nodes can't be overwritten!
         _check_duplicates(node, self.nodes)
         self.nodes[node.unique_id] = node

-    def add_node(self, source_file: AnySourceFile, node: ManifestNodes, test_from=None):
+    def add_node(self, source_file: AnySourceFile, node: ManifestNode, test_from=None):
         self.add_node_nofile(node)
         if isinstance(source_file, SchemaSourceFile):
-            if isinstance(node, ParsedGenericTestNode):
+            if isinstance(node, GenericTestNode):
                 assert test_from
                 source_file.add_test(node.unique_id, test_from)
-            if isinstance(node, ParsedMetric):
+            if isinstance(node, Metric):
                 source_file.metrics.append(node.unique_id)
-            if isinstance(node, ParsedExposure):
+            if isinstance(node, Exposure):
                 source_file.exposures.append(node.unique_id)
             else:
                 source_file.nodes.append(node.unique_id)

-    def add_exposure(self, source_file: SchemaSourceFile, exposure: ParsedExposure):
+    def add_exposure(self, source_file: SchemaSourceFile, exposure: Exposure):
         _check_duplicates(exposure, self.exposures)
         self.exposures[exposure.unique_id] = exposure
         source_file.exposures.append(exposure.unique_id)

-    def add_metric(self, source_file: SchemaSourceFile, metric: ParsedMetric):
+    def add_metric(self, source_file: SchemaSourceFile, metric: Metric):
         _check_duplicates(metric, self.metrics)
         self.metrics[metric.unique_id] = metric
         source_file.metrics.append(metric.unique_id)
@@ -1121,20 +1114,20 @@ def add_disabled_nofile(self, node: GraphMemberNode):
         else:
             self.disabled[node.unique_id] = [node]

-    def add_disabled(self, source_file: AnySourceFile, node: CompileResultNode, test_from=None):
+    def add_disabled(self, source_file: AnySourceFile, node: ResultNode, test_from=None):
         self.add_disabled_nofile(node)
         if isinstance(source_file, SchemaSourceFile):
-            if isinstance(node, ParsedGenericTestNode):
+            if isinstance(node, GenericTestNode):
                 assert test_from
                 source_file.add_test(node.unique_id, test_from)
-            if isinstance(node, ParsedMetric):
+            if isinstance(node, Metric):
                 source_file.metrics.append(node.unique_id)
-            if isinstance(node, ParsedExposure):
+            if isinstance(node, Exposure):
                 source_file.exposures.append(node.unique_id)
             else:
                 source_file.nodes.append(node.unique_id)

-    def add_doc(self, source_file: SourceFile, doc: ParsedDocumentation):
+    def add_doc(self, source_file: SourceFile, doc: Documentation):
         _check_duplicates(doc, self.docs)
         self.docs[doc.unique_id] = doc
         source_file.docs.append(doc.unique_id)
@@ -1192,27 +1185,27 @@ class WritableManifest(ArtifactMixin):
     nodes: Mapping[UniqueID, ManifestNode] = field(
         metadata=dict(description=("The nodes defined in the dbt project and its dependencies"))
     )
-    sources: Mapping[UniqueID, ParsedSourceDefinition] = field(
+    sources: Mapping[UniqueID, SourceDefinition] = field(
         metadata=dict(description=("The sources defined in the dbt project and its dependencies"))
     )
-    macros: Mapping[UniqueID, ParsedMacro] = field(
+    macros: Mapping[UniqueID, Macro] = field(
         metadata=dict(description=("The macros defined in the dbt project and its dependencies"))
     )
-    docs: Mapping[UniqueID, ParsedDocumentation] = field(
+    docs: Mapping[UniqueID, Documentation] = field(
         metadata=dict(description=("The docs defined in the dbt project and its dependencies"))
     )
-    exposures: Mapping[UniqueID, ParsedExposure] = field(
+    exposures: Mapping[UniqueID, Exposure] = field(
         metadata=dict(
             description=("The exposures defined in the dbt project and its dependencies")
         )
     )
-    metrics: Mapping[UniqueID, ParsedMetric] = field(
+    metrics: Mapping[UniqueID, Metric] = field(
         metadata=dict(description=("The metrics defined in the dbt project and its dependencies"))
     )
     selectors: Mapping[UniqueID, Any] = field(
         metadata=dict(description=("The selectors defined in selectors.yml"))
     )
-    disabled: Optional[Mapping[UniqueID, List[CompileResultNode]]] = field(
+    disabled: Optional[Mapping[UniqueID, List[ResultNode]]] = field(
         metadata=dict(description="A mapping of the disabled nodes in the target")
     )
     parent_map: Optional[NodeEdgeMap] = field(
diff --git a/core/dbt/contracts/graph/metrics.py b/core/dbt/contracts/graph/metrics.py
index 20222b4a32b..b895aa5e2f5 100644
--- a/core/dbt/contracts/graph/metrics.py
+++ b/core/dbt/contracts/graph/metrics.py
@@ -12,7 +12,7 @@ def __str__(self):

 class ResolvedMetricReference(MetricReference):
     """
-    Simple proxy over a ParsedMetric which delegates property
+    Simple proxy over a Metric which delegates property
     lookups to the underlying node. Also adds helper functions
     for working with metrics (ie. __str__ and templating functions)
     """
diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/nodes.py
similarity index 85%
rename from core/dbt/contracts/graph/parsed.py
rename to core/dbt/contracts/graph/nodes.py
index 8fc4ca0c3ed..a908167f49e 100644
--- a/core/dbt/contracts/graph/parsed.py
+++ b/core/dbt/contracts/graph/nodes.py
@@ -99,6 +99,49 @@ def add_macro(self, value: str):
         self.macros.append(value)


+@dataclass
+class InjectedCTE(dbtClassMixin, Replaceable):
+    id: str
+    sql: str
+
+
+@dataclass
+class CompiledNode:
+    compiled: bool = False
+    compiled_code: Optional[str] = None
+    extra_ctes_injected: bool = False
+    extra_ctes: List[InjectedCTE] = field(default_factory=list)
+    relation_name: Optional[str] = None
+    _pre_injected_sql: Optional[str] = None
+
+    def set_cte(self, cte_id: str, sql: str):
+        """This is the equivalent of what self.extra_ctes[cte_id] = sql would
+        do if extra_ctes were an OrderedDict
+        """
+        for cte in self.extra_ctes:
+            if cte.id == cte_id:
+                cte.sql = sql
+                break
+        else:
+            self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql))
+
+    def __post_serialize__(self, dct):
+        dct = super().__post_serialize__(dct)
+        if "_pre_injected_sql" in dct:
+            del dct["_pre_injected_sql"]
+        # Remove compiled attributes
+        if "compiled" in dct and dct["compiled"] is False:
+            del dct["compiled"]
+            del dct["extra_ctes_injected"]
+            del dct["extra_ctes"]
+            # "omit_none" means these might not be in the dictionary
+            if "compiled_code" in dct:
+                del dct["compiled_code"]
+            if "relation_name" in dct:
+                del dct["relation_name"]
+        return dct
+
+
 @dataclass
 class DependsOn(MacroDependsOn):
     nodes: List[str] = field(default_factory=list)
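
The new __post_serialize__ above keeps serialized parsed-only nodes compact: when compiled
is still False, the compilation fields are stripped from the emitted dict. A self-contained
sketch of that pruning rule, over plain dicts rather than the dbt serializers:

    def prune_compiled_fields(dct: dict) -> dict:
        # mirror the rule in the hunk: only uncompiled nodes drop these keys
        if dct.get("compiled") is False:
            for key in ("compiled", "extra_ctes_injected", "extra_ctes",
                        "compiled_code", "relation_name"):
                dct.pop(key, None)
        return dct


    parsed = {"name": "my_model", "compiled": False, "extra_ctes": [],
              "extra_ctes_injected": False, "compiled_code": None, "relation_name": None}
    assert prune_compiled_fields(parsed) == {"name": "my_model"}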
@@ -213,7 +256,7 @@ def clear_event_status(self):


 @dataclass
-class ParsedNodeDefaults(NodeInfoMixin, ParsedNodeMandatory):
+class ParsedNodeDefaults(NodeInfoMixin, CompiledNode, ParsedNodeMandatory):
     tags: List[str] = field(default_factory=list)
     refs: List[List[str]] = field(default_factory=list)
     sources: List[List[str]] = field(default_factory=list)
@@ -265,26 +308,26 @@ def _deserialize(cls, dct: Dict[str, int]):
         # between them.
         resource_type = dct["resource_type"]
         if resource_type == "model":
-            return ParsedModelNode.from_dict(dct)
+            return ModelNode.from_dict(dct)
         elif resource_type == "analysis":
-            return ParsedAnalysisNode.from_dict(dct)
+            return AnalysisNode.from_dict(dct)
         elif resource_type == "seed":
-            return ParsedSeedNode.from_dict(dct)
+            return SeedNode.from_dict(dct)
         elif resource_type == "rpc":
-            return ParsedRPCNode.from_dict(dct)
+            return RPCNode.from_dict(dct)
         elif resource_type == "sql":
-            return ParsedSqlNode.from_dict(dct)
+            return SqlNode.from_dict(dct)
         elif resource_type == "test":
             if "test_metadata" in dct:
-                return ParsedGenericTestNode.from_dict(dct)
+                return GenericTestNode.from_dict(dct)
             else:
-                return ParsedSingularTestNode.from_dict(dct)
+                return SingularTestNode.from_dict(dct)
         elif resource_type == "operation":
-            return ParsedHookNode.from_dict(dct)
+            return HookNode.from_dict(dct)
         elif resource_type == "seed":
-            return ParsedSeedNode.from_dict(dct)
+            return SeedNode.from_dict(dct)
         elif resource_type == "snapshot":
-            return ParsedSnapshotNode.from_dict(dct)
+            return SnapshotNode.from_dict(dct)
         else:
             return cls.from_dict(dct)
@@ -354,29 +397,29 @@ def same_contents(self: T, old: Optional[T]) -> bool:


 @dataclass
-class ParsedAnalysisNode(ParsedNode):
+class AnalysisNode(ParsedNode):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]})


 @dataclass
-class ParsedHookNode(ParsedNode):
+class HookNode(ParsedNode):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]})
     index: Optional[int] = None


 @dataclass
-class ParsedModelNode(ParsedNode):
+class ModelNode(ParsedNode):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]})


 # TODO: rm?
 @dataclass
-class ParsedRPCNode(ParsedNode):
+class RPCNode(ParsedNode):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]})


 @dataclass
-class ParsedSqlNode(ParsedNode):
+class SqlNode(ParsedNode):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]})

@@ -417,8 +460,7 @@ def same_seeds(first: ParsedNode, second: ParsedNode) -> bool:

 @dataclass
-class ParsedSeedNode(ParsedNode):
-    # keep this in sync with CompiledSeedNode!
+class SeedNode(ParsedNode):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]})
     config: SeedConfig = field(default_factory=SeedConfig)
     # seeds need the root_path because the contents are not loaded initially
@@ -450,7 +492,7 @@ class HasTestMetadata(dbtClassMixin):

 @dataclass
-class ParsedSingularTestNode(ParsedNode):
+class SingularTestNode(ParsedNode):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]})
     # Was not able to make mypy happy and keep the code working. We need to
     # refactor the various configs.
@@ -462,8 +504,7 @@ def test_node_type(self):

 @dataclass
-class ParsedGenericTestNode(ParsedNode, HasTestMetadata):
-    # keep this in sync with CompiledGenericTestNode!
+class GenericTestNode(ParsedNode, HasTestMetadata):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]})
     column_name: Optional[str] = None
     file_key_name: Optional[str] = None
@@ -495,7 +536,7 @@ class IntermediateSnapshotNode(ParsedNode):

 @dataclass
-class ParsedSnapshotNode(ParsedNode):
+class SnapshotNode(ParsedNode):
     resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]})
     config: SnapshotConfig

@@ -523,7 +564,7 @@ class ParsedMacroPatch(ParsedPatch):

 @dataclass
-class ParsedMacro(UnparsedBaseNode, HasUniqueID):
+class Macro(UnparsedBaseNode, HasUniqueID):
     name: str
     macro_sql: str
     resource_type: NodeType = field(metadata={"restrict": [NodeType.Macro]})
@@ -547,7 +588,7 @@ def patch(self, patch: ParsedMacroPatch):
         self.docs = patch.docs
         self.arguments = patch.arguments

-    def same_contents(self, other: Optional["ParsedMacro"]) -> bool:
+    def same_contents(self, other: Optional["Macro"]) -> bool:
         if other is None:
             return False
         # the only thing that makes one macro different from another with the
@@ -556,7 +597,7 @@ def same_contents(self, other: Optional["ParsedMacro"]) -> bool:

 @dataclass
-class ParsedDocumentation(UnparsedDocumentation, HasUniqueID):
+class Documentation(UnparsedDocumentation, HasUniqueID):
     name: str
     block_contents: str

@@ -564,7 +605,7 @@ class ParsedDocumentation(UnparsedDocumentation, HasUniqueID):
     def search_name(self):
         return self.name

-    def same_contents(self, other: Optional["ParsedDocumentation"]) -> bool:
+    def same_contents(self, other: Optional["Documentation"]) -> bool:
         if other is None:
             return False
         # the only thing that makes one doc different from another with the
@@ -642,7 +683,7 @@ class ParsedSourceMandatory(

 @dataclass
-class ParsedSourceDefinition(NodeInfoMixin, ParsedSourceMandatory):
+class SourceDefinition(NodeInfoMixin, ParsedSourceMandatory):
     quoting: Quoting = field(default_factory=Quoting)
     loaded_at_field: Optional[str] = None
     freshness: Optional[FreshnessThreshold] = None
@@ -663,7 +704,7 @@ def __post_serialize__(self, dct):
             del dct["_event_status"]
         return dct

-    def same_database_representation(self, other: "ParsedSourceDefinition") -> bool:
+    def same_database_representation(self, other: "SourceDefinition") -> bool:
         return (
             self.database == other.database
             and self.schema == other.schema
@@ -671,26 +712,26 @@ def same_database_representation(self, other: "ParsedSourceDefinition") -> bool:
             and True
         )

-    def same_quoting(self, other: "ParsedSourceDefinition") -> bool:
+    def same_quoting(self, other: "SourceDefinition") -> bool:
         return self.quoting == other.quoting

-    def same_freshness(self, other: "ParsedSourceDefinition") -> bool:
+    def same_freshness(self, other: "SourceDefinition") -> bool:
         return (
             self.freshness == other.freshness
             and self.loaded_at_field == other.loaded_at_field
             and True
         )

-    def same_external(self, other: "ParsedSourceDefinition") -> bool:
+    def same_external(self, other: "SourceDefinition") -> bool:
         return self.external == other.external

-    def same_config(self, old: "ParsedSourceDefinition") -> bool:
+    def same_config(self, old: "SourceDefinition") -> bool:
         return self.config.same_contents(
             self.unrendered_config,
             old.unrendered_config,
         )

-    def same_contents(self, old: Optional["ParsedSourceDefinition"]) -> bool:
+    def same_contents(self, old: Optional["SourceDefinition"]) -> bool:
         # existing when it didn't before is a change!
         if old is None:
             return True
@@ -757,7 +798,7 @@ def search_name(self):

 @dataclass
-class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn):
+class Exposure(UnparsedBaseNode, HasUniqueID, HasFqn):
     name: str
     type: ExposureType
     owner: ExposureOwner
@@ -784,34 +825,34 @@ def depends_on_nodes(self):
     def search_name(self):
         return self.name

-    def same_depends_on(self, old: "ParsedExposure") -> bool:
+    def same_depends_on(self, old: "Exposure") -> bool:
         return set(self.depends_on.nodes) == set(old.depends_on.nodes)

-    def same_description(self, old: "ParsedExposure") -> bool:
+    def same_description(self, old: "Exposure") -> bool:
         return self.description == old.description

-    def same_label(self, old: "ParsedExposure") -> bool:
+    def same_label(self, old: "Exposure") -> bool:
         return self.label == old.label

-    def same_maturity(self, old: "ParsedExposure") -> bool:
+    def same_maturity(self, old: "Exposure") -> bool:
         return self.maturity == old.maturity

-    def same_owner(self, old: "ParsedExposure") -> bool:
+    def same_owner(self, old: "Exposure") -> bool:
         return self.owner == old.owner

-    def same_exposure_type(self, old: "ParsedExposure") -> bool:
+    def same_exposure_type(self, old: "Exposure") -> bool:
         return self.type == old.type

-    def same_url(self, old: "ParsedExposure") -> bool:
+    def same_url(self, old: "Exposure") -> bool:
         return self.url == old.url

-    def same_config(self, old: "ParsedExposure") -> bool:
+    def same_config(self, old: "Exposure") -> bool:
         return self.config.same_contents(
             self.unrendered_config,
             old.unrendered_config,
         )

-    def same_contents(self, old: Optional["ParsedExposure"]) -> bool:
+    def same_contents(self, old: Optional["Exposure"]) -> bool:
         # existing when it didn't before is a change!
         # metadata/tags changes are not "changes"
         if old is None:
@@ -838,7 +879,7 @@ class MetricReference(dbtClassMixin, Replaceable):

 @dataclass
-class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn):
+class Metric(UnparsedBaseNode, HasUniqueID, HasFqn):
     name: str
     description: str
     label: str
@@ -870,43 +911,43 @@ def depends_on_nodes(self):
     def search_name(self):
         return self.name

-    def same_model(self, old: "ParsedMetric") -> bool:
+    def same_model(self, old: "Metric") -> bool:
         return self.model == old.model

-    def same_window(self, old: "ParsedMetric") -> bool:
+    def same_window(self, old: "Metric") -> bool:
         return self.window == old.window

-    def same_dimensions(self, old: "ParsedMetric") -> bool:
+    def same_dimensions(self, old: "Metric") -> bool:
         return self.dimensions == old.dimensions

-    def same_filters(self, old: "ParsedMetric") -> bool:
+    def same_filters(self, old: "Metric") -> bool:
         return self.filters == old.filters

-    def same_description(self, old: "ParsedMetric") -> bool:
+    def same_description(self, old: "Metric") -> bool:
         return self.description == old.description

-    def same_label(self, old: "ParsedMetric") -> bool:
+    def same_label(self, old: "Metric") -> bool:
         return self.label == old.label

-    def same_calculation_method(self, old: "ParsedMetric") -> bool:
+    def same_calculation_method(self, old: "Metric") -> bool:
         return self.calculation_method == old.calculation_method

-    def same_expression(self, old: "ParsedMetric") -> bool:
+    def same_expression(self, old: "Metric") -> bool:
         return self.expression == old.expression

-    def same_timestamp(self, old: "ParsedMetric") -> bool:
+    def same_timestamp(self, old: "Metric") -> bool:
         return self.timestamp == old.timestamp

-    def same_time_grains(self, old: "ParsedMetric") -> bool:
+    def same_time_grains(self, old: "Metric") -> bool:
         return self.time_grains == old.time_grains

-    def same_config(self, old: "ParsedMetric") -> bool:
+    def same_config(self, old: "Metric") -> bool:
         return self.config.same_contents(
             self.unrendered_config,
             old.unrendered_config,
         )

-    def same_contents(self, old: Optional["ParsedMetric"]) -> bool:
+    def same_contents(self, old: Optional["Metric"]) -> bool:
         # existing when it didn't before is a change!
         # metadata/tags changes are not "changes"
         if old is None:
@@ -928,24 +969,40 @@ def same_contents(self, old: Optional["ParsedMetric"]) -> bool:
     )


-ManifestNodes = Union[
-    ParsedAnalysisNode,
-    ParsedSingularTestNode,
-    ParsedHookNode,
-    ParsedModelNode,
-    ParsedRPCNode,
-    ParsedSqlNode,
-    ParsedGenericTestNode,
-    ParsedSeedNode,
-    ParsedSnapshotNode,
+ManifestNode = Union[
+    AnalysisNode,
+    SingularTestNode,
+    HookNode,
+    ModelNode,
+    RPCNode,
+    SqlNode,
+    GenericTestNode,
+    SeedNode,
+    SnapshotNode,
 ]

+ResultNode = Union[
+    ManifestNode,
+    SourceDefinition,
+]

-ParsedResource = Union[
-    ParsedDocumentation,
-    ParsedMacro,
+GraphMemberNode = Union[
+    ResultNode,
+    Exposure,
+    Metric,
+]
+
+
+Resource = Union[
+    Documentation,
+    Macro,
     ParsedNode,
-    ParsedExposure,
-    ParsedMetric,
-    ParsedSourceDefinition,
+    Exposure,
+    Metric,
+    SourceDefinition,
+]
+
+TestNode = Union[
+    SingularTestNode,
+    GenericTestNode,
 ]
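
The renamed unions nest: ManifestNode (anything in manifest.nodes) widens to ResultNode
(plus sources) and then to GraphMemberNode (plus exposures and metrics). A sketch of how
code narrows back down, using placeholder classes in place of the real node types:

    from typing import Union


    class ModelNode: ...
    class SourceDefinition: ...
    class Exposure: ...
    class Metric: ...


    ManifestNode = ModelNode  # stand-in for the nine-member Union
    ResultNode = Union[ManifestNode, SourceDefinition]
    GraphMemberNode = Union[ResultNode, Exposure, Metric]


    def describe(node: GraphMemberNode) -> str:
        # isinstance checks narrow the widest union back to a concrete type
        if isinstance(node, (Exposure, Metric)):
            return "graph-only member"
        if isinstance(node, SourceDefinition):
            return "source"
        return "manifest node"


    assert describe(Metric()) == "graph-only member"
    assert describe(ModelNode()) == "manifest node"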
diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py
index 4adba9860b0..97c43396e33 100644
--- a/core/dbt/contracts/results.py
+++ b/core/dbt/contracts/results.py
@@ -1,6 +1,5 @@
-from dbt.contracts.graph.manifest import CompileResultNode
 from dbt.contracts.graph.unparsed import FreshnessThreshold
-from dbt.contracts.graph.parsed import ParsedSourceDefinition
+from dbt.contracts.graph.nodes import SourceDefinition, ResultNode
 from dbt.contracts.util import (
     BaseArtifactMetadata,
     ArtifactMixin,
@@ -145,7 +144,7 @@ def to_msg(self):

 @dataclass
 class NodeResult(BaseResult):
-    node: CompileResultNode
+    node: ResultNode


 @dataclass
@@ -284,7 +283,7 @@ def from_success(

 @dataclass
 class SourceFreshnessResult(NodeResult):
-    node: ParsedSourceDefinition
+    node: SourceDefinition
     status: FreshnessStatus
     max_loaded_at: datetime
     snapshotted_at: datetime
diff --git a/core/dbt/contracts/sql.py b/core/dbt/contracts/sql.py
index a3e5b3d58db..b80304d2565 100644
--- a/core/dbt/contracts/sql.py
+++ b/core/dbt/contracts/sql.py
@@ -5,7 +5,7 @@

 from dbt.dataclass_schema import dbtClassMixin

-from dbt.contracts.graph.compiled import CompileResultNode
+from dbt.contracts.graph.nodes import ResultNode
 from dbt.contracts.results import (
     RunResult,
     RunResultsArtifact,
@@ -32,7 +32,7 @@ class RemoteResult(VersionedSchema):
 class RemoteCompileResultMixin(RemoteResult):
     raw_code: str
     compiled_code: str
-    node: CompileResultNode
+    node: ResultNode
     timing: List[TimingInfo]

diff --git a/core/dbt/graph/queue.py b/core/dbt/graph/queue.py
index 56248409754..3c3b9625d27 100644
--- a/core/dbt/graph/queue.py
+++ b/core/dbt/graph/queue.py
@@ -5,8 +5,12 @@
 from typing import Dict, Set, List, Generator, Optional

 from .graph import UniqueId
-from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedExposure, ParsedMetric
-from dbt.contracts.graph.compiled import GraphMemberNode
+from dbt.contracts.graph.nodes import (
+    SourceDefinition,
+    Exposure,
+    Metric,
+    GraphMemberNode,
+)
 from dbt.contracts.graph.manifest import Manifest
 from dbt.node_types import NodeType

@@ -48,7 +52,7 @@ def _include_in_cost(self, node_id: UniqueId) -> bool:
         if node.resource_type != NodeType.Model:
             return False
         # must be a Model - tell mypy this won't be a Source or Exposure or Metric
-        assert not isinstance(node, (ParsedSourceDefinition, ParsedExposure, ParsedMetric))
+        assert not isinstance(node, (SourceDefinition, Exposure, Metric))
         if node.is_ephemeral:
             return False
         return True
diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py
index 13a3ae0a952..0ed8ac50b0a 100644
--- a/core/dbt/graph/selector.py
+++ b/core/dbt/graph/selector.py
@@ -12,7 +12,7 @@
     InternalException,
     InvalidSelectorException,
 )
-from dbt.contracts.graph.compiled import GraphMemberNode
+from dbt.contracts.graph.nodes import GraphMemberNode
 from dbt.contracts.graph.manifest import Manifest
 from dbt.contracts.state import PreviousState

diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py
index 0e59da38a16..f7044ecaf32 100644
--- a/core/dbt/graph/selector_methods.py
+++ b/core/dbt/graph/selector_methods.py
@@ -7,20 +7,16 @@

 from .graph import UniqueId

-from dbt.contracts.graph.compiled import (
-    CompiledSingularTestNode,
-    CompiledGenericTestNode,
-    CompileResultNode,
-    ManifestNode,
-)
 from dbt.contracts.graph.manifest import Manifest, WritableManifest
-from dbt.contracts.graph.parsed import (
+from dbt.contracts.graph.nodes import (
     HasTestMetadata,
-    ParsedSingularTestNode,
-    ParsedExposure,
-    ParsedMetric,
-    ParsedGenericTestNode,
-    ParsedSourceDefinition,
+    SingularTestNode,
+    Exposure,
+    Metric,
+    GenericTestNode,
+    SourceDefinition,
+    ResultNode,
+    ManifestNode,
 )
 from dbt.contracts.state import PreviousState
 from dbt.exceptions import (
@@ -76,7 +72,7 @@ def is_selected_node(fqn: List[str], node_selector: str):
     return True


-SelectorTarget = Union[ParsedSourceDefinition, ManifestNode, ParsedExposure, ParsedMetric]
+SelectorTarget = Union[SourceDefinition, ManifestNode, Exposure, Metric]


 class SelectorMethod(metaclass=abc.ABCMeta):
@@ -99,7 +95,7 @@ def parsed_nodes(

     def source_nodes(
         self, included_nodes: Set[UniqueId]
-    ) -> Iterator[Tuple[UniqueId, ParsedSourceDefinition]]:
+    ) -> Iterator[Tuple[UniqueId, SourceDefinition]]:
         for key, source in self.manifest.sources.items():
             unique_id = UniqueId(key)

@@ -107,9 +103,7 @@ def source_nodes(
                 continue
             yield unique_id, source

-    def exposure_nodes(
-        self, included_nodes: Set[UniqueId]
-    ) -> Iterator[Tuple[UniqueId, ParsedExposure]]:
+    def exposure_nodes(self, included_nodes: Set[UniqueId]) -> Iterator[Tuple[UniqueId, Exposure]]:
         for key, exposure in self.manifest.exposures.items():
             unique_id = UniqueId(key)

@@ -117,9 +111,7 @@ def exposure_nodes(
                 continue
             yield unique_id, exposure

-    def metric_nodes(
-        self, included_nodes: Set[UniqueId]
-    ) -> Iterator[Tuple[UniqueId, ParsedMetric]]:
+    def metric_nodes(self, included_nodes: Set[UniqueId]) -> Iterator[Tuple[UniqueId, Metric]]:
         for key, metric in self.manifest.metrics.items():
             unique_id = UniqueId(key)

@@ -139,13 +131,13 @@ def all_nodes(

     def configurable_nodes(
         self, included_nodes: Set[UniqueId]
-    ) -> Iterator[Tuple[UniqueId, CompileResultNode]]:
+    ) -> Iterator[Tuple[UniqueId, ResultNode]]:
         yield from chain(self.parsed_nodes(included_nodes), self.source_nodes(included_nodes))

     def non_source_nodes(
         self,
         included_nodes: Set[UniqueId],
-    ) -> Iterator[Tuple[UniqueId, Union[ParsedExposure, ManifestNode, ParsedMetric]]]:
+    ) -> Iterator[Tuple[UniqueId, Union[Exposure, ManifestNode, Metric]]]:
         yield from chain(
             self.parsed_nodes(included_nodes),
             self.exposure_nodes(included_nodes),
@@ -392,19 +384,19 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu

 class TestTypeSelectorMethod(SelectorMethod):
     def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
-        search_types: Tuple[Type, ...]
+        search_type: Type
         # continue supporting 'schema' + 'data' for backwards compatibility
         if selector in ("generic", "schema"):
-            search_types = (ParsedGenericTestNode, CompiledGenericTestNode)
+            search_type = GenericTestNode
         elif selector in ("singular", "data"):
-            search_types = (ParsedSingularTestNode, CompiledSingularTestNode)
+            search_type = SingularTestNode
         else:
             raise RuntimeException(
                 f'Invalid test type selector {selector}: expected "generic" or ' '"singular"'
             )
         for node, real_node in self.parsed_nodes(included_nodes):
-            if isinstance(real_node, search_types):
+            if isinstance(real_node, search_type):
                 yield node
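
With one class per test flavor, the selector needs a single type per selector keyword
instead of a (Parsed, Compiled) pair. A compact sketch of that mapping, with placeholder
node classes rather than the real ones:

    class GenericTestNode: ...
    class SingularTestNode: ...


    SEARCH_TYPES = {
        "generic": GenericTestNode,
        "schema": GenericTestNode,    # legacy alias
        "singular": SingularTestNode,
        "data": SingularTestNode,     # legacy alias
    }


    def matches(selector: str, node: object) -> bool:
        try:
            search_type = SEARCH_TYPES[selector]
        except KeyError:
            raise ValueError(f'expected "generic" or "singular", got {selector!r}')
        return isinstance(node, search_type)


    assert matches("schema", GenericTestNode())
    assert not matches("data", GenericTestNode())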
diff --git a/core/dbt/parser/README.md b/core/dbt/parser/README.md
index 6ab326c42a6..7e4c208cdf9 100644
--- a/core/dbt/parser/README.md
+++ b/core/dbt/parser/README.md
@@ -126,17 +126,17 @@ These have executable SQL attached.
 Models
 - Are generated from SQL files in the 'models' directory
 - have a unique_id starting with 'model.'
-- Final object is a ParsedModelNode
+- Final object is a ModelNode

-Data Tests
+Singular Tests
 - Are generated from SQL files in 'tests' directory
 - have a unique_id starting with 'test.'
-- Final object is a ParsedDataTestNode
+- Final object is a SingularTestNode

-Schema Tests
+Generic Tests
 - Are generated from 'tests' in schema yaml files, which ultimately derive from tests in the 'macros' directory
 - Have a unique_id starting with 'test.'
-- Final object is a ParsedSchemaTestNode
+- Final object is a GenericTestNode
 - fqn is .schema_test.

 Hooks
@@ -146,35 +146,35 @@ Hooks

 Analysis
 - comes from SQL files in 'analysis' directory
-- Final object is a ParsedAnalysisNode
+- Final object is an AnalysisNode

 RPC Node
 - This is a "node" representing the bit of Jinja-SQL that gets passed into the run_sql or compile_sql methods. When you're using the Cloud IDE, and you're working in a scratch tab, and you just want to compile/run what you have there: it needs to be parsed and executed, but it's not actually a model/node in the project, so it's this special thing. This is a temporary addition to the running manifest.
-- Object is a ParsedRPCNode
+- Object is an RPCNode

 ### sources
 - comes from 'sources' sections in yaml files
-- Final object is a ParsedSourceDefinition node
+- Final object is a SourceDefinition node
 - have a unique_id starting with 'source.'

 ### macros
 - comes from SQL files in 'macros' directory
-- Final object is a ParsedMacro node
+- Final object is a Macro node
 - have a unique_id starting with 'macro.'
 - Test macros are used in schema tests

 ### docs
 - comes from .md files in 'docs' directory
-- Final object is a ParsedDocumentation
+- Final object is a Documentation

 ### exposures
 - comes from 'exposures' sections in yaml files
-- Final object is a ParsedExposure node
+- Final object is an Exposure node

 ## Temporary patch files
diff --git a/core/dbt/parser/analysis.py b/core/dbt/parser/analysis.py
index 17eadb8783b..2102a76ac2e 100644
--- a/core/dbt/parser/analysis.py
+++ b/core/dbt/parser/analysis.py
@@ -1,16 +1,16 @@
 import os

-from dbt.contracts.graph.parsed import ParsedAnalysisNode
+from dbt.contracts.graph.nodes import AnalysisNode
 from dbt.node_types import NodeType
 from dbt.parser.base import SimpleSQLParser
 from dbt.parser.search import FileBlock


-class AnalysisParser(SimpleSQLParser[ParsedAnalysisNode]):
-    def parse_from_dict(self, dct, validate=True) -> ParsedAnalysisNode:
+class AnalysisParser(SimpleSQLParser[AnalysisNode]):
+    def parse_from_dict(self, dct, validate=True) -> AnalysisNode:
         if validate:
-            ParsedAnalysisNode.validate(dct)
-        return ParsedAnalysisNode.from_dict(dct)
+            AnalysisNode.validate(dct)
+        return AnalysisNode.from_dict(dct)

     @property
     def resource_type(self) -> NodeType:
diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py
index b6d349803f6..7fff4daebf4 100644
--- a/core/dbt/parser/base.py
+++ b/core/dbt/parser/base.py
@@ -16,7 +16,7 @@
 from dbt.config import Project, RuntimeConfig
 from dbt.context.context_config import ContextConfig
 from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.graph.parsed import HasUniqueID, ManifestNodes
+from dbt.contracts.graph.nodes import HasUniqueID, ManifestNode
 from dbt.contracts.graph.unparsed import UnparsedNode, Docs
 from dbt.exceptions import ParsingException, validator_error_message, InternalException
 from dbt import hooks
@@ -30,7 +30,7 @@
 IntermediateValue = TypeVar("IntermediateValue", bound=HasUniqueID)
 IntermediateNode = TypeVar("IntermediateNode", bound=Any)
-FinalNode = TypeVar("FinalNode", bound=ManifestNodes)
+FinalNode = TypeVar("FinalNode", bound=ManifestNode)

 ConfiguredBlockType = TypeVar("ConfiguredBlockType", bound=FileBlock)
@@ -365,7 +365,7 @@ def render_update(self, node: IntermediateNode, config: ContextConfig) -> None:
             msg = validator_error_message(exc)
             raise ParsingException(msg, node=node) from exc

-    def add_result_node(self, block: FileBlock, node: ManifestNodes):
+    def add_result_node(self, block: FileBlock, node: ManifestNode):
         if node.config.enabled:
             self.manifest.add_node(block.file, node)
         else:
diff --git a/core/dbt/parser/docs.py b/core/dbt/parser/docs.py
index a1130eda0da..fb9b488276e 100644
--- a/core/dbt/parser/docs.py
+++ b/core/dbt/parser/docs.py
@@ -4,7 +4,7 @@

 from dbt.clients.jinja import get_rendered
 from dbt.contracts.files import SourceFile
-from dbt.contracts.graph.parsed import ParsedDocumentation
+from dbt.contracts.graph.nodes import Documentation
 from dbt.node_types import NodeType
 from dbt.parser.base import Parser
 from dbt.parser.search import BlockContents, FileBlock, BlockSearcher
@@ -13,7 +13,7 @@
 SHOULD_PARSE_RE = re.compile(r"{[{%]")


-class DocumentationParser(Parser[ParsedDocumentation]):
+class DocumentationParser(Parser[Documentation]):
     @property
     def resource_type(self) -> NodeType:
         return NodeType.Documentation
@@ -27,11 +27,11 @@ def generate_unique_id(self, resource_name: str, _: Optional[str] = None) -> str
         # need to be part of the unique ID.
         return "{}.{}".format(self.project.project_name, resource_name)

-    def parse_block(self, block: BlockContents) -> Iterable[ParsedDocumentation]:
+    def parse_block(self, block: BlockContents) -> Iterable[Documentation]:
         unique_id = self.generate_unique_id(block.name)
         contents = get_rendered(block.contents, {}).strip()

-        doc = ParsedDocumentation(
+        doc = Documentation(
             path=block.file.path.relative_path,
             original_file_path=block.path.original_file_path,
             package_name=self.project.project_name,
diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py
index b69ca20ef6d..822dd5b2d85 100644
--- a/core/dbt/parser/generic_test.py
+++ b/core/dbt/parser/generic_test.py
@@ -4,9 +4,8 @@

 from dbt.exceptions import ParsingException
 from dbt.clients import jinja
-from dbt.contracts.graph.parsed import ParsedGenericTestNode
+from dbt.contracts.graph.nodes import GenericTestNode, Macro
 from dbt.contracts.graph.unparsed import UnparsedMacro
-from dbt.contracts.graph.parsed import ParsedMacro
 from dbt.contracts.files import SourceFile
 from dbt.events.functions import fire_event
 from dbt.events.types import GenericTestFileParse
@@ -17,7 +16,7 @@
 from dbt import flags


-class GenericTestParser(BaseParser[ParsedGenericTestNode]):
+class GenericTestParser(BaseParser[GenericTestNode]):
     @property
     def resource_type(self) -> NodeType:
         return NodeType.Macro
@@ -28,10 +27,10 @@ def get_compiled_path(cls, block: FileBlock):

     def parse_generic_test(
         self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str
-    ) -> ParsedMacro:
+    ) -> Macro:
         unique_id = self.generate_unique_id(name)

-        return ParsedMacro(
+        return Macro(
             path=base_node.path,
             macro_sql=block.full_block,
             original_file_path=base_node.original_file_path,
@@ -41,7 +40,7 @@ def parse_generic_test(
             unique_id=unique_id,
         )

-    def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[ParsedMacro]:
+    def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macro]:
         try:
             blocks: List[jinja.BlockTag] = [
                 t
diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py
index 3dfb541cb8f..3b1149e53a5 100644
--- a/core/dbt/parser/generic_test_builders.py
+++ b/core/dbt/parser/generic_test_builders.py
@@ -13,7 +13,7 @@
 )

 from dbt.clients.jinja import get_rendered, GENERIC_TEST_KWARGS_NAME
-from dbt.contracts.graph.parsed import UnpatchedSourceDefinition
+from dbt.contracts.graph.nodes import UnpatchedSourceDefinition
 from dbt.contracts.graph.unparsed import (
     TestDef,
     UnparsedAnalysisUpdate,
diff --git a/core/dbt/parser/hooks.py b/core/dbt/parser/hooks.py
index 2ac8bfda0ef..d05ea136dc5 100644
--- a/core/dbt/parser/hooks.py
+++ b/core/dbt/parser/hooks.py
@@ -3,7 +3,7 @@

 from dbt.context.context_config import ContextConfig
 from dbt.contracts.files import FilePath
-from dbt.contracts.graph.parsed import ParsedHookNode
+from dbt.contracts.graph.nodes import HookNode
 from dbt.exceptions import InternalException
 from dbt.node_types import NodeType, RunHookType
 from dbt.parser.base import SimpleParser
@@ -65,7 +65,7 @@ def __iter__(self) -> Iterator[HookBlock]:
             )


-class HookParser(SimpleParser[HookBlock, ParsedHookNode]):
+class HookParser(SimpleParser[HookBlock, HookNode]):
     def transform(self, node):
         return node

@@ -81,10 +81,10 @@ def get_path(self) -> FilePath:
         )
         return path

-    def parse_from_dict(self, dct, validate=True) -> ParsedHookNode:
+    def parse_from_dict(self, dct, validate=True) -> HookNode:
         if validate:
-            ParsedHookNode.validate(dct)
-        return ParsedHookNode.from_dict(dct)
+
HookNode.validate(dct) + return HookNode.from_dict(dct) @classmethod def get_compiled_path(cls, block: HookBlock): @@ -98,7 +98,7 @@ def _create_parsetime_node( fqn: List[str], name=None, **kwargs, - ) -> ParsedHookNode: + ) -> HookNode: return super()._create_parsetime_node( block=block, diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py index 7f99753ad2c..7c5336b8ccf 100644 --- a/core/dbt/parser/macros.py +++ b/core/dbt/parser/macros.py @@ -4,7 +4,7 @@ from dbt.clients import jinja from dbt.contracts.graph.unparsed import UnparsedMacro -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro from dbt.contracts.files import FilePath, SourceFile from dbt.exceptions import ParsingException from dbt.events.functions import fire_event @@ -16,7 +16,7 @@ from dbt import flags -class MacroParser(BaseParser[ParsedMacro]): +class MacroParser(BaseParser[Macro]): # This is only used when creating a MacroManifest separate # from the normal parsing flow. def get_paths(self) -> List[FilePath]: @@ -32,12 +32,10 @@ def resource_type(self) -> NodeType: def get_compiled_path(cls, block: FileBlock): return block.path.relative_path - def parse_macro( - self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str - ) -> ParsedMacro: + def parse_macro(self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str) -> Macro: unique_id = self.generate_unique_id(name) - return ParsedMacro( + return Macro( path=base_node.path, macro_sql=block.full_block, original_file_path=base_node.original_file_path, @@ -47,7 +45,7 @@ def parse_macro( unique_id=unique_id, ) - def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[ParsedMacro]: + def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]: try: blocks: List[jinja.BlockTag] = [ t diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index bbfe9714697..21594a93318 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -53,7 +53,6 @@ from dbt.contracts.files import FileHash, ParseFileType, SchemaSourceFile from dbt.parser.read_files import read_files, load_source_file from dbt.parser.partial import PartialParsing, special_override_macros -from dbt.contracts.graph.compiled import ManifestNode from dbt.contracts.graph.manifest import ( Manifest, Disabled, @@ -61,13 +60,14 @@ ManifestStateCheck, ParsingInfo, ) -from dbt.contracts.graph.parsed import ( - ParsedSourceDefinition, +from dbt.contracts.graph.nodes import ( + SourceDefinition, ParsedNode, - ParsedMacro, + Macro, ColumnInfo, - ParsedExposure, - ParsedMetric, + Exposure, + Metric, + ManifestNode, ) from dbt.contracts.util import Writable from dbt.exceptions import ( @@ -366,7 +366,7 @@ def load(self): self._perf_info.parse_project_elapsed = time.perf_counter() - start_parse_projects # patch_sources converts the UnparsedSourceDefinitions in the - # Manifest.sources to ParsedSourceDefinition via 'patch_source' + # Manifest.sources to SourceDefinition via 'patch_source' # in SourcePatcher start_patch = time.perf_counter() patcher = SourcePatcher(self.root_project, self.manifest) @@ -921,7 +921,7 @@ def process_sources(self, current_project: str): for node in self.manifest.nodes.values(): if node.resource_type == NodeType.Source: continue - assert not isinstance(node, ParsedSourceDefinition) + assert not isinstance(node, SourceDefinition) if node.created_at < self.started_at: continue _process_sources_for_node(self.manifest, current_project, node) @@ -1053,7 +1053,7 @@ def 
_get_node_column(node, column_name): return column -DocsContextCallback = Callable[[Union[ParsedNode, ParsedSourceDefinition]], Dict[str, Any]] +DocsContextCallback = Callable[[Union[ParsedNode, SourceDefinition]], Dict[str, Any]] # node and column descriptions @@ -1069,7 +1069,7 @@ def _process_docs_for_node( # source and table descriptions, column descriptions def _process_docs_for_source( context: Dict[str, Any], - source: ParsedSourceDefinition, + source: SourceDefinition, ): table_description = source.description source_description = source.source_description @@ -1085,22 +1085,22 @@ def _process_docs_for_source( # macro argument descriptions -def _process_docs_for_macro(context: Dict[str, Any], macro: ParsedMacro) -> None: +def _process_docs_for_macro(context: Dict[str, Any], macro: Macro) -> None: macro.description = get_rendered(macro.description, context) for arg in macro.arguments: arg.description = get_rendered(arg.description, context) # exposure descriptions -def _process_docs_for_exposure(context: Dict[str, Any], exposure: ParsedExposure) -> None: +def _process_docs_for_exposure(context: Dict[str, Any], exposure: Exposure) -> None: exposure.description = get_rendered(exposure.description, context) -def _process_docs_for_metrics(context: Dict[str, Any], metric: ParsedMetric) -> None: +def _process_docs_for_metrics(context: Dict[str, Any], metric: Metric) -> None: metric.description = get_rendered(metric.description, context) -def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: ParsedExposure): +def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: Exposure): """Given a manifest and exposure in that manifest, process its refs""" for ref in exposure.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None @@ -1143,7 +1143,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur manifest.update_exposure(exposure) -def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric): +def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: Metric): """Given a manifest and a metric in that manifest, process its refs""" for ref in metric.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None @@ -1188,11 +1188,11 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P def _process_metrics_for_node( manifest: Manifest, current_project: str, - node: Union[ManifestNode, ParsedMetric, ParsedExposure], + node: Union[ManifestNode, Metric, Exposure], ): """Given a manifest and a node in that manifest, process its metrics""" for metric in node.metrics: - target_metric: Optional[Union[Disabled, ParsedMetric]] = None + target_metric: Optional[Union[Disabled, Metric]] = None target_metric_name: str target_metric_package: Optional[str] = None @@ -1276,10 +1276,8 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif manifest.update_node(node) -def _process_sources_for_exposure( - manifest: Manifest, current_project: str, exposure: ParsedExposure -): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None +def _process_sources_for_exposure(manifest: Manifest, current_project: str, exposure: Exposure): + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in exposure.sources: target_source = manifest.resolve_source( source_name, @@ -1301,8 +1299,8 @@ def _process_sources_for_exposure( manifest.update_exposure(exposure) -def 
_process_sources_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None +def _process_sources_for_metric(manifest: Manifest, current_project: str, metric: Metric): + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in metric.sources: target_source = manifest.resolve_source( source_name, @@ -1325,7 +1323,7 @@ def _process_sources_for_metric(manifest: Manifest, current_project: str, metric def _process_sources_for_node(manifest: Manifest, current_project: str, node: ManifestNode): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in node.sources: target_source = manifest.resolve_source( source_name, @@ -1351,7 +1349,7 @@ def _process_sources_for_node(manifest: Manifest, current_project: str, node: Ma # This is called in task.rpc.sql_commands when a "dynamic" node is # created in the manifest, in 'add_refs' -def process_macro(config: RuntimeConfig, manifest: Manifest, macro: ParsedMacro) -> None: +def process_macro(config: RuntimeConfig, manifest: Manifest, macro: Macro) -> None: ctx = generate_runtime_docs_context( config, macro, diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index 0a3f87018d9..7dea4aca135 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -1,6 +1,6 @@ from copy import deepcopy from dbt.context.context_config import ContextConfig -from dbt.contracts.graph.parsed import ParsedModelNode +from dbt.contracts.graph.nodes import ModelNode import dbt.flags as flags from dbt.events.functions import fire_event from dbt.events.types import ( @@ -181,11 +181,11 @@ def verify_python_model_code(node): raise ParsingException("No jinja in python model code is allowed", node=node) -class ModelParser(SimpleSQLParser[ParsedModelNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedModelNode: +class ModelParser(SimpleSQLParser[ModelNode]): + def parse_from_dict(self, dct, validate=True) -> ModelNode: if validate: - ParsedModelNode.validate(dct) - return ParsedModelNode.from_dict(dct) + ModelNode.validate(dct) + return ModelNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -221,7 +221,7 @@ def parse_python_model(self, node, config, context): # this is being used in macro build_config_dict context["config"](config_keys_used=config_keys_used) - def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: + def render_update(self, node: ModelNode, config: ContextConfig) -> None: self.manifest._parsing_info.static_analysis_path_count += 1 if node.language == ModelLanguage.python: @@ -266,9 +266,9 @@ def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: # top-level declaration of variables statically_parsed: Optional[Union[str, Dict[str, List[Any]]]] = None experimental_sample: Optional[Union[str, Dict[str, List[Any]]]] = None - exp_sample_node: Optional[ParsedModelNode] = None + exp_sample_node: Optional[ModelNode] = None exp_sample_config: Optional[ContextConfig] = None - jinja_sample_node: Optional[ParsedModelNode] = None + jinja_sample_node: Optional[ModelNode] = None jinja_sample_config: Optional[ContextConfig] = None result: List[str] = [] @@ -369,9 +369,7 @@ def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: } ) - def run_static_parser( - self, node: ParsedModelNode - ) -> Optional[Union[str, 
Dict[str, List[Any]]]]: + def run_static_parser(self, node: ModelNode) -> Optional[Union[str, Dict[str, List[Any]]]]: # if any banned macros have been overridden by the user, we cannot use the static parser. if self._has_banned_macro(node): # this log line is used for integration testing. If you change @@ -393,7 +391,7 @@ def run_static_parser( return "cannot_parse" def run_experimental_parser( - self, node: ParsedModelNode + self, node: ModelNode ) -> Optional[Union[str, Dict[str, List[Any]]]]: # if any banned macros have been overridden by the user, we cannot use the static parser. if self._has_banned_macro(node): @@ -419,7 +417,7 @@ def run_experimental_parser( return "cannot_parse" # checks for banned macros - def _has_banned_macro(self, node: ParsedModelNode) -> bool: + def _has_banned_macro(self, node: ModelNode) -> bool: # first check if there is a banned macro defined in scope for this model file root_project_name = self.root_project.project_name project_name = node.package_name @@ -439,9 +437,7 @@ def _has_banned_macro(self, node: ParsedModelNode) -> bool: # this method updates the model node rendered and unrendered config as well # as the node object. Used to populate these values when circumventing jinja # rendering like the static parser. - def populate( - self, node: ParsedModelNode, config: ContextConfig, statically_parsed: Dict[str, Any] - ): + def populate(self, node: ModelNode, config: ContextConfig, statically_parsed: Dict[str, Any]): # manually fit configs in config._config_call_dict = _get_config_call_dict(statically_parsed) @@ -489,9 +485,9 @@ def _shift_sources(static_parser_result: Dict[str, List[Any]]) -> Dict[str, List # returns a list of string codes to be sent as a tracking event def _get_exp_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[str]: result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config) @@ -505,9 +501,9 @@ def process(codemsg): # returns a list of string codes to be sent as a tracking event def _get_stable_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[str]: result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config) @@ -522,9 +518,9 @@ def process(codemsg): # returns a list of string codes that need a single digit prefix to be prepended # before being sent as a tracking event def _get_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[Tuple[int, str]]: result: List[Tuple[int, str]] = [] diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py index 1a8c7e8193e..63ef33429c4 100644 --- a/core/dbt/parser/partial.py +++ b/core/dbt/parser/partial.py @@ -873,7 +873,7 @@ def delete_schema_source(self, schema_file, source_dict): source_name = source_dict["name"] # There may be multiple sources for each source dict, since # there will be a separate source node for each table. 
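# (Sketch with hypothetical names: a source 'raw' with tables 'a' and
# 'b' in package 'my_pkg' yields two nodes, 'source.my_pkg.raw.a' and
# 'source.my_pkg.raw.b'.)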
- # ParsedSourceDefinition name = table name, dict name is source_name + # SourceDefinition name = table name, dict name is source_name sources = schema_file.sources.copy() for unique_id in sources: if unique_id in self.saved_manifest.sources: diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 4909d99f44e..5756ed4ba02 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -27,14 +27,14 @@ from dbt.context.macro_resolver import MacroResolver from dbt.contracts.files import FileHash, SchemaSourceFile from dbt.contracts.graph.model_config import MetricConfig, ExposureConfig -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( ParsedNodePatch, ColumnInfo, - ParsedGenericTestNode, + GenericTestNode, ParsedMacroPatch, UnpatchedSourceDefinition, - ParsedExposure, - ParsedMetric, + Exposure, + Metric, ) from dbt.contracts.graph.unparsed import ( HasColumnDocs, @@ -168,7 +168,7 @@ def _trimmed(inp: str) -> str: return inp[:44] + "..." + inp[-3:] -class SchemaParser(SimpleParser[GenericTestBlock, ParsedGenericTestNode]): +class SchemaParser(SimpleParser[GenericTestBlock, GenericTestNode]): def __init__( self, project, @@ -195,10 +195,10 @@ def get_compiled_path(cls, block: FileBlock) -> str: def resource_type(self) -> NodeType: return NodeType.Test - def parse_from_dict(self, dct, validate=True) -> ParsedGenericTestNode: + def parse_from_dict(self, dct, validate=True) -> GenericTestNode: if validate: - ParsedGenericTestNode.validate(dct) - return ParsedGenericTestNode.from_dict(dct) + GenericTestNode.validate(dct) + return GenericTestNode.from_dict(dct) def parse_column_tests(self, block: TestBlock, column: UnparsedColumn) -> None: if not column.tests: @@ -219,7 +219,7 @@ def create_test_node( test_metadata: Dict[str, Any], file_key_name: str, column_name: Optional[str], - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: HASH_LENGTH = 10 @@ -259,8 +259,8 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List "file_key_name": file_key_name, } try: - ParsedGenericTestNode.validate(dct) - return ParsedGenericTestNode.from_dict(dct) + GenericTestNode.validate(dct) + return GenericTestNode.from_dict(dct) except ValidationError as exc: msg = validator_error_message(exc) # this is a bit silly, but build an UnparsedNode just for error @@ -281,7 +281,7 @@ def _parse_generic_test( tags: List[str], column_name: Optional[str], schema_file_id: str, - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: try: builder = TestBuilder( test=test, @@ -416,7 +416,7 @@ def render_test_update(self, node, config, builder, schema_file_id): msg = validator_error_message(exc) raise ParsingException(msg, node=node) from exc - def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode: + def parse_node(self, block: GenericTestBlock) -> GenericTestNode: """In schema parsing, we rewrite most of the part of parse_node that builds the initial node to be parsed, but rendering is basically the same @@ -431,7 +431,7 @@ def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode: self.add_test_node(block, node) return node - def add_test_node(self, block: GenericTestBlock, node: ParsedGenericTestNode): + def add_test_node(self, block: GenericTestBlock, node: GenericTestNode): test_from = {"key": block.target.yaml_key, "name": block.target.name} if node.config.enabled: self.manifest.add_node(block.file, node, test_from) @@ -440,7 +440,7 @@ def add_test_node(self, block: GenericTestBlock, node: 
ParsedGenericTestNode): def render_with_context( self, - node: ParsedGenericTestNode, + node: GenericTestNode, config: ContextConfig, ) -> None: """Given the parsed node and a ContextConfig to use during @@ -1027,7 +1027,7 @@ def parse_exposure(self, unparsed: UnparsedExposure): f"Calculated a {type(config)} for an exposure, but expected an ExposureConfig" ) - parsed = ParsedExposure( + parsed = Exposure( package_name=package_name, path=path, original_file_path=self.yaml.path.original_file_path, @@ -1130,7 +1130,7 @@ def parse_metric(self, unparsed: UnparsedMetric): f"Calculated a {type(config)} for a metric, but expected a MetricConfig" ) - parsed = ParsedMetric( + parsed = Metric( package_name=package_name, path=path, original_file_path=self.yaml.path.original_file_path, diff --git a/core/dbt/parser/seeds.py b/core/dbt/parser/seeds.py index 0cd5aeb6307..02c20df7cf5 100644 --- a/core/dbt/parser/seeds.py +++ b/core/dbt/parser/seeds.py @@ -1,17 +1,17 @@ from dbt.context.context_config import ContextConfig -from dbt.contracts.graph.parsed import ParsedSeedNode +from dbt.contracts.graph.nodes import SeedNode from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock -class SeedParser(SimpleSQLParser[ParsedSeedNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSeedNode: +class SeedParser(SimpleSQLParser[SeedNode]): + def parse_from_dict(self, dct, validate=True) -> SeedNode: # seeds need the root_path because the contents are not loaded dct["root_path"] = self.project.project_root if validate: - ParsedSeedNode.validate(dct) - return ParsedSeedNode.from_dict(dct) + SeedNode.validate(dct) + return SeedNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -21,5 +21,5 @@ def resource_type(self) -> NodeType: def get_compiled_path(cls, block: FileBlock): return block.path.relative_path - def render_with_context(self, parsed_node: ParsedSeedNode, config: ContextConfig) -> None: + def render_with_context(self, parsed_node: SeedNode, config: ContextConfig) -> None: """Seeds don't need to do any rendering.""" diff --git a/core/dbt/parser/singular_test.py b/core/dbt/parser/singular_test.py index 22d203a8ebc..fbb3c8ce8fa 100644 --- a/core/dbt/parser/singular_test.py +++ b/core/dbt/parser/singular_test.py @@ -1,15 +1,15 @@ -from dbt.contracts.graph.parsed import ParsedSingularTestNode +from dbt.contracts.graph.nodes import SingularTestNode from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock from dbt.utils import get_pseudo_test_path -class SingularTestParser(SimpleSQLParser[ParsedSingularTestNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSingularTestNode: +class SingularTestParser(SimpleSQLParser[SingularTestNode]): + def parse_from_dict(self, dct, validate=True) -> SingularTestNode: if validate: - ParsedSingularTestNode.validate(dct) - return ParsedSingularTestNode.from_dict(dct) + SingularTestNode.validate(dct) + return SingularTestNode.from_dict(dct) @property def resource_type(self) -> NodeType: diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py index 71e7bba955f..754a0341e69 100644 --- a/core/dbt/parser/snapshots.py +++ b/core/dbt/parser/snapshots.py @@ -3,7 +3,7 @@ from dbt.dataclass_schema import ValidationError -from dbt.contracts.graph.parsed import IntermediateSnapshotNode, ParsedSnapshotNode +from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode from dbt.exceptions import 
ParsingException, validator_error_message from dbt.node_types import NodeType from dbt.parser.base import SQLParser @@ -11,7 +11,7 @@ from dbt.utils import split_path -class SnapshotParser(SQLParser[IntermediateSnapshotNode, ParsedSnapshotNode]): +class SnapshotParser(SQLParser[IntermediateSnapshotNode, SnapshotNode]): def parse_from_dict(self, dct, validate=True) -> IntermediateSnapshotNode: if validate: IntermediateSnapshotNode.validate(dct) @@ -53,7 +53,7 @@ def get_fqn(self, path: str, name: str) -> List[str]: fqn.append(name) return fqn - def transform(self, node: IntermediateSnapshotNode) -> ParsedSnapshotNode: + def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode: try: # The config_call_dict is not serialized, because normally # it is not needed after parsing. But since the snapshot node @@ -61,7 +61,7 @@ def transform(self, node: IntermediateSnapshotNode) -> ParsedSnapshotNode: # the model config when there is also schema config. config_call_dict = node.config_call_dict dct = node.to_dict(omit_none=True) - parsed_node = ParsedSnapshotNode.from_dict(dct) + parsed_node = SnapshotNode.from_dict(dct) parsed_node.config_call_dict = config_call_dict self.set_snapshot_attributes(parsed_node) return parsed_node diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 73fdd80e4c9..cc9acea98c3 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -10,10 +10,10 @@ ) from dbt.contracts.graph.manifest import Manifest, SourceKey from dbt.contracts.graph.model_config import SourceConfig -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( UnpatchedSourceDefinition, - ParsedSourceDefinition, - ParsedGenericTestNode, + SourceDefinition, + GenericTestNode, ) from dbt.contracts.graph.unparsed import ( UnparsedSourceDefinition, @@ -38,7 +38,7 @@ # generate multiple UnpatchedSourceDefinition nodes (one per # table) in the SourceParser.add_source_definitions. The # SourcePatcher takes an UnparsedSourceDefinition and the -# SourcePatch and produces a ParsedSourceDefinition. Each +# SourcePatch and produces a SourceDefinition. Each # SourcePatch can be applied to multiple UnpatchedSourceDefinitions. class SourcePatcher: def __init__( @@ -50,16 +50,16 @@ def __init__( self.manifest = manifest self.schema_parsers: Dict[str, SchemaParser] = {} self.patches_used: Dict[SourceKey, Set[str]] = {} - self.sources: Dict[str, ParsedSourceDefinition] = {} + self.sources: Dict[str, SourceDefinition] = {} # This method calls the 'parse_source' method which takes # the UnpatchedSourceDefinitions in the manifest and combines them - # with SourcePatches to produce ParsedSourceDefinitions. + # with SourcePatches to produce SourceDefinitions. def construct_sources(self) -> None: for unique_id, unpatched in self.manifest.sources.items(): schema_file = self.manifest.files[unpatched.file_id] - if isinstance(unpatched, ParsedSourceDefinition): - # In partial parsing, there will be ParsedSourceDefinitions + if isinstance(unpatched, SourceDefinition): + # In partial parsing, there will be SourceDefinitions # which must be retained. 
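# (Patching flow sketch, per the methods defined below:
#     patch = self.get_patch_for(unpatched)          # SourcePatch or None
#     patched = self.patch_source(unpatched, patch)  # merge patch, if any
#     parsed = self.parse_source(patched)            # -> SourceDefinition
# )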
self.sources[unpatched.unique_id] = unpatched continue @@ -80,7 +80,7 @@ def construct_sources(self) -> None: test_from = {"key": "sources", "name": patched.source.name} schema_file.add_test(test.unique_id, test_from) - # Convert UnpatchedSourceDefinition to a ParsedSourceDefinition + # Convert UnpatchedSourceDefinition to a SourceDefinition parsed = self.parse_source(patched) if parsed.config.enabled: self.sources[unique_id] = parsed @@ -118,8 +118,8 @@ def patch_source( table = UnparsedSourceTableDefinition.from_dict(table_dct) return unpatched.replace(source=source, table=table, patch_path=patch_path) - # This converts an UnpatchedSourceDefinition to a ParsedSourceDefinition - def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinition: + # This converts an UnpatchedSourceDefinition to a SourceDefinition + def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition: source = target.source table = target.table refs = ParserRef.from_target(table) @@ -156,7 +156,7 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinit default_database = self.root_project.credentials.database - parsed_source = ParsedSourceDefinition( + parsed_source = SourceDefinition( package_name=target.package_name, database=(source.database or default_database), schema=(source.schema or source.name), @@ -201,9 +201,7 @@ def get_schema_parser_for(self, package_name: str) -> "SchemaParser": self.schema_parsers[package_name] = schema_parser return schema_parser - def get_source_tests( - self, target: UnpatchedSourceDefinition - ) -> Iterable[ParsedGenericTestNode]: + def get_source_tests(self, target: UnpatchedSourceDefinition) -> Iterable[GenericTestNode]: for test, column in target.get_tests(): yield self.parse_source_test( target=target, @@ -215,7 +213,7 @@ def get_patch_for( self, unpatched: UnpatchedSourceDefinition, ) -> Optional[SourcePatch]: - if isinstance(unpatched, ParsedSourceDefinition): + if isinstance(unpatched, SourceDefinition): return None key = (unpatched.package_name, unpatched.source.name) patch: Optional[SourcePatch] = self.manifest.source_patches.get(key) @@ -234,7 +232,7 @@ def parse_source_test( target: UnpatchedSourceDefinition, test: Dict[str, Any], column: Optional[UnparsedColumn], - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: column_name: Optional[str] if column is None: column_name = None @@ -286,7 +284,7 @@ def _generate_source_config(self, target: UnpatchedSourceDefinition, rendered: b patch_config_dict=precedence_configs, ) - def _get_relation_name(self, node: ParsedSourceDefinition): + def _get_relation_name(self, node: SourceDefinition): adapter = get_adapter(self.root_project) relation_cls = adapter.Relation return str(relation_cls.create_from(self.root_project, node)) diff --git a/core/dbt/parser/sql.py b/core/dbt/parser/sql.py index 14c74247b62..82d09c12d6b 100644 --- a/core/dbt/parser/sql.py +++ b/core/dbt/parser/sql.py @@ -3,7 +3,7 @@ from typing import Iterable from dbt.contracts.graph.manifest import SourceFile -from dbt.contracts.graph.parsed import ParsedSqlNode, ParsedMacro +from dbt.contracts.graph.nodes import SqlNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro from dbt.exceptions import InternalException from dbt.node_types import NodeType @@ -21,11 +21,11 @@ def name(self): return self.block_name -class SqlBlockParser(SimpleSQLParser[ParsedSqlNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSqlNode: +class SqlBlockParser(SimpleSQLParser[SqlNode]): + def 
parse_from_dict(self, dct, validate=True) -> SqlNode: if validate: - ParsedSqlNode.validate(dct) - return ParsedSqlNode.from_dict(dct) + SqlNode.validate(dct) + return SqlNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -42,14 +42,14 @@ def get_compiled_path(block: FileBlock): return os.path.join("sql", block.name) - def parse_remote(self, sql: str, name: str) -> ParsedSqlNode: + def parse_remote(self, sql: str, name: str) -> SqlNode: source_file = SourceFile.remote(sql, self.project.project_name, "sql") contents = SqlBlock(block_name=name, file=source_file) return self.parse_node(contents) class SqlMacroParser(MacroParser): - def parse_remote(self, contents) -> Iterable[ParsedMacro]: + def parse_remote(self, contents) -> Iterable[Macro]: base = UnparsedMacro( path="from remote system", original_file_path="from remote system", diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index 51944cb4508..704368cf24f 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -25,7 +25,7 @@ from dbt.node_types import NodeType from dbt.graph import ResourceTypeSelector -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition RESULT_FILE_NAME = "sources.json" @@ -141,7 +141,7 @@ class FreshnessSelector(ResourceTypeSelector): def node_is_match(self, node): if not super().node_is_match(node): return False - if not isinstance(node, ParsedSourceDefinition): + if not isinstance(node, SourceDefinition): return False return node.has_freshness diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py index 0bc6f3f9527..48db2e772ba 100644 --- a/core/dbt/task/generate.py +++ b/core/dbt/task/generate.py @@ -8,7 +8,7 @@ from .compile import CompileTask from dbt.adapters.factory import get_adapter -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest from dbt.contracts.results import ( NodeStatus, @@ -174,7 +174,7 @@ def format_stats(stats: PrimitiveDict) -> StatsDict: return stats_collector -def mapping_key(node: CompileResultNode) -> CatalogKey: +def mapping_key(node: ResultNode) -> CatalogKey: dkey = dbt.utils.lowercase(node.database) return CatalogKey(dkey, node.schema.lower(), node.identifier.lower()) diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index d2a33dec184..e1be8f214d3 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -1,6 +1,6 @@ import json -from dbt.contracts.graph.parsed import ParsedExposure, ParsedSourceDefinition, ParsedMetric +from dbt.contracts.graph.nodes import Exposure, SourceDefinition, Metric from dbt.graph import ResourceTypeSelector from dbt.task.runnable import GraphRunnableTask, ManifestTask from dbt.task.test import TestSelector @@ -91,17 +91,17 @@ def _iterate_selected_nodes(self): def generate_selectors(self): for node in self._iterate_selected_nodes(): if node.resource_type == NodeType.Source: - assert isinstance(node, ParsedSourceDefinition) + assert isinstance(node, SourceDefinition) # sources are searched for by pkg.source_name.table_name source_selector = ".".join([node.package_name, node.source_name, node.name]) yield f"source:{source_selector}" elif node.resource_type == NodeType.Exposure: - assert isinstance(node, ParsedExposure) + assert isinstance(node, Exposure) # exposures are searched for by pkg.exposure_name exposure_selector = ".".join([node.package_name, node.name]) yield f"exposure:{exposure_selector}" elif 
node.resource_type == NodeType.Metric: - assert isinstance(node, ParsedMetric) + assert isinstance(node, Metric) # metrics are searched for by pkg.metric_name metric_selector = ".".join([node.package_name, node.name]) yield f"metric:{metric_selector}" diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 39776b58e87..5b88d039904 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -17,9 +17,8 @@ from dbt.adapters.base import BaseRelation from dbt.clients.jinja import MacroGenerator from dbt.context.providers import generate_runtime_model_context -from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.model_config import Hook -from dbt.contracts.graph.parsed import ParsedHookNode +from dbt.contracts.graph.nodes import HookNode, ResultNode from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus, BaseResult from dbt.exceptions import ( CompilationException, @@ -79,17 +78,17 @@ def __eq__(self, other): return isinstance(other, self.__class__) -def _hook_list() -> List[ParsedHookNode]: +def _hook_list() -> List[HookNode]: return [] def get_hooks_by_tags( - nodes: Iterable[CompileResultNode], + nodes: Iterable[ResultNode], match_tags: Set[str], -) -> List[ParsedHookNode]: +) -> List[HookNode]: matched_nodes = [] for node in nodes: - if not isinstance(node, ParsedHookNode): + if not isinstance(node, HookNode): continue node_tags = node.tags if len(set(node_tags) & match_tags): @@ -304,20 +303,20 @@ def get_hook_sql(self, adapter, hook, idx, num_hooks, extra_context): hook_obj = get_hook(statement, index=hook_index) return hook_obj.sql or "" - def _hook_keyfunc(self, hook: ParsedHookNode) -> Tuple[str, Optional[int]]: + def _hook_keyfunc(self, hook: HookNode) -> Tuple[str, Optional[int]]: package_name = hook.package_name if package_name == self.config.project_name: package_name = BiggestName("") return package_name, hook.index - def get_hooks_by_type(self, hook_type: RunHookType) -> List[ParsedHookNode]: + def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]: if self.manifest is None: raise InternalException("self.manifest was None in get_hooks_by_type") nodes = self.manifest.nodes.values() # find all hooks defined in the manifest (could be multiple projects) - hooks: List[ParsedHookNode] = get_hooks_by_tags(nodes, {hook_type}) + hooks: List[HookNode] = get_hooks_by_tags(nodes, {hook_type}) hooks.sort(key=self._hook_keyfunc) return hooks diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 279baffc448..226005497e4 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -39,9 +39,8 @@ NothingToDo, ) from dbt.events.contextvars import log_contextvars -from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition, ResultNode from dbt.contracts.results import NodeStatus, RunExecutionResult, RunningStatus from dbt.contracts.state import PreviousState from dbt.exceptions import ( @@ -108,7 +107,7 @@ class GraphRunnableTask(ManifestTask): def __init__(self, args, config): super().__init__(args, config) self.job_queue: Optional[GraphQueue] = None - self._flattened_nodes: Optional[List[CompileResultNode]] = None + self._flattened_nodes: Optional[List[ResultNode]] = None self.run_count: int = 0 self.num_nodes: int = 0 @@ -330,7 +329,7 @@ def _handle_result(self, result): if self.manifest is None: raise 
InternalException("manifest was None in _handle_result") - if isinstance(node, ParsedSourceDefinition): + if isinstance(node, SourceDefinition): self.manifest.update_source(node) else: self.manifest.update_node(node) diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index a2f64a80315..e48dc94e4e4 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -5,15 +5,12 @@ from dbt.events.format import pluralize from dbt.dataclass_schema import dbtClassMixin import threading -from typing import Union from .compile import CompileRunner from .run import RunTask -from dbt.contracts.graph.compiled import ( - CompiledSingularTestNode, - CompiledGenericTestNode, - CompiledTestNode, +from dbt.contracts.graph.nodes import ( + TestNode, ) from dbt.contracts.graph.manifest import Manifest from dbt.contracts.results import TestStatus, PrimitiveDict, RunResult @@ -91,7 +88,7 @@ def before_execute(self): self.print_start_line() def execute_test( - self, test: Union[CompiledSingularTestNode, CompiledGenericTestNode], manifest: Manifest + self, test: TestNode, manifest: Manifest ) -> TestResultData: context = generate_runtime_model_context(test, self.config, manifest) @@ -139,7 +136,7 @@ def execute_test( TestResultData.validate(test_result_dct) return TestResultData.from_dict(test_result_dct) - def execute(self, test: CompiledTestNode, manifest: Manifest): + def execute(self, test: TestNode, manifest: Manifest): result = self.execute_test(test, manifest) severity = test.config.severity.upper() diff --git a/test/unit/test_compiler.py b/test/unit/test_compiler.py index 919f897c549..649a5918f91 100644 --- a/test/unit/test_compiler.py +++ b/test/unit/test_compiler.py @@ -6,8 +6,7 @@ from dbt.adapters.postgres import Plugin from dbt.contracts.files import FileHash from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import NodeConfig, DependsOn, ParsedModelNode -from dbt.contracts.graph.compiled import CompiledModelNode, InjectedCTE +from dbt.contracts.graph.nodes import NodeConfig, DependsOn, ModelNode, InjectedCTE from dbt.node_types import NodeType from datetime import datetime @@ -86,7 +85,7 @@ def test__prepend_ctes__already_has_cte(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -102,7 +101,7 @@ def test__prepend_ctes__already_has_cte(self): raw_code='with cte as (select * from something_else) select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -148,7 +147,7 @@ def test__prepend_ctes__no_ctes(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -165,7 +164,7 @@ def test__prepend_ctes__no_ctes(self): 'select * from source_table'), checksum=FileHash.from_contents(''), ), - 'model.root.view_no_cte': ParsedModelNode( + 'model.root.view_no_cte': ModelNode( name='view_no_cte', database='dbt', schema='analytics', @@ -224,7 +223,7 @@ def test__prepend_ctes(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -240,7 +239,7 @@ def test__prepend_ctes(self): raw_code='select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 
'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -284,7 +283,7 @@ def test__prepend_ctes(self): def test__prepend_ctes__cte_not_compiled(self): ephemeral_config = self.model_config.replace(materialized='ephemeral') - parsed_ephemeral = ParsedModelNode( + parsed_ephemeral = ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -304,7 +303,7 @@ def test__prepend_ctes__cte_not_compiled(self): raw_code='select * from source_table', checksum=FileHash.from_contents(''), ) - compiled_ephemeral = CompiledModelNode( + compiled_ephemeral = ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -331,7 +330,7 @@ def test__prepend_ctes__cte_not_compiled(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': CompiledModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -400,7 +399,7 @@ def test__prepend_ctes__multiple_levels(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -417,7 +416,7 @@ def test__prepend_ctes__multiple_levels(self): checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -433,7 +432,7 @@ def test__prepend_ctes__multiple_levels(self): raw_code='select * from {{ref("ephemeral_level_two")}}', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral_level_two': ParsedModelNode( + 'model.root.ephemeral_level_two': ModelNode( name='ephemeral_level_two', database='dbt', schema='analytics', @@ -488,7 +487,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -504,7 +503,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): raw_code='select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 'model.root.inner_ephemeral': ParsedModelNode( + 'model.root.inner_ephemeral': ModelNode( name='inner_ephemeral', database='dbt', schema='analytics', @@ -520,7 +519,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): raw_code='select * from source_table', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', diff --git a/test/unit/test_context.py b/test/unit/test_context.py index c25729f0afb..a567e032f55 100644 --- a/test/unit/test_context.py +++ b/test/unit/test_context.py @@ -10,11 +10,11 @@ from dbt.adapters import factory from dbt.adapters.base import AdapterConfig from dbt.clients.jinja import MacroStack -from dbt.contracts.graph.parsed import ( - ParsedModelNode, +from dbt.contracts.graph.nodes import ( + ModelNode, NodeConfig, DependsOn, - ParsedMacro, + Macro, ) from dbt.config.project import VarProvider from dbt.context import base, target, configured, providers, docs, manifest, macros @@ -33,7 +33,7 @@ class TestVar(unittest.TestCase): def setUp(self): - self.model = ParsedModelNode( + self.model = ModelNode( alias="model_one", name="model_one", database="dbt", @@ -273,7 +273,7 @@ def assert_has_keys(required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str def model(): - return ParsedModelNode( + return ModelNode( alias="model_one", name="model_one", database="dbt", @@ -315,7 
+315,7 @@ def test_base_context(): def mock_macro(name, package_name): macro = mock.MagicMock( - __class__=ParsedMacro, + __class__=Macro, package_name=package_name, resource_type="macro", unique_id=f"macro.{package_name}.{name}", @@ -335,7 +335,7 @@ def mock_manifest(config): def mock_model(): return mock.MagicMock( - __class__=ParsedModelNode, + __class__=ModelNode, alias="model_one", name="model_one", database="dbt", diff --git a/test/unit/test_contracts_graph_compiled.py b/test/unit/test_contracts_graph_compiled.py index 982673514ab..fe1e25d7925 100644 --- a/test/unit/test_contracts_graph_compiled.py +++ b/test/unit/test_contracts_graph_compiled.py @@ -2,10 +2,10 @@ import pytest from dbt.contracts.files import FileHash -from dbt.contracts.graph.compiled import ( - CompiledModelNode, InjectedCTE, CompiledGenericTestNode +from dbt.contracts.graph.nodes import ( + ModelNode, InjectedCTE, GenericTestNode ) -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( DependsOn, NodeConfig, TestConfig, TestMetadata, ColumnInfo ) from dbt.node_types import NodeType @@ -22,7 +22,7 @@ @pytest.fixture def basic_uncompiled_model(): - return CompiledModelNode( + return ModelNode( package_name='test', path='/root/models/foo.sql', original_file_path='models/foo.sql', @@ -54,7 +54,7 @@ def basic_uncompiled_model(): @pytest.fixture def basic_compiled_model(): - return CompiledModelNode( + return ModelNode( package_name='test', path='/root/models/foo.sql', original_file_path='models/foo.sql', @@ -210,19 +210,19 @@ def basic_compiled_dict(): def test_basic_uncompiled_model(minimal_uncompiled_dict, basic_uncompiled_dict, basic_uncompiled_model): node_dict = basic_uncompiled_dict node = basic_uncompiled_model - assert_symmetric(node, node_dict, CompiledModelNode) + assert_symmetric(node, node_dict, ModelNode) assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False - assert_from_dict(node, minimal_uncompiled_dict, CompiledModelNode) + assert_from_dict(node, minimal_uncompiled_dict, ModelNode) pickle.loads(pickle.dumps(node)) def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model): node_dict = basic_compiled_dict node = basic_compiled_model - assert_symmetric(node, node_dict, CompiledModelNode) + assert_symmetric(node, node_dict, ModelNode) assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False @@ -231,13 +231,13 @@ def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model): def test_invalid_extra_fields_model(minimal_uncompiled_dict): bad_extra = minimal_uncompiled_dict bad_extra['notvalid'] = 'nope' - assert_fails_validation(bad_extra, CompiledModelNode) + assert_fails_validation(bad_extra, ModelNode) def test_invalid_bad_type_model(minimal_uncompiled_dict): bad_type = minimal_uncompiled_dict bad_type['resource_type'] = str(NodeType.Macro) - assert_fails_validation(bad_type, CompiledModelNode) + assert_fails_validation(bad_type, ModelNode) unchanged_compiled_models = [ @@ -346,7 +346,7 @@ def minimal_schema_test_dict(): @pytest.fixture def basic_uncompiled_schema_test_node(): - return CompiledGenericTestNode( + return GenericTestNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -379,7 +379,7 @@ def basic_uncompiled_schema_test_node(): @pytest.fixture def basic_compiled_schema_test_node(): - return CompiledGenericTestNode( + return GenericTestNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -522,19 
+522,19 @@ def test_basic_uncompiled_schema_test(basic_uncompiled_schema_test_node, basic_u node = basic_uncompiled_schema_test_node node_dict = basic_uncompiled_schema_test_dict minimum = minimal_schema_test_dict - assert_symmetric(node, node_dict, CompiledGenericTestNode) + assert_symmetric(node, node_dict, GenericTestNode) assert node.empty is False assert node.is_refable is False assert node.is_ephemeral is False - assert_from_dict(node, minimum, CompiledGenericTestNode) + assert_from_dict(node, minimum, GenericTestNode) def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compiled_schema_test_dict): node = basic_compiled_schema_test_node node_dict = basic_compiled_schema_test_dict - assert_symmetric(node, node_dict, CompiledGenericTestNode) + assert_symmetric(node, node_dict, GenericTestNode) assert node.empty is False assert node.is_refable is False assert node.is_ephemeral is False @@ -543,13 +543,13 @@ def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compi def test_invalid_extra_schema_test_fields(minimal_schema_test_dict): bad_extra = minimal_schema_test_dict bad_extra['extra'] = 'extra value' - assert_fails_validation(bad_extra, CompiledGenericTestNode) + assert_fails_validation(bad_extra, GenericTestNode) def test_invalid_resource_type_schema_test(minimal_schema_test_dict): bad_type = minimal_schema_test_dict bad_type['resource_type'] = str(NodeType.Model) - assert_fails_validation(bad_type, CompiledGenericTestNode) + assert_fails_validation(bad_type, GenericTestNode) unchanged_schema_tests = [ diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py index 1114b65c31c..453d544ca1c 100644 --- a/test/unit/test_contracts_graph_parsed.py +++ b/test/unit/test_contracts_graph_parsed.py @@ -13,23 +13,23 @@ EmptySnapshotConfig, Hook, ) -from dbt.contracts.graph.parsed import ( - ParsedModelNode, +from dbt.contracts.graph.nodes import ( + ModelNode, DependsOn, ColumnInfo, - ParsedGenericTestNode, - ParsedSnapshotNode, + GenericTestNode, + SnapshotNode, IntermediateSnapshotNode, ParsedNodePatch, - ParsedMacro, - ParsedExposure, - ParsedMetric, - ParsedSeedNode, + Macro, + Exposure, + Metric, + SeedNode, Docs, MacroDependsOn, - ParsedSourceDefinition, - ParsedDocumentation, - ParsedHookNode, + SourceDefinition, + Documentation, + HookNode, ExposureOwner, TestMetadata, ) @@ -172,7 +172,7 @@ def base_parsed_model_dict(): @pytest.fixture def basic_parsed_model_object(): - return ParsedModelNode( + return ModelNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -279,7 +279,7 @@ def complex_parsed_model_dict(): @pytest.fixture def complex_parsed_model_object(): - return ParsedModelNode( + return ModelNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -342,14 +342,14 @@ def test_invalid_bad_tags(base_parsed_model_dict): # bad top-level field bad_tags = base_parsed_model_dict bad_tags['tags'] = 100 - assert_fails_validation(bad_tags, ParsedModelNode) + assert_fails_validation(bad_tags, ModelNode) def test_invalid_bad_materialized(base_parsed_model_dict): # bad nested field bad_materialized = base_parsed_model_dict bad_materialized['config']['materialized'] = None - assert_fails_validation(bad_materialized, ParsedModelNode) + assert_fails_validation(bad_materialized, ModelNode) unchanged_nodes = [ @@ -468,7 +468,7 @@ def basic_parsed_seed_dict(): @pytest.fixture def basic_parsed_seed_object(): - return ParsedSeedNode( + return 
SeedNode( name='foo', resource_type=NodeType.Seed, path='/root/seeds/seed.csv', @@ -570,7 +570,7 @@ def complex_parsed_seed_dict(): @pytest.fixture def complex_parsed_seed_object(): - return ParsedSeedNode( + return SeedNode( name='foo', resource_type=NodeType.Seed, path='/root/seeds/seed.csv', @@ -608,7 +608,7 @@ def test_seed_basic(basic_parsed_seed_dict, basic_parsed_seed_object, minimal_pa assert_symmetric(basic_parsed_seed_object, basic_parsed_seed_dict) assert basic_parsed_seed_object.get_materialization() == 'seed' - assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, ParsedSeedNode) + assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, SeedNode) def test_seed_complex(complex_parsed_seed_dict, complex_parsed_seed_object): @@ -719,7 +719,7 @@ def basic_parsed_model_patch_object(): @pytest.fixture def patched_model_object(): - return ParsedModelNode( + return ModelNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -824,7 +824,7 @@ def base_parsed_hook_dict(): @pytest.fixture def base_parsed_hook_object(): - return ParsedHookNode( + return HookNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -911,7 +911,7 @@ def complex_parsed_hook_dict(): @pytest.fixture def complex_parsed_hook_object(): - return ParsedHookNode( + return HookNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -952,11 +952,11 @@ def test_basic_parsed_hook(minimal_parsed_hook_dict, base_parsed_hook_dict, base node_dict = base_parsed_hook_dict minimum = minimal_parsed_hook_dict - assert_symmetric(node, node_dict, ParsedHookNode) + assert_symmetric(node, node_dict, HookNode) assert node.empty is False assert node.is_refable is False assert node.get_materialization() == 'view' - assert_from_dict(node, minimum, ParsedHookNode) + assert_from_dict(node, minimum, HookNode) pickle.loads(pickle.dumps(node)) @@ -973,7 +973,7 @@ def test_complex_parsed_hook(complex_parsed_hook_dict, complex_parsed_hook_objec def test_invalid_hook_index_type(base_parsed_hook_dict): bad_index = base_parsed_hook_dict bad_index['index'] = 'a string!?' 
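# (This negative check is the counterpart of the round-trip contract
# exercised above; roughly:
#     assert HookNode.from_dict(node.to_dict(omit_none=True)) == node
# for a valid node dict, per assert_symmetric.)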
- assert_fails_validation(bad_index, ParsedHookNode) + assert_fails_validation(bad_index, HookNode) @pytest.fixture @@ -1051,7 +1051,7 @@ def basic_parsed_schema_test_dict(): @pytest.fixture def basic_parsed_schema_test_object(): - return ParsedGenericTestNode( + return GenericTestNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -1143,7 +1143,7 @@ def complex_parsed_schema_test_object(): severity='WARN' ) cfg._extra.update({'extra_key': 'extra value'}) - return ParsedGenericTestNode( + return GenericTestNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -1180,20 +1180,20 @@ def test_basic_schema_test_node(minimal_parsed_schema_test_dict, basic_parsed_sc node = basic_parsed_schema_test_object node_dict = basic_parsed_schema_test_dict minimum = minimal_parsed_schema_test_dict - assert_symmetric(node, node_dict, ParsedGenericTestNode) + assert_symmetric(node, node_dict, GenericTestNode) assert node.empty is False assert node.is_ephemeral is False assert node.is_refable is False assert node.get_materialization() == 'test' - assert_from_dict(node, minimum, ParsedGenericTestNode) + assert_from_dict(node, minimum, GenericTestNode) pickle.loads(pickle.dumps(node)) def test_complex_schema_test_node(complex_parsed_schema_test_dict, complex_parsed_schema_test_object): # this tests for the presence of _extra keys - node = complex_parsed_schema_test_object # ParsedGenericTestNode + node = complex_parsed_schema_test_object # GenericTestNode assert(node.config._extra['extra_key']) node_dict = complex_parsed_schema_test_dict assert_symmetric(node, node_dict) @@ -1204,13 +1204,13 @@ def test_invalid_column_name_type(complex_parsed_schema_test_dict): # bad top-level field bad_column_name = complex_parsed_schema_test_dict bad_column_name['column_name'] = {} - assert_fails_validation(bad_column_name, ParsedGenericTestNode) + assert_fails_validation(bad_column_name, GenericTestNode) def test_invalid_severity(complex_parsed_schema_test_dict): invalid_config_value = complex_parsed_schema_test_dict invalid_config_value['config']['severity'] = 'WERROR' - assert_fails_validation(invalid_config_value, ParsedGenericTestNode) + assert_fails_validation(invalid_config_value, GenericTestNode) @pytest.fixture @@ -1494,7 +1494,7 @@ def basic_timestamp_snapshot_dict(): @pytest.fixture def basic_timestamp_snapshot_object(): - return ParsedSnapshotNode( + return SnapshotNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -1634,7 +1634,7 @@ def basic_check_snapshot_dict(): @pytest.fixture def basic_check_snapshot_object(): - return ParsedSnapshotNode( + return SnapshotNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -1719,10 +1719,10 @@ def test_timestamp_snapshot_ok(basic_timestamp_snapshot_dict, basic_timestamp_sn node = basic_timestamp_snapshot_object inter = basic_intermediate_timestamp_snapshot_object - assert_symmetric(node, node_dict, ParsedSnapshotNode) -# node_from_dict = ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) + assert_symmetric(node, node_dict, SnapshotNode) +# node_from_dict = SnapshotNode.from_dict(inter.to_dict(omit_none=True)) # node_from_dict.created_at = 1 - assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node + assert SnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node assert node.is_refable is True assert node.is_ephemeral is False pickle.loads(pickle.dumps(node)) @@ -1733,8 +1733,8 @@ def 
test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_objec node = basic_check_snapshot_object inter = basic_intermediate_check_snapshot_object - assert_symmetric(node, node_dict, ParsedSnapshotNode) - assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node + assert_symmetric(node, node_dict, SnapshotNode) + assert SnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node assert node.is_refable is True assert node.is_ephemeral is False pickle.loads(pickle.dumps(node)) @@ -1743,7 +1743,7 @@ def test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_objec def test_invalid_snapshot_bad_resource_type(basic_timestamp_snapshot_dict): bad_resource_type = basic_timestamp_snapshot_dict bad_resource_type['resource_type'] = str(NodeType.Model) - assert_fails_validation(bad_resource_type, ParsedSnapshotNode) + assert_fails_validation(bad_resource_type, SnapshotNode) def test_basic_parsed_node_patch(basic_parsed_model_patch_object, basic_parsed_model_patch_dict): @@ -1792,7 +1792,7 @@ def test_populated_parsed_node_patch(populated_parsed_node_patch_dict, populated class TestParsedMacro(ContractTestCase): - ContractType = ParsedMacro + ContractType = Macro def _ok_dict(self): return { @@ -1843,7 +1843,7 @@ def test_invalid_extra_field(self): class TestParsedDocumentation(ContractTestCase): - ContractType = ParsedDocumentation + ContractType = Documentation def _ok_dict(self): return { @@ -1931,7 +1931,7 @@ def basic_parsed_source_definition_dict(): @pytest.fixture def basic_parsed_source_definition_object(): - return ParsedSourceDefinition( + return SourceDefinition( columns={}, database='some_db', description='', @@ -1990,7 +1990,7 @@ def complex_parsed_source_definition_dict(): @pytest.fixture def complex_parsed_source_definition_object(): - return ParsedSourceDefinition( + return SourceDefinition( columns={}, database='some_db', description='', @@ -2019,32 +2019,32 @@ def test_basic_source_definition(minimum_parsed_source_definition_dict, basic_pa node_dict = basic_parsed_source_definition_dict minimum = minimum_parsed_source_definition_dict - assert_symmetric(node, node_dict, ParsedSourceDefinition) + assert_symmetric(node, node_dict, SourceDefinition) assert node.is_ephemeral is False assert node.is_refable is False assert node.has_freshness is False - assert_from_dict(node, minimum, ParsedSourceDefinition) + assert_from_dict(node, minimum, SourceDefinition) pickle.loads(pickle.dumps(node)) def test_invalid_missing(minimum_parsed_source_definition_dict): bad_missing_name = minimum_parsed_source_definition_dict del bad_missing_name['name'] - assert_fails_validation(bad_missing_name, ParsedSourceDefinition) + assert_fails_validation(bad_missing_name, SourceDefinition) def test_invalid_bad_resource_type(minimum_parsed_source_definition_dict): bad_resource_type = minimum_parsed_source_definition_dict bad_resource_type['resource_type'] = str(NodeType.Model) - assert_fails_validation(bad_resource_type, ParsedSourceDefinition) + assert_fails_validation(bad_resource_type, SourceDefinition) def test_complex_source_definition(complex_parsed_source_definition_dict, complex_parsed_source_definition_object): node = complex_parsed_source_definition_object node_dict = complex_parsed_source_definition_dict - assert_symmetric(node, node_dict, ParsedSourceDefinition) + assert_symmetric(node, node_dict, SourceDefinition) assert node.is_ephemeral is False assert node.is_refable is False @@ -2150,7 +2150,7 @@ def basic_parsed_exposure_dict(): @pytest.fixture def 
basic_parsed_exposure_object(): - return ParsedExposure( + return Exposure( name='my_exposure', type=ExposureType.Notebook, fqn=['test', 'exposures', 'my_exposure'], @@ -2207,7 +2207,7 @@ def complex_parsed_exposure_dict(): @pytest.fixture def complex_parsed_exposure_object(): - return ParsedExposure( + return Exposure( name='my_exposure', type=ExposureType.Analysis, owner=ExposureOwner(email='test@example.com', name='A Name'), @@ -2228,13 +2228,13 @@ def complex_parsed_exposure_object(): def test_basic_parsed_exposure(minimal_parsed_exposure_dict, basic_parsed_exposure_dict, basic_parsed_exposure_object): - assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, ParsedExposure) - assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, ParsedExposure) + assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, Exposure) + assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, Exposure) pickle.loads(pickle.dumps(basic_parsed_exposure_object)) def test_complex_parsed_exposure(complex_parsed_exposure_dict, complex_parsed_exposure_object): - assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, ParsedExposure) + assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, Exposure) unchanged_parsed_exposures = [ @@ -2325,7 +2325,7 @@ def basic_parsed_metric_dict(): @pytest.fixture def basic_parsed_metric_object(): - return ParsedMetric( + return Metric( name='my_metric', calculation_method='count', fqn=['test', 'metrics', 'my_metric'], diff --git a/test/unit/test_docs_blocks.py b/test/unit/test_docs_blocks.py index 8b87463313a..89821abfe12 100644 --- a/test/unit/test_docs_blocks.py +++ b/test/unit/test_docs_blocks.py @@ -3,7 +3,7 @@ from dbt.contracts.files import SourceFile, FileHash, FilePath from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedDocumentation +from dbt.contracts.graph.nodes import Documentation from dbt.node_types import NodeType from dbt.parser import docs from dbt.parser.search import FileBlock @@ -155,7 +155,7 @@ def test_load_file(self): docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name) self.assertEqual(len(docs_values), 2) for result in docs_values: - self.assertIsInstance(result, ParsedDocumentation) + self.assertIsInstance(result, Documentation) self.assertEqual(result.package_name, 'some_package') self.assertEqual(result.original_file_path, self.testfile_path) self.assertEqual(result.resource_type, NodeType.Documentation) @@ -179,7 +179,7 @@ def test_load_file_extras(self): docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name) self.assertEqual(len(docs_values), 2) for result in docs_values: - self.assertIsInstance(result, ParsedDocumentation) + self.assertIsInstance(result, Documentation) self.assertEqual(docs_values[0].name, 'snowplow_sessions') self.assertEqual(docs_values[1].name, 'snowplow_sessions__session_id') @@ -196,7 +196,7 @@ def test_multiple_raw_blocks(self): docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name) self.assertEqual(len(docs_values), 2) for result in docs_values: - self.assertIsInstance(result, ParsedDocumentation) + self.assertIsInstance(result, Documentation) self.assertEqual(result.package_name, 'some_package') self.assertEqual(result.original_file_path, self.testfile_path) self.assertEqual(result.resource_type, NodeType.Documentation) diff --git a/test/unit/test_graph_selector_methods.py 
b/test/unit/test_graph_selector_methods.py index 87343ca3756..5d99182d62a 100644 --- a/test/unit/test_graph_selector_methods.py +++ b/test/unit/test_graph_selector_methods.py @@ -6,18 +6,18 @@ from pathlib import Path from dbt.contracts.files import FileHash -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( DependsOn, MacroDependsOn, NodeConfig, - ParsedMacro, - ParsedModelNode, - ParsedExposure, - ParsedMetric, - ParsedSeedNode, - ParsedSingularTestNode, - ParsedGenericTestNode, - ParsedSourceDefinition, + Macro, + ModelNode, + Exposure, + Metric, + SeedNode, + SingularTestNode, + GenericTestNode, + SourceDefinition, TestConfig, TestMetadata, ColumnInfo, @@ -42,7 +42,7 @@ MetricSelectorMethod, ) import dbt.exceptions -import dbt.contracts.graph.parsed +import dbt.contracts.graph.nodes from .utils import replace_config @@ -77,7 +77,7 @@ def make_model(pkg, name, sql, refs=None, sources=None, tags=None, path=None, al source_values.append([src.source_name, src.name]) depends_on_nodes.append(src.unique_id) - return ParsedModelNode( + return ModelNode( language='sql', raw_code=sql, database='dbt', @@ -117,7 +117,7 @@ def make_seed(pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extr checksum = FileHash.from_contents('') fqn = [pkg] + fqn_extras + [name] - return ParsedSeedNode( + return SeedNode( language='sql', raw_code='', database='dbt', @@ -148,7 +148,7 @@ def make_source(pkg, source_name, table_name, path=None, loader=None, identifier fqn = [pkg] + fqn_extras + [source_name, table_name] - return ParsedSourceDefinition( + return SourceDefinition( fqn=fqn, database='dbt', schema='dbt_schema', @@ -174,7 +174,7 @@ def make_macro(pkg, name, macro_sql, path=None, depends_on_macros=None): if depends_on_macros is None: depends_on_macros = [] - return ParsedMacro( + return Macro( name=name, macro_sql=macro_sql, unique_id=f'macro.{pkg}.{name}', @@ -200,7 +200,7 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No ref_values = [] source_values = [] # this doesn't really have to be correct - if isinstance(test_model, ParsedSourceDefinition): + if isinstance(test_model, SourceDefinition): kwargs['model'] = "{{ source('" + test_model.source_name + \ "', '" + test_model.name + "') }}" source_values.append([test_model.source_name, test_model.name]) @@ -247,7 +247,7 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No source_values.append([source.source_name, source.name]) depends_on_nodes.append(source.unique_id) - return ParsedGenericTestNode( + return GenericTestNode( language='sql', raw_code=raw_code, test_metadata=TestMetadata( @@ -303,7 +303,7 @@ def make_data_test(pkg, name, sql, refs=None, sources=None, tags=None, path=None source_values.append([src.source_name, src.name]) depends_on_nodes.append(src.unique_id) - return ParsedSingularTestNode( + return SingularTestNode( language='sql', raw_code=sql, database='dbt', @@ -336,7 +336,7 @@ def make_exposure(pkg, name, path=None, fqn_extras=None, owner=None): owner = ExposureOwner(email='test@example.com') fqn = [pkg, 'exposures'] + fqn_extras + [name] - return ParsedExposure( + return Exposure( name=name, type=ExposureType.Notebook, fqn=fqn, @@ -352,7 +352,7 @@ def make_metric(pkg, name, path=None): if path is None: path = 'schema.yml' - return ParsedMetric( + return Metric( name=name, path='schema.yml', package_name=pkg, @@ -970,14 +970,14 @@ def test_select_state_changed_seed_checksum_path_to_path(manifest, previous_stat change_node(manifest, 
seed.replace(checksum=FileHash( name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'modified') warn_or_error_patch.assert_called_once() event = warn_or_error_patch.call_args[0][0] assert event.info.name == 'SeedExceedsLimitSamePath' msg = event.info.msg assert msg.startswith('Found a seed (pkg.seed) >1MB in size') - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() @@ -986,7 +986,7 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state change_node(manifest, seed.replace(checksum=FileHash( name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert search_manifest_using_method( manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_called_once() @@ -994,7 +994,7 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state assert event.info.name == 'SeedIncreased' msg = event.info.msg assert msg.startswith('Found a seed (pkg.seed) >1MB in size') - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() @@ -1003,11 +1003,11 @@ def test_select_state_changed_seed_checksum_path_to_sha(manifest, previous_state change_node(previous_state.manifest, seed.replace( checksum=FileHash(name='path', checksum=seed.original_file_path))) method = statemethod(manifest, previous_state) - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert search_manifest_using_method( manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_not_called() - with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch: + with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') warn_or_error_patch.assert_not_called() diff --git a/test/unit/test_macro_resolver.py b/test/unit/test_macro_resolver.py index 17e1aca6dca..3e0b7622bce 100644 --- a/test/unit/test_macro_resolver.py +++ b/test/unit/test_macro_resolver.py @@ -1,15 +1,15 @@ import unittest from unittest import mock -from dbt.contracts.graph.parsed import ( - ParsedMacro +from dbt.contracts.graph.nodes import ( + Macro ) from dbt.context.macro_resolver import MacroResolver def mock_macro(name, package_name): macro = mock.MagicMock( - __class__=ParsedMacro, + __class__=Macro, package_name=package_name, resource_type='macro', unique_id=f'macro.{package_name}.{name}', diff --git a/test/unit/test_manifest.py b/test/unit/test_manifest.py index c701aef5b32..8bec74787cd 100644 --- a/test/unit/test_manifest.py +++ b/test/unit/test_manifest.py @@ -15,14 +15,14 @@ 
from dbt.adapters.base.plugin import AdapterPlugin from dbt.contracts.files import FileHash from dbt.contracts.graph.manifest import Manifest, ManifestMetadata -from dbt.contracts.graph.parsed import ( - ParsedModelNode, +from dbt.contracts.graph.nodes import ( + ModelNode, DependsOn, NodeConfig, - ParsedSeedNode, - ParsedSourceDefinition, - ParsedExposure, - ParsedMetric + SeedNode, + SourceDefinition, + Exposure, + Metric ) from dbt.contracts.graph.unparsed import ( @@ -33,7 +33,6 @@ MetricTime ) -from dbt.contracts.graph.compiled import CompiledModelNode from dbt.events.functions import reset_metadata_vars from dbt.node_types import NodeType @@ -81,7 +80,7 @@ def setUp(self): }) self.exposures = { - 'exposure.root.my_exposure': ParsedExposure( + 'exposure.root.my_exposure': Exposure( name='my_exposure', type=ExposureType.Dashboard, owner=ExposureOwner(email='some@email.com'), @@ -101,7 +100,7 @@ def setUp(self): } self.metrics = { - 'metric.root.my_metric': ParsedMetric( + 'metric.root.my_metric': Metric( name='new_customers', label='New Customers', model='ref("multi")', @@ -133,7 +132,7 @@ def setUp(self): } self.nested_nodes = { - 'model.snowplow.events': ParsedModelNode( + 'model.snowplow.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -155,7 +154,7 @@ def setUp(self): raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.events': ParsedModelNode( + 'model.root.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -177,7 +176,7 @@ def setUp(self): raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.dep': ParsedModelNode( + 'model.root.dep': ModelNode( name='dep', database='dbt', schema='analytics', @@ -199,7 +198,7 @@ def setUp(self): raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.nested': ParsedModelNode( + 'model.root.nested': ModelNode( name='nested', database='dbt', schema='analytics', @@ -221,7 +220,7 @@ def setUp(self): raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.sibling': ParsedModelNode( + 'model.root.sibling': ModelNode( name='sibling', database='dbt', schema='analytics', @@ -243,7 +242,7 @@ def setUp(self): raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.multi': ParsedModelNode( + 'model.root.multi': ModelNode( name='multi', database='dbt', schema='analytics', @@ -268,7 +267,7 @@ def setUp(self): } self.sources = { - 'source.root.my_source.my_table': ParsedSourceDefinition( + 'source.root.my_source.my_table': SourceDefinition( database='raw', schema='analytics', resource_type=NodeType.Source, @@ -493,7 +492,7 @@ def test_get_resource_fqns_empty(self): def test_get_resource_fqns(self): nodes = copy.copy(self.nested_nodes) - nodes['seed.root.seed'] = ParsedSeedNode( + nodes['seed.root.seed'] = SeedNode( name='seed', database='dbt', schema='analytics', @@ -542,7 +541,7 @@ def test_get_resource_fqns(self): self.assertEqual(resource_fqns, expect) def test__deepcopy_copies_flat_graph(self): - test_node = ParsedModelNode( + test_node = ModelNode( name='events', database='dbt', schema='analytics', @@ -586,7 +585,7 @@ def setUp(self): }) self.nested_nodes = { - 'model.snowplow.events': CompiledModelNode( + 'model.snowplow.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -612,7 +611,7 @@ def setUp(self): extra_ctes=[], checksum=FileHash.empty(), ), - 'model.root.events': CompiledModelNode( + 'model.root.events': ModelNode( name='events', database='dbt', schema='analytics', @@ -638,7 +637,7 @@ def 
setUp(self): extra_ctes=[], checksum=FileHash.empty(), ), - 'model.root.dep': ParsedModelNode( + 'model.root.dep': ModelNode( name='dep', database='dbt', schema='analytics', @@ -659,7 +658,7 @@ def setUp(self): raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.nested': ParsedModelNode( + 'model.root.nested': ModelNode( name='nested', database='dbt', schema='analytics', @@ -680,7 +679,7 @@ def setUp(self): raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.sibling': ParsedModelNode( + 'model.root.sibling': ModelNode( name='sibling', database='dbt', schema='analytics', @@ -701,7 +700,7 @@ def setUp(self): raw_code='does not matter', checksum=FileHash.empty(), ), - 'model.root.multi': ParsedModelNode( + 'model.root.multi': ModelNode( name='multi', database='dbt', schema='analytics', diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 1ae9e3917ed..45e165b9abf 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -28,10 +28,10 @@ from dbt.contracts.graph.model_config import ( NodeConfig, TestConfig, SnapshotConfig ) -from dbt.contracts.graph.parsed import ( - ParsedModelNode, ParsedMacro, ParsedNodePatch, DependsOn, ColumnInfo, - ParsedSingularTestNode, ParsedGenericTestNode, ParsedSnapshotNode, - ParsedAnalysisNode, UnpatchedSourceDefinition +from dbt.contracts.graph.nodes import ( + ModelNode, Macro, DependsOn, ColumnInfo, + SingularTestNode, GenericTestNode, SnapshotNode, + AnalysisNode, UnpatchedSourceDefinition ) from dbt.contracts.graph.unparsed import Docs from dbt.parser.models import ( @@ -60,7 +60,7 @@ def _generate_macros(self): name_sql[name] = sql for name, sql in name_sql.items(): - pm = ParsedMacro( + pm = Macro( name=name, resource_type=NodeType.Macro, unique_id=f'macro.root.{name}', @@ -510,7 +510,7 @@ def test_basic(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedModelNode( + expected = ModelNode( alias='model_1', name='model_1', database='test', @@ -568,7 +568,7 @@ def model(dbt, session): node = list(self.parser.manifest.nodes.values())[0] # we decided to not detect and auto supply for now since import name doesn't always match library name python_packages = ['sklearn==0.1.0'] - expected = ParsedModelNode( + expected = ModelNode( alias='py_model', name='py_model', database='test', @@ -756,7 +756,7 @@ def file_block_for(self, data, filename): # parser does not run in this case. 
That test is in integration test suite 072 def test_built_in_macro_override_detection(self): macro_unique_id = 'macro.root.ref' - self.parser.manifest.macros[macro_unique_id] = ParsedMacro( + self.parser.manifest.macros[macro_unique_id] = Macro( name='ref', resource_type=NodeType.Macro, unique_id=macro_unique_id, @@ -768,7 +768,7 @@ def test_built_in_macro_override_detection(self): raw_code = '{{ config(materialized="table") }}select 1 as id' block = self.file_block_for(raw_code, 'nested/model_1.sql') - node = ParsedModelNode( + node = ModelNode( alias='model_1', name='model_1', database='test', @@ -803,7 +803,7 @@ def setUp(self): manifest=self.manifest, root_project=self.root_project_config, ) - self.example_node = ParsedModelNode( + self.example_node = ModelNode( alias='model_1', name='model_1', database='test', @@ -982,7 +982,7 @@ def test_single_block(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedSnapshotNode( + expected = SnapshotNode( alias='foo', name='foo', # the `database` entry is overrridden by the target_database config @@ -1051,7 +1051,7 @@ def test_multi_block(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=2) nodes = sorted(self.parser.manifest.nodes.values(), key=lambda n: n.name) - expect_foo = ParsedSnapshotNode( + expect_foo = SnapshotNode( alias='foo', name='foo', database='dbt', @@ -1088,7 +1088,7 @@ def test_multi_block(self): 'updated_at': 'last_update', }, ) - expect_bar = ParsedSnapshotNode( + expect_bar = SnapshotNode( alias='bar', name='bar', database='dbt', @@ -1151,7 +1151,7 @@ def test_single_block(self): self.parser.parse_file(block) self.assertEqual(len(self.parser.manifest.macros), 1) macro = list(self.parser.manifest.macros.values())[0] - expected = ParsedMacro( + expected = Macro( name='foo', resource_type=NodeType.Macro, unique_id='macro.snowplow.foo', @@ -1173,7 +1173,7 @@ def test_multiple_blocks(self): self.parser.parse_file(block) self.assertEqual(len(self.parser.manifest.macros), 2) macros = sorted(self.parser.manifest.macros.values(), key=lambda m: m.name) - expected_bar = ParsedMacro( + expected_bar = Macro( name='bar', resource_type=NodeType.Macro, unique_id='macro.snowplow.bar', @@ -1182,7 +1182,7 @@ def test_multiple_blocks(self): path=normalize('macros/macro.sql'), macro_sql='{% macro bar(c, d) %}c + d{% endmacro %}', ) - expected_foo = ParsedMacro( + expected_foo = Macro( name='foo', resource_type=NodeType.Macro, unique_id='macro.snowplow.foo', @@ -1220,7 +1220,7 @@ def test_basic(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedSingularTestNode( + expected = SingularTestNode( alias='test_1', name='test_1', database='test', @@ -1263,7 +1263,7 @@ def test_basic(self): self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) node = list(self.parser.manifest.macros.values())[0] - expected = ParsedMacro( + expected = Macro( name='test_not_null', resource_type=NodeType.Macro, unique_id='macro.snowplow.test_not_null', @@ -1297,7 +1297,7 @@ def test_basic(self): self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) node = list(self.parser.manifest.nodes.values())[0] - expected = ParsedAnalysisNode( + expected = AnalysisNode( alias='analysis_1', name='analysis_1', database='test', 
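A note for readers skimming this long mechanical diff: the test-suite hunks above all follow one pattern. The classes lose their `Parsed`/`Compiled` prefixes and move from `dbt.contracts.graph.parsed` (and `.compiled`) into `dbt.contracts.graph.nodes`. A minimal sketch of the post-patch import path, illustrative only and not part of any hunk here (it assumes a dbt-core checkout that already contains this change):

# Mapping implied by the hunks above (old -> new):
#   dbt.contracts.graph.parsed.ParsedModelNode         -> dbt.contracts.graph.nodes.ModelNode
#   dbt.contracts.graph.parsed.ParsedSeedNode          -> dbt.contracts.graph.nodes.SeedNode
#   dbt.contracts.graph.parsed.ParsedMacro             -> dbt.contracts.graph.nodes.Macro
#   dbt.contracts.graph.parsed.ParsedSourceDefinition  -> dbt.contracts.graph.nodes.SourceDefinition
#   dbt.contracts.graph.compiled.CompiledModelNode     -> dbt.contracts.graph.nodes.ModelNode
from dbt.contracts.graph.nodes import ModelNode, SeedNode, Macro, SourceDefinition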
diff --git a/test/unit/test_partial_parsing.py b/test/unit/test_partial_parsing.py index a784532fcf4..34e85b0cef0 100644 --- a/test/unit/test_partial_parsing.py +++ b/test/unit/test_partial_parsing.py @@ -5,7 +5,7 @@ import dbt.exceptions from dbt.parser.partial import PartialParsing from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedModelNode +from dbt.contracts.graph.nodes import ModelNode from dbt.contracts.files import ParseFileType, SourceFile, SchemaSourceFile, FilePath, FileHash from dbt.node_types import NodeType from .utils import normalize @@ -88,7 +88,7 @@ def setUp(self): self.partial_parsing = PartialParsing(self.saved_manifest, self.new_files) def get_model(self, name): - return ParsedModelNode( + return ModelNode( package_name='my_test', path=f'{name}.sql', original_file_path=f'models/{name}.sql', @@ -106,7 +106,7 @@ def get_model(self, name): ) def get_python_model(self, name): - return ParsedModelNode( + return ModelNode( package_name='my_test', path=f'{name}.py', original_file_path=f'models/{name}.py', diff --git a/test/unit/utils.py b/test/unit/utils.py index 5df2ef6ac8c..046ac24ff41 100644 --- a/test/unit/utils.py +++ b/test/unit/utils.py @@ -227,7 +227,7 @@ def assert_fails_validation(dct, cls): def generate_name_macros(package): - from dbt.contracts.graph.parsed import ParsedMacro + from dbt.contracts.graph.nodes import Macro from dbt.node_types import NodeType name_sql = {} for component in ('database', 'schema', 'alias'): @@ -240,7 +240,7 @@ def generate_name_macros(package): name_sql[name] = sql for name, sql in name_sql.items(): - pm = ParsedMacro( + pm = Macro( name=name, resource_type=NodeType.Macro, unique_id=f'macro.{package}.{name}', @@ -275,7 +275,7 @@ def _make_table_of(self, rows, column_types): def MockMacro(package, name='my_macro', **kwargs): - from dbt.contracts.graph.parsed import ParsedMacro + from dbt.contracts.graph.nodes import Macro from dbt.node_types import NodeType mock_kwargs = dict( @@ -288,7 +288,7 @@ def MockMacro(package, name='my_macro', **kwargs): mock_kwargs.update(kwargs) macro = mock.MagicMock( - spec=ParsedMacro, + spec=Macro, **mock_kwargs ) macro.name = name @@ -309,9 +309,9 @@ def MockGenerateMacro(package, component='some_component', **kwargs): def MockSource(package, source_name, name, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedSourceDefinition + from dbt.contracts.graph.nodes import SourceDefinition src = mock.MagicMock( - __class__=ParsedSourceDefinition, + __class__=SourceDefinition, resource_type=NodeType.Source, source_name=source_name, package_name=package, @@ -325,13 +325,13 @@ def MockSource(package, source_name, name, **kwargs): def MockNode(package, name, resource_type=None, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedModelNode, ParsedSeedNode + from dbt.contracts.graph.nodes import ModelNode, SeedNode if resource_type is None: resource_type = NodeType.Model if resource_type == NodeType.Model: - cls = ParsedModelNode + cls = ModelNode elif resource_type == NodeType.Seed: - cls = ParsedSeedNode + cls = SeedNode else: raise ValueError(f'I do not know how to handle {resource_type}') node = mock.MagicMock( @@ -348,9 +348,9 @@ def MockNode(package, name, resource_type=None, **kwargs): def MockDocumentation(package, name, **kwargs): from dbt.node_types import NodeType - from dbt.contracts.graph.parsed import ParsedDocumentation + from dbt.contracts.graph.nodes import Documentation 
doc = mock.MagicMock( - __class__=ParsedDocumentation, + __class__=Documentation, resource_type=NodeType.Documentation, package_name=package, search_name=name, diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 482e6f8672c..2656c84e249 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -94,7 +94,9 @@ def get_rendered_snapshot_config(**updates): "strategy": "check", "check_cols": "all", "unique_key": "id", + "target_database": None, "target_schema": None, + "updated_at": None, "meta": {}, "grants": {}, "packages": [], diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py index dbef6361713..955953a0dc0 100644 --- a/tests/functional/exit_codes/test_exit_codes.py +++ b/tests/functional/exit_codes/test_exit_codes.py @@ -49,6 +49,7 @@ def test_compile(self, project): assert len(results) == 7 def test_snapshot_pass(self, project): + run_dbt(["run", "--model", "good"]) results = run_dbt(['snapshot']) assert len(results) == 1 check_table_does_exist(project.adapter, 'good_snapshot') diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 711328f32f3..3dbff04c303 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -19,7 +19,7 @@ import dbt.flags as flags import inspect import json -from dbt.contracts.graph.parsed import ParsedModelNode, NodeConfig, DependsOn +from dbt.contracts.graph.nodes import ModelNode, NodeConfig, DependsOn from dbt.contracts.files import FileHash from mashumaro.types import SerializableType from typing import Generic, TypeVar, Dict @@ -103,7 +103,7 @@ def test_event_codes(self): def MockNode(): - return ParsedModelNode( + return ModelNode( alias="model_one", name="model_one", database="dbt", From 9c91f3a7bd40f4f071a5349cf1ce63c4b591c927 Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Wed, 7 Dec 2022 22:47:51 +0100 Subject: [PATCH 056/156] Adjust tox passenv to be multiline (#6405) --- tox.ini | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 41cfc795ca2..d49572a0a04 100644 --- a/tox.ini +++ b/tox.ini @@ -6,7 +6,9 @@ envlist = unit,integration description = unit testing download = true skip_install = true -passenv = DBT_* PYTEST_ADDOPTS +passenv = + DBT_* + PYTEST_ADDOPTS commands = {envpython} -m pytest --cov=core {posargs} test/unit {envpython} -m pytest --cov=core {posargs} tests/unit @@ -18,7 +20,10 @@ deps = description = functional testing download = true skip_install = true -passenv = DBT_* POSTGRES_TEST_* PYTEST_ADDOPTS +passenv = + DBT_* + POSTGRES_TEST_* + PYTEST_ADDOPTS commands = {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration {envpython} -m pytest --cov=core {posargs} tests/functional From 99f27de9340002f43337eab08625a76869ed074d Mon Sep 17 00:00:00 2001 From: timle Date: Wed, 7 Dec 2022 16:48:17 -0500 Subject: [PATCH 057/156] Feature/dbt deps tarball (#4689) * v0 - new dbt deps type: tarball url in support of https://github.com/dbt-labs/dbt-core/issues/4205 * flake8 fixes * adding max size tarball condition * clean up imports * typing * adding sha1 and subdirectory options; improve logging feedback sha1: allow user to specify sha1 in packages.yaml, will only install if package matches subdirectory: allow user to specify subdirectory of package in tarfile, if the package is a non standard structure (like with git subdirectory option) * simple tests added * flake 
fixes * changes to support tests; adding exceptions; fire_event logging * new logging events * tarball exceptions added * build out tests * removing in memory tarball test * update type codes to M - Misc * adding new events to test_events * fix spacing for flake * add retry download code - as used in registry calls * clean * remove saving tar in memory inside tarfile object will hit url multiple times instead * remove duplicative code after refactor * black updates * black formatting * black formatting * refactor - no more in-memory tarfile - all as file operations now - remove tarfile passing, always use tempfile instead - reorganize system.* functions, removing duplicative code - more notes on current flow and structure - esp need for pattern of 1) unpack 2) scan for package dir 3) copy to destination. - cleaning * cleaning and sync to new tarball code * cleaning and sync to new tarball code * requested changes from PR https://github.com/dbt-labs/dbt-core/pull/4689#discussion_r812970847 * reversions from revision 2 removing sha1 check to simplify/mirror hub install pattern * simplify/mirror hub install pattern to simplify/mirror hub install pattern - removing sha1 check - supply name/version to act as our 'metadata' source * simplify/mirror hub install pattern simplify with goal of mirroring hub install pattern - supporting subfolders like git packages, and sha1 checks are removed - existing code from RegistryPinnedPackage (install() and download_and_untar()) performs the operations - RegistryPinnedPackage install() and download_and_untar() are not currently set up as functions that can be used across classes - this should be moved to dbt.deps.base, or to a dbt.deps.common file - need dbt labs feedback on how to proceed (or leave as is) * remove revisions, no longer doing package check * slim down to basic tests more complex features have been removed (sha1, subfolder) so testing is much simpler! 
* fix naming to match hub's behavior remove version from package folder name * refactor install and download to upstream PinnedPackage class I'm on the fence about whether this is the right approach, but it seems the most sensible after some thought * Create Features-20221107-105018.yaml * fix flake, black, mypy errors * additional flake/black fixes * Update .changes/unreleased/Features-20221107-105018.yaml fix username on changelog Co-authored-by: Emily Rockman * change to fstring Co-authored-by: Emily Rockman * cleaning - remove comment * remove comment/question for dbt team * in support of issuecomment 1334055944 https://github.com/dbt-labs/dbt-core/pull/4689#issuecomment-1334055944 * in support of issuecomment 1334118433 https://github.com/dbt-labs/dbt-core/pull/4689#issuecomment-1334118433 * black fixes; remove debug bits * remove `.format` & add 'tarball' as version, so that the temp files format nicely: [tempfile_location]/dbt_utils_2..tar.gz # old vs [tempfile_location]/dbt_utils_1.tarball.tar.gz # current * port os.path refs in `PinnedPackage._install` to pathlib * lowercase as per PR feedback * update tests after removing version arg goes along with 8787ba41af585b954d18c24c2dccbf147035cd9f Co-authored-by: Emily Rockman --- .../unreleased/Features-20221107-105018.yaml | 8 ++ core/dbt/contracts/project.py | 8 +- core/dbt/deps/README.md | 6 +- core/dbt/deps/base.py | 31 ++++++++ core/dbt/deps/registry.py | 34 +--------- core/dbt/deps/resolver.py | 6 +- core/dbt/deps/tarball.py | 74 +++++++++++++++++++ core/dbt/task/deps.py | 4 +- test/unit/test_deps.py | 43 ++++++++++- 9 files changed, 178 insertions(+), 36 deletions(-) create mode 100644 .changes/unreleased/Features-20221107-105018.yaml create mode 100644 core/dbt/deps/tarball.py diff --git a/.changes/unreleased/Features-20221107-105018.yaml b/.changes/unreleased/Features-20221107-105018.yaml new file mode 100644 index 00000000000..db6a0ab753a --- /dev/null +++ b/.changes/unreleased/Features-20221107-105018.yaml @@ -0,0 +1,8 @@ +kind: Features +body: Adding tarball install method for packages. Allowing package tarball to be specified + via url in the packages.yaml. +time: 2022-11-07T10:50:18.464545-05:00 +custom: + Author: timle2 + Issue: "4205" + PR: "4689" diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index ea50d654f6c..7cfbcd75389 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -55,6 +55,12 @@ class LocalPackage(Package): RawVersion = Union[str, float] +@dataclass +class TarballPackage(Package): + tarball: str + name: str + + @dataclass class GitPackage(Package): git: str @@ -82,7 +88,7 @@ def get_versions(self) -> List[str]: return [str(self.version)] -PackageSpec = Union[LocalPackage, GitPackage, RegistryPackage] +PackageSpec = Union[LocalPackage, TarballPackage, GitPackage, RegistryPackage] @dataclass diff --git a/core/dbt/deps/README.md b/core/dbt/deps/README.md index a00802cefbf..99c7fd6fb80 100644 --- a/core/dbt/deps/README.md +++ b/core/dbt/deps/README.md @@ -16,6 +16,8 @@ Defines the base classes of `PinnedPackage` and `UnpinnedPackage`. `downloads_directory` sets the directory packages will be downloaded to. +`_install` has retry logic if the download or untarring process hits exceptions (see `dbt.utils._connection_exception_retry`). + ## `git.py` Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined with git urls.
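For orientation, a minimal standalone sketch of the retry pattern that `_install` delegates to, as described just above and implemented in the `base.py` hunks below. This is illustrative only: `connection_exception_retry` here is a simplified stand-in for `dbt.utils._connection_exception_retry`, and the URL and paths are made-up placeholders.

import functools

def connection_exception_retry(fn, max_attempts, attempt=0):
    # Simplified stand-in: call fn(), retrying on connection-style errors
    # until max_attempts is exhausted.
    try:
        return fn()
    except OSError:
        if attempt + 1 >= max_attempts:
            raise
        return connection_exception_retry(fn, max_attempts, attempt + 1)

def download_and_untar(download_url, tar_path, deps_path, package_name):
    # In dbt this calls system.download(...) and then system.untar_package(...).
    print(f"fetch {download_url} -> {tar_path}; unpack into {deps_path}/{package_name}")

# _install builds a zero-argument callable and hands it to the retry helper:
download_untar_fn = functools.partial(
    download_and_untar,
    "https://example.com/pkg.tar.gz",     # placeholder URL
    "/tmp/downloads/pkg.tarball.tar.gz",  # placeholder tar path
    "dbt_packages",                       # placeholder install path
    "pkg",                                # placeholder package name
)
connection_exception_retry(download_untar_fn, 5)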
@@ -28,8 +30,10 @@ Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined l Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined on the dbt Hub registry. -`install` has retry logic if the download or untarring process hit exceptions (see `dbt.utils._connection_exception_retry`). ## `resolver.py` Resolves the package definition into package objects to download. + +## `tarball.py` +Extends `PinnedPackage` and `UnpinnedPackage` specific to dbt packages defined by a URL to a tarball hosted on an HTTP server. diff --git a/core/dbt/deps/base.py b/core/dbt/deps/base.py index 27567440a52..f72878422aa 100644 --- a/core/dbt/deps/base.py +++ b/core/dbt/deps/base.py @@ -1,13 +1,16 @@ import abc import os +import functools import tempfile from contextlib import contextmanager +from pathlib import Path from typing import List, Optional, Generic, TypeVar from dbt.clients import system from dbt.contracts.project import ProjectPackageMetadata from dbt.events.functions import fire_event from dbt.events.types import DepsSetDownloadDirectory +from dbt.utils import _connection_exception_retry as connection_exception_retry DOWNLOADS_PATH = None @@ -97,6 +100,34 @@ def get_installation_path(self, project, renderer): def get_subdirectory(self): return None + def _install(self, project, renderer): + metadata = self.fetch_metadata(project, renderer) + + tar_name = f"{self.package}.{self.version}.tar.gz" + tar_path = (Path(get_downloads_path()) / tar_name).resolve(strict=False) + system.make_directory(str(tar_path.parent)) + + download_url = metadata.downloads.tarball + deps_path = project.packages_install_path + package_name = self.get_project_name(project, renderer) + + download_untar_fn = functools.partial( + self.download_and_untar, download_url, str(tar_path), deps_path, package_name + ) + connection_exception_retry(download_untar_fn, 5) + + def download_and_untar(self, download_url, tar_path, deps_path, package_name): + """ + Sometimes the download of the files fails and we want to retry. Sometimes the + download appears successful but the file did not make it through as expected + (generally due to a github incident). Either way we want to retry downloading + and untarring to see if we can get a success. 
Call this within + `_connection_exception_retry` + """ + + system.download(download_url, tar_path) + system.untar_package(tar_path, deps_path, package_name) + SomePinned = TypeVar("SomePinned", bound=PinnedPackage) SomeUnpinned = TypeVar("SomeUnpinned", bound="UnpinnedPackage") diff --git a/core/dbt/deps/registry.py b/core/dbt/deps/registry.py index bd8263e4001..9f163d89758 100644 --- a/core/dbt/deps/registry.py +++ b/core/dbt/deps/registry.py @@ -1,23 +1,20 @@ -import os -import functools from typing import List from dbt import semver from dbt import flags from dbt.version import get_installed_version -from dbt.clients import registry, system +from dbt.clients import registry from dbt.contracts.project import ( RegistryPackageMetadata, RegistryPackage, ) -from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path +from dbt.deps.base import PinnedPackage, UnpinnedPackage from dbt.exceptions import ( package_version_not_found, VersionsNotCompatibleException, DependencyException, package_not_found, ) -from dbt.utils import _connection_exception_retry as connection_exception_retry class RegistryPackageMixin: @@ -60,32 +57,7 @@ def _fetch_metadata(self, project, renderer) -> RegistryPackageMetadata: return RegistryPackageMetadata.from_dict(dct) def install(self, project, renderer): - metadata = self.fetch_metadata(project, renderer) - - tar_name = "{}.{}.tar.gz".format(self.package, self.version) - tar_path = os.path.realpath(os.path.join(get_downloads_path(), tar_name)) - system.make_directory(os.path.dirname(tar_path)) - - download_url = metadata.downloads.tarball - deps_path = project.packages_install_path - package_name = self.get_project_name(project, renderer) - - download_untar_fn = functools.partial( - self.download_and_untar, download_url, tar_path, deps_path, package_name - ) - connection_exception_retry(download_untar_fn, 5) - - def download_and_untar(self, download_url, tar_path, deps_path, package_name): - """ - Sometimes the download of the files fails and we want to retry. Sometimes the - download appears successful but the file did not make it through as expected - (generally due to a github incident). Either way we want to retry downloading - and untarring to see if we can get a success. 
Call this within - `_connection_exception_retry` - """ - - system.download(download_url, tar_path) - system.untar_package(tar_path, deps_path, package_name) + self._install(project, renderer) class RegistryUnpinnedPackage(RegistryPackageMixin, UnpinnedPackage[RegistryPinnedPackage]): diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py index 4d971c1cd9e..e4c1992894c 100644 --- a/core/dbt/deps/resolver.py +++ b/core/dbt/deps/resolver.py @@ -7,16 +7,18 @@ from dbt.config.renderer import DbtProjectYamlRenderer from dbt.deps.base import BasePackage, PinnedPackage, UnpinnedPackage from dbt.deps.local import LocalUnpinnedPackage +from dbt.deps.tarball import TarballUnpinnedPackage from dbt.deps.git import GitUnpinnedPackage from dbt.deps.registry import RegistryUnpinnedPackage from dbt.contracts.project import ( LocalPackage, + TarballPackage, GitPackage, RegistryPackage, ) -PackageContract = Union[LocalPackage, GitPackage, RegistryPackage] +PackageContract = Union[LocalPackage, TarballPackage, GitPackage, RegistryPackage] @dataclass @@ -69,6 +71,8 @@ def update_from(self, src: List[PackageContract]) -> None: for contract in src: if isinstance(contract, LocalPackage): pkg = LocalUnpinnedPackage.from_contract(contract) + elif isinstance(contract, TarballPackage): + pkg = TarballUnpinnedPackage.from_contract(contract) elif isinstance(contract, GitPackage): pkg = GitUnpinnedPackage.from_contract(contract) elif isinstance(contract, RegistryPackage): diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py new file mode 100644 index 00000000000..16c9cb0a20d --- /dev/null +++ b/core/dbt/deps/tarball.py @@ -0,0 +1,74 @@ +from dbt.contracts.project import RegistryPackageMetadata, TarballPackage +from dbt.deps.base import PinnedPackage, UnpinnedPackage + + +class TarballPackageMixin: + def __init__(self, tarball: str) -> None: + super().__init__() + self.tarball = tarball + + @property + def name(self): + return self.tarball + + def source_type(self) -> str: + return "tarball" + + +class TarballPinnedPackage(TarballPackageMixin, PinnedPackage): + def __init__(self, tarball: str, package: str) -> None: + super().__init__(tarball) + # setup to recycle RegistryPinnedPackage fns + self.package = package + self.version = "tarball" + + @property + def name(self): + return self.package + + def get_version(self): + return self.version + + def nice_version_name(self): + return f"tarball (url: {self.tarball})" + + def _fetch_metadata(self, project, renderer): + """ + recycle RegistryPackageMetadata so that we can use the install and + download_and_untar from RegistryPinnedPackage next. + build RegistryPackageMetadata from info passed via packages.yml since no + 'metadata' service exists in this case. 
+ """ + + dct = { + "name": self.package, + "packages": [], # note: required by RegistryPackageMetadata + "downloads": {"tarball": self.tarball}, + } + + return RegistryPackageMetadata.from_dict(dct) + + def install(self, project, renderer): + self._install(project, renderer) + + +class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]): + def __init__( + self, + tarball: str, + package: str, + ) -> None: + super().__init__(tarball) + # setup to recycle RegistryPinnedPackage fns + self.package = package + self.version = "tarball" + + @classmethod + def from_contract(cls, contract: TarballPackage) -> "TarballUnpinnedPackage": + return cls(tarball=contract.tarball, package=contract.name) + + def incorporate(self, other: "TarballUnpinnedPackage") -> "TarballUnpinnedPackage": + return TarballUnpinnedPackage(tarball=self.tarball, package=self.package) + + def resolved(self) -> TarballPinnedPackage: + return TarballPinnedPackage(tarball=self.tarball, package=self.package) diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py index 14ba794cd4d..0052840c570 100644 --- a/core/dbt/task/deps.py +++ b/core/dbt/task/deps.py @@ -37,10 +37,12 @@ def track_package_install( self, package_name: str, source_type: str, version: Optional[str] ) -> None: # Hub packages do not need to be hashed, as they are public - # Use the string 'local' for local package versions if source_type == "local": package_name = dbt.utils.md5(package_name) version = "local" + elif source_type == "tarball": + package_name = dbt.utils.md5(package_name) + version = "tarball" elif source_type != "hub": package_name = dbt.utils.md5(package_name) version = dbt.utils.md5(version) diff --git a/test/unit/test_deps.py b/test/unit/test_deps.py index 53d863a3206..650722ef6f4 100644 --- a/test/unit/test_deps.py +++ b/test/unit/test_deps.py @@ -7,21 +7,23 @@ import dbt.exceptions from dbt.deps.git import GitUnpinnedPackage from dbt.deps.local import LocalUnpinnedPackage +from dbt.deps.tarball import TarballUnpinnedPackage from dbt.deps.registry import RegistryUnpinnedPackage from dbt.clients.registry import is_compatible_version from dbt.deps.resolver import resolve_packages from dbt.contracts.project import ( LocalPackage, + TarballPackage, GitPackage, RegistryPackage, ) - from dbt.contracts.project import PackageConfig from dbt.semver import VersionSpecifier from dbt.version import get_installed_version from dbt.dataclass_schema import ValidationError + class TestLocalPackage(unittest.TestCase): def test_init(self): a_contract = LocalPackage.from_dict({'local': '/path/to/package'}) @@ -33,6 +35,45 @@ def test_init(self): self.assertEqual(str(a_pinned), '/path/to/package') +class TestTarballPackage(unittest.TestCase): + def test_TarballPackage(self): + from dbt.contracts.project import RegistryPackageMetadata + from mashumaro.exceptions import MissingField + + dict_well_formed_contract = ( + {'tarball': 'http://example.com', + 'name': 'my_cool_package'}) + + a_contract = ( + TarballPackage.from_dict(dict_well_formed_contract)) + + # check contract and resolver + self.assertEqual(a_contract.tarball, 'http://example.com') + self.assertEqual(a_contract.name, 'my_cool_package') + + a = TarballUnpinnedPackage.from_contract(a_contract) + self.assertEqual(a.tarball, 'http://example.com') + self.assertEqual(a.package, 'my_cool_package') + + a_pinned = a.resolved() + self.assertEqual(a_pinned.source_type(), 'tarball') + + # check bad contract (no name) fails + dict_missing_name_should_fail_on_contract = ( + 
{'tarball': 'http://example.com'}) + + with self.assertRaises(MissingField): + TarballPackage.from_dict(dict_missing_name_should_fail_on_contract) + + # check RegistryPackageMetadata - it is used in TarballUnpinnedPackage + dct = {'name' : a.package, + 'packages': [], # note: required by RegistryPackageMetadata + 'downloads' : {'tarball' : a_pinned.tarball}} + + metastore = RegistryPackageMetadata.from_dict(dct) + self.assertEqual(metastore.downloads.tarball, 'http://example.com') + + class TestGitPackage(unittest.TestCase): def test_init(self): a_contract = GitPackage.from_dict( From bef6edb9420099300511cef994ca470f30583156 Mon Sep 17 00:00:00 2001 From: bruno messias Date: Wed, 7 Dec 2022 20:52:38 -0300 Subject: [PATCH 058/156] Fix dbt.config.get default values (python-model) (#6317) * feat: add a list of default values to the ctx manager * tests: dbt.config.get default values * feat: validate the num of args in config.get * feat: jinja template for dbt.config.get default values * docs: changie yaml * fix: typo on error message Co-authored-by: Chenyu Li Co-authored-by: Chenyu Li --- .../unreleased/Fixes-20221124-163419.yaml | 7 ++++++ .../macros/python_model/python.sql | 5 +++-- core/dbt/parser/models.py | 22 +++++++++++++++++-- test/unit/test_parser.py | 21 ++++++++++++++++-- 4 files changed, 49 insertions(+), 6 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221124-163419.yaml diff --git a/.changes/unreleased/Fixes-20221124-163419.yaml b/.changes/unreleased/Fixes-20221124-163419.yaml new file mode 100644 index 00000000000..010a073269a --- /dev/null +++ b/.changes/unreleased/Fixes-20221124-163419.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: After this, it will be possible to use default values for dbt.config.get +time: 2022-11-24T16:34:19.039512764-03:00 +custom: + Author: devmessias + Issue: "6309" + PR: "6317" diff --git a/core/dbt/include/global_project/macros/python_model/python.sql b/core/dbt/include/global_project/macros/python_model/python.sql index 2155662987e..c56ff7f31c8 100644 --- a/core/dbt/include/global_project/macros/python_model/python.sql +++ b/core/dbt/include/global_project/macros/python_model/python.sql @@ -30,12 +30,13 @@ def source(*args, dbt_load_df_function): {% macro build_config_dict(model) %} {%- set config_dict = {} -%} - {%- for key in model.config.config_keys_used -%} + {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %} + {%- for key, default in config_dbt_used -%} {# weird type testing with enum, would be much easier to write this logic in Python!
#} {%- if key == 'language' -%} {%- set value = 'python' -%} {%- endif -%} - {%- set value = model.config[key] -%} + {%- set value = model.config.get(key, default) -%} {%- do config_dict.update({key: value}) -%} {%- endfor -%} config_dict = {{ config_dict }} diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index 7dea4aca135..8303e2f9c52 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -211,15 +211,33 @@ def parse_python_model(self, node, config, context): dbtParser = PythonParseVisitor(node) dbtParser.visit(tree) config_keys_used = [] + config_keys_defaults = [] for (func, args, kwargs) in dbtParser.dbt_function_calls: if func == "get": - config_keys_used.append(args[0]) + num_args = len(args) + if num_args == 0: + raise ParsingException( + "dbt.config.get() requires at least one argument", + node=node, + ) + if num_args > 2: + raise ParsingException( + f"dbt.config.get() takes at most 2 arguments ({num_args} given)", + node=node, + ) + key = args[0] + default_value = args[1] if num_args == 2 else None + config_keys_used.append(key) + config_keys_defaults.append(default_value) continue context[func](*args, **kwargs) if config_keys_used: # this is being used in macro build_config_dict - context["config"](config_keys_used=config_keys_used) + context["config"]( + config_keys_used=config_keys_used, + config_keys_defaults=config_keys_defaults, + ) def render_update(self, node: ModelNode, config: ContextConfig) -> None: self.manifest._parsing_info.static_analysis_path_count += 1 diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 45e165b9abf..9a91b59eb26 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -608,6 +608,25 @@ def model(dbt, session): node = list(self.parser.manifest.nodes.values())[0] self.assertEqual(node.config.to_dict()["config_keys_used"], ["param_1", "param_2"]) + def test_python_model_config_default(self): + py_code = """ +def model(dbt, session): + dbt.config.get("param_None", None) + dbt.config.get("param_Str", "default") + dbt.config.get("param_List", [1, 2]) + return df + """ + block = self.file_block_for(py_code, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + default_values = node.config.to_dict()["config_keys_defaults"] + self.assertIsNone(default_values[0]) + self.assertEqual(default_values[1], "default") + self.assertEqual(default_values[2], [1, 2]) + + def test_wrong_python_model_def_miss_session(self): py_code = """ def model(dbt): @@ -1319,5 +1338,3 @@ def test_basic(self): file_id = 'snowplow://' + normalize('analyses/nested/analysis_1.sql') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['analysis.snowplow.analysis_1']) - - From 0544b085439b3a635b8ce56adbf56d8e7c7e6839 Mon Sep 17 00:00:00 2001 From: Ian Knox <81931810+iknox-fa@users.noreply.github.com> Date: Thu, 8 Dec 2022 11:34:03 -0600 Subject: [PATCH 059/156] Add support for Python 3.11 (#6326) * Get running with Python 3.11 * More tests passing, mypy still unhappy * Upgrade to 3.11, and bump mashumaro * patch importlib.import_module last * lambda: Policy() default_factory on include and quote policy * Add changelog entry * Put a lambda on it * Fix text formatting for log file * Handle variant type return from e.log_level() Co-authored-by: Jeremy Cohen Co-authored-by: Josh Taylor Co-authored-by: Michelle Ark --- 
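To make the `default_factory` change concrete before the diff: a self-contained sketch of the failure mode quoted in the message above. The `Policy` below is a trimmed-down stand-in for dbt's include/quote policy class, kept only to demonstrate the Python 3.11 behavior.

from dataclasses import dataclass, field

@dataclass
class Policy:
    database: bool = True
    schema: bool = True
    identifier: bool = True

# Under Python 3.11, using an instance of an unhashable dataclass as a
# class-level field default raises at class-definition time:
#     include_policy: Policy = Policy()
#     ValueError: mutable default <class 'Policy'> for field include_policy
#     is not allowed: use default_factory
# so the fields are declared with a factory instead:

@dataclass
class Relation:
    include_policy: Policy = field(default_factory=lambda: Policy())
    quote_policy: Policy = field(default_factory=lambda: Policy())

print(Relation())  # each instance now gets its own Policy objects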
.../unreleased/Features-20221206-150704.yaml | 7 +++++++ .github/workflows/main.yml | 4 ++-- CONTRIBUTING.md | 4 ++-- Dockerfile.test | 3 +++ core/dbt/adapters/base/relation.py | 18 ++++++++++-------- core/dbt/contracts/project.py | 2 +- core/dbt/events/eventmgr.py | 6 +++++- core/dbt/events/types.py | 2 ++ core/dbt/helper_types.py | 4 ++-- core/setup.py | 1 + plugins/postgres/setup.py | 1 + tests/unit/test_version.py | 8 +++++++- tox.ini | 4 ++-- 13 files changed, 45 insertions(+), 19 deletions(-) create mode 100644 .changes/unreleased/Features-20221206-150704.yaml diff --git a/.changes/unreleased/Features-20221206-150704.yaml b/.changes/unreleased/Features-20221206-150704.yaml new file mode 100644 index 00000000000..47939ea5a79 --- /dev/null +++ b/.changes/unreleased/Features-20221206-150704.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Add support for Python 3.11 +time: 2022-12-06T15:07:04.753127+01:00 +custom: + Author: joshuataylor MichelleArk jtcohen6 + Issue: "6147" + PR: "6326" diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 257935419c8..8138b730d34 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -73,7 +73,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] env: TOXENV: "unit" @@ -118,7 +118,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] os: [ubuntu-20.04] include: - python-version: 3.8 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index efbb0a726ad..d06170f9c55 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -56,7 +56,7 @@ There are some tools that will be helpful to you in developing locally. While th These are the tools used in `dbt-core` development and testing: -- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, 3.8, 3.9, and 3.10 +- [`tox`](https://tox.readthedocs.io/en/latest/) to manage virtualenvs across python versions. We currently target the latest patch releases for Python 3.7, 3.8, 3.9, 3.10 and 3.11 - [`pytest`](https://docs.pytest.org/en/latest/) to define, discover, and run tests - [`flake8`](https://flake8.pycqa.org/en/latest/) for code linting - [`black`](https://github.com/psf/black) for code formatting @@ -160,7 +160,7 @@ suites. #### `tox` -[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and install dependencies in order to run tests. You can also run tests in parallel, for example, you can run unit tests for Python 3.7, Python 3.8, Python 3.9, and Python 3.10 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests in located in `tox.ini`. +[`tox`](https://tox.readthedocs.io/en/latest/) takes care of managing virtualenvs and installing dependencies in order to run tests. You can also run tests in parallel, for example, you can run unit tests for Python 3.7, Python 3.8, Python 3.9, Python 3.10 and Python 3.11 checks in parallel with `tox -p`. Also, you can run unit tests for specific python versions with `tox -e py37`. The configuration for these tests is located in `tox.ini`.
#### `pytest` diff --git a/Dockerfile.test b/Dockerfile.test index eb6ba824bcb..b5a373270dd 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -49,6 +49,9 @@ RUN apt-get update \ python3.10 \ python3.10-dev \ python3.10-venv \ + python3.11 \ + python3.11-dev \ + python3.11-venv \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index 55182396ef4..cd69b80a579 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -1,5 +1,5 @@ from collections.abc import Hashable -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set from dbt.contracts.graph.nodes import SourceDefinition, ParsedNode @@ -26,8 +26,10 @@ class BaseRelation(FakeAPIObject, Hashable): path: Path type: Optional[RelationType] = None quote_character: str = '"' - include_policy: Policy = Policy() - quote_policy: Policy = Policy() + # Python 3.11 requires that these use default_factory instead of simple default + # ValueError: mutable default for field include_policy is not allowed: use default_factory + include_policy: Policy = field(default_factory=lambda: Policy()) + quote_policy: Policy = field(default_factory=lambda: Policy()) dbt_created: bool = False def _is_exactish_match(self, field: ComponentName, value: str) -> bool: @@ -38,9 +40,9 @@ def _is_exactish_match(self, field: ComponentName, value: str) -> bool: @classmethod def _get_field_named(cls, field_name): - for field, _ in cls._get_fields(): - if field.name == field_name: - return field + for f, _ in cls._get_fields(): + if f.name == field_name: + return f # this should be unreachable raise ValueError(f"BaseRelation has no {field_name} field!") @@ -51,11 +53,11 @@ def __eq__(self, other): @classmethod def get_default_quote_policy(cls) -> Policy: - return cls._get_field_named("quote_policy").default + return cls._get_field_named("quote_policy").default_factory() @classmethod def get_default_include_policy(cls) -> Policy: - return cls._get_field_named("include_policy").default + return cls._get_field_named("include_policy").default_factory() def get(self, key, default=None): """Override `.get` to return a metadata object so we don't break diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index 7cfbcd75389..2fd7434bd87 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -222,7 +222,7 @@ class Project(HyphenatedDbtClassMixin, Replaceable): ), ) packages: List[PackageSpec] = field(default_factory=list) - query_comment: Optional[Union[QueryComment, NoValue, str]] = NoValue() + query_comment: Optional[Union[QueryComment, NoValue, str]] = field(default_factory=NoValue) @classmethod def validate(cls, data): diff --git a/core/dbt/events/eventmgr.py b/core/dbt/events/eventmgr.py index 4d0ddeb06ef..c2c922ab5a8 100644 --- a/core/dbt/events/eventmgr.py +++ b/core/dbt/events/eventmgr.py @@ -128,7 +128,11 @@ def create_debug_line(self, e: BaseEvent) -> str: log_line = f"\n\n{separator} {datetime.utcnow()} | {self.event_manager.invocation_id} {separator}\n" ts: str = datetime.utcnow().strftime("%H:%M:%S.%f") scrubbed_msg: str = self.scrubber(e.message()) # type: ignore - log_line += f"{self._get_color_tag()}{ts} [{e.log_level():<5}]{self._get_thread_name()} {scrubbed_msg}" + # log_level() for DynamicLevel events returns str instead of EventLevel + level = e.log_level().value if 
isinstance(e.log_level(), EventLevel) else e.log_level() + log_line += ( + f"{self._get_color_tag()}{ts} [{level:<5}]{self._get_thread_name()} {scrubbed_msg}" + ) return log_line def _get_color_tag(self) -> str: diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 843ef020bbd..f5f93cf5886 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -1914,6 +1914,7 @@ def message(self) -> str: @classmethod def status_to_level(cls, status): # The statuses come from TestStatus + # TODO should this return EventLevel enum instead? level_lookup = { "fail": "error", "pass": "info", @@ -2043,6 +2044,7 @@ def message(self) -> str: @classmethod def status_to_level(cls, status): # The statuses come from FreshnessStatus + # TODO should this return EventLevel enum instead? level_lookup = { "runtime error": "error", "pass": "info", diff --git a/core/dbt/helper_types.py b/core/dbt/helper_types.py index eec26a20c64..a8ff90fa75f 100644 --- a/core/dbt/helper_types.py +++ b/core/dbt/helper_types.py @@ -3,7 +3,7 @@ # necessary for annotating constructors from __future__ import annotations -from dataclasses import dataclass +from dataclasses import dataclass, field from datetime import timedelta from pathlib import Path from typing import Tuple, AbstractSet, Union @@ -85,7 +85,7 @@ def __eq__(self, other): class NoValue(dbtClassMixin): """Sometimes, you want a way to say none that isn't None""" - novalue: NVEnum = NVEnum.novalue + novalue: NVEnum = field(default_factory=lambda: NVEnum.novalue) dbtClassMixin.register_field_encoders( diff --git a/core/setup.py b/core/setup.py index 96bec2e96bf..719dd000329 100644 --- a/core/setup.py +++ b/core/setup.py @@ -81,6 +81,7 @@ "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", ], python_requires=">=3.7.2", ) diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py index 6b76e5cc375..3511c96f9fa 100644 --- a/plugins/postgres/setup.py +++ b/plugins/postgres/setup.py @@ -83,6 +83,7 @@ def _dbt_psycopg2_name(): "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", ], python_requires=">=3.7", ) diff --git a/tests/unit/test_version.py b/tests/unit/test_version.py index 6545891fc54..217988ba5e2 100644 --- a/tests/unit/test_version.py +++ b/tests/unit/test_version.py @@ -673,10 +673,16 @@ def mock_import(*args, **kwargs): def mock_versions(mocker, installed="1.0.0", latest=None, plugins={}): mocker.patch("dbt.version.__version__", installed) - mock_plugins(mocker, plugins) mock_latest_versions(mocker, latest, plugins) + # mock_plugins must be called last to avoid erronously raising an ImportError. + mock_plugins(mocker, plugins) +# NOTE: mock_plugins patches importlib.import_module, and should always be the last +# patch to be mocked in order to avoid erronously raising an ImportError. +# Explanation: As of Python 3.11, mock.patch indirectly uses importlib.import_module +# and thus uses the mocked object (in this case, mock_import) instead of the real +# implementation in subsequent mock.patch calls. 
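Concretely, the ordering pitfall the comment describes looks like this (a hypothetical pytest-mock test body, not one of the real tests): on Python 3.11, `mock.patch` resolves its dotted target string through `importlib.import_module`, so once that function itself is patched with a raising side effect, every later `patch` call fails. The upstream CPython report follows.

```python
# Hypothetical illustration of the Python 3.11 patch-ordering pitfall.
def test_patch_order(mocker):
    # Fine: importlib.import_module is still the real implementation here.
    mocker.patch("dbt.version.__version__", "1.0.0")

    # This patch must come last. On Python 3.11, any subsequent
    # mocker.patch("some.target") would resolve "some.target" through the
    # now-mocked import_module and raise ImportError instead of patching.
    mocker.patch("importlib.import_module", side_effect=ImportError("nope"))
```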
Issue: https://github.com/python/cpython/issues/98771 def mock_plugins(mocker, plugins): mock_find_spec = mocker.patch("importlib.util.find_spec") path = "/tmp/dbt/adapters" diff --git a/tox.ini b/tox.ini index d49572a0a04..53187161c7f 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,7 @@ skipsdist = True envlist = unit,integration -[testenv:{unit,py37,py38,py39,py310,py}] +[testenv:{unit,py37,py38,py39,py310,py311,py}] description = unit testing download = true skip_install = true @@ -16,7 +16,7 @@ deps = -rdev-requirements.txt -reditable-requirements.txt -[testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py-integration}] +[testenv:{integration,py37-integration,py38-integration,py39-integration,py310-integration,py311-integration,py-integration}] description = functional testing download = true skip_install = true From 0fbbc896b2c49a19ca048d7c75d66a5ed41d1a64 Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Mon, 12 Dec 2022 13:18:15 -0600 Subject: [PATCH 060/156] Remove PR from most changelog kinds (#6374) * update changie to require issue or pr, and allow multiple * remove extraneous data from changelog files. * allow for multiple PR/issues to be entered * update contributing guide * remove issue number from bot changelogs * update format of PR * fix dependency changelogs * remove extra line * remove extra lines, tweak contributor wording * Update CONTRIBUTING.md Co-authored-by: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Co-authored-by: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> --- .../Dependency-20220923-000646.yaml | 5 +- .../Dependency-20221007-000848.yaml | 5 +- .../Dependency-20221020-000753.yaml | 5 +- .../Dependency-20221026-000910.yaml | 5 +- .changes/unreleased/Docs-20220908-154157.yaml | 1 - .changes/unreleased/Docs-20221007-090656.yaml | 1 - .changes/unreleased/Docs-20221017-171411.yaml | 1 - .changes/unreleased/Docs-20221116-155743.yaml | 1 - .changes/unreleased/Docs-20221202-150523.yaml | 1 - .../unreleased/Features-20220408-165459.yaml | 1 - .../unreleased/Features-20220817-154857.yaml | 1 - .../unreleased/Features-20220912-125935.yaml | 1 - .../unreleased/Features-20220914-095625.yaml | 1 - .../unreleased/Features-20220925-211651.yaml | 1 - .../unreleased/Features-20221003-110705.yaml | 1 - .../unreleased/Features-20221102-150003.yaml | 1 - .../unreleased/Features-20221114-185207.yaml | 1 - .../unreleased/Features-20221130-112913.yaml | 1 - .../unreleased/Fixes-20220916-104854.yaml | 1 - .../unreleased/Fixes-20221010-113218.yaml | 1 - .../unreleased/Fixes-20221011-160715.yaml | 1 - .../unreleased/Fixes-20221016-173742.yaml | 1 - .../unreleased/Fixes-20221107-095314.yaml | 1 - .../unreleased/Fixes-20221115-081021.yaml | 1 - .../unreleased/Fixes-20221202-164859.yaml | 1 - .../Under the Hood-20220927-194259.yaml | 1 - .../Under the Hood-20220929-134406.yaml | 1 - .../Under the Hood-20221005-120310.yaml | 1 - .../Under the Hood-20221007-094627.yaml | 1 - .../Under the Hood-20221007-140044.yaml | 1 - .../Under the Hood-20221013-181912.yaml | 1 - .../Under the Hood-20221017-151511.yaml | 1 - .../Under the Hood-20221017-155844.yaml | 1 - .../Under the Hood-20221028-104837.yaml | 1 - .../Under the Hood-20221028-110344.yaml | 1 - .../Under the Hood-20221108-074550.yaml | 1 - .../Under the Hood-20221108-115633.yaml | 1 - .../Under the Hood-20221108-133104.yaml | 1 - .../Under the Hood-20221116-130037.yaml | 1 - .changie.yaml | 105 +++++++++++++----- .github/workflows/bot-changelog.yml | 4 +- CONTRIBUTING.md | 12 
+- 42 files changed, 96 insertions(+), 80 deletions(-) diff --git a/.changes/unreleased/Dependency-20220923-000646.yaml b/.changes/unreleased/Dependency-20220923-000646.yaml index a8d3c0a64ad..0375eeb125f 100644 --- a/.changes/unreleased/Dependency-20220923-000646.yaml +++ b/.changes/unreleased/Dependency-20220923-000646.yaml @@ -1,7 +1,6 @@ -kind: "Dependency" +kind: "Dependencies" body: "Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core" time: 2022-09-23T00:06:46.00000Z custom: Author: dependabot[bot] - Issue: 4904 - PR: 5917 + PR: "5917" diff --git a/.changes/unreleased/Dependency-20221007-000848.yaml b/.changes/unreleased/Dependency-20221007-000848.yaml index 8b2aebdc466..7e36733d14e 100644 --- a/.changes/unreleased/Dependency-20221007-000848.yaml +++ b/.changes/unreleased/Dependency-20221007-000848.yaml @@ -1,7 +1,6 @@ -kind: "Dependency" +kind: "Dependencies" body: "Bump black from 22.8.0 to 22.10.0" time: 2022-10-07T00:08:48.00000Z custom: Author: dependabot[bot] - Issue: 4904 - PR: 6019 + PR: "6019" diff --git a/.changes/unreleased/Dependency-20221020-000753.yaml b/.changes/unreleased/Dependency-20221020-000753.yaml index ff6e7efc48f..ce0f122826b 100644 --- a/.changes/unreleased/Dependency-20221020-000753.yaml +++ b/.changes/unreleased/Dependency-20221020-000753.yaml @@ -1,7 +1,6 @@ -kind: "Dependency" +kind: "Dependencies" body: "Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core" time: 2022-10-20T00:07:53.00000Z custom: Author: dependabot[bot] - Issue: 4904 - PR: 6108 + PR: "6108" diff --git a/.changes/unreleased/Dependency-20221026-000910.yaml b/.changes/unreleased/Dependency-20221026-000910.yaml index a5e5756e4cb..d68fa8a11ef 100644 --- a/.changes/unreleased/Dependency-20221026-000910.yaml +++ b/.changes/unreleased/Dependency-20221026-000910.yaml @@ -1,7 +1,6 @@ -kind: "Dependency" +kind: "Dependencies" body: "Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core" time: 2022-10-26T00:09:10.00000Z custom: Author: dependabot[bot] - Issue: 4904 - PR: 6144 + PR: "6144" diff --git a/.changes/unreleased/Docs-20220908-154157.yaml b/.changes/unreleased/Docs-20220908-154157.yaml index 2b2d30d41e5..e307f3bd5e0 100644 --- a/.changes/unreleased/Docs-20220908-154157.yaml +++ b/.changes/unreleased/Docs-20220908-154157.yaml @@ -4,4 +4,3 @@ time: 2022-09-08T15:41:57.689162-04:00 custom: Author: andy-clapson Issue: "5791" - PR: "5684" diff --git a/.changes/unreleased/Docs-20221007-090656.yaml b/.changes/unreleased/Docs-20221007-090656.yaml index 1159879a249..070ecd48944 100644 --- a/.changes/unreleased/Docs-20221007-090656.yaml +++ b/.changes/unreleased/Docs-20221007-090656.yaml @@ -4,4 +4,3 @@ time: 2022-10-07T09:06:56.446078-05:00 custom: Author: stu-k Issue: "5528" - PR: "6022" diff --git a/.changes/unreleased/Docs-20221017-171411.yaml b/.changes/unreleased/Docs-20221017-171411.yaml index 6f480f3c4bd..487362c1d5c 100644 --- a/.changes/unreleased/Docs-20221017-171411.yaml +++ b/.changes/unreleased/Docs-20221017-171411.yaml @@ -3,4 +3,3 @@ time: 2022-10-17T17:14:11.715348-05:00 custom: Author: paulbenschmidt Issue: "5880" - PR: "324" diff --git a/.changes/unreleased/Docs-20221116-155743.yaml b/.changes/unreleased/Docs-20221116-155743.yaml index 0b5ce05ee69..84d90a67b99 100644 --- a/.changes/unreleased/Docs-20221116-155743.yaml +++ b/.changes/unreleased/Docs-20221116-155743.yaml @@ -4,4 +4,3 @@ time: 2022-11-16T15:57:43.204201+01:00 custom: Author: jtcohen6 Issue: "323" - PR: "346" diff --git a/.changes/unreleased/Docs-20221202-150523.yaml 
b/.changes/unreleased/Docs-20221202-150523.yaml index cf0b4edb2d8..b08a32cddf6 100644 --- a/.changes/unreleased/Docs-20221202-150523.yaml +++ b/.changes/unreleased/Docs-20221202-150523.yaml @@ -4,4 +4,3 @@ time: 2022-12-02T15:05:23.695333-07:00 custom: Author: dbeatty10 Issue: "6368" - PR: "6371" diff --git a/.changes/unreleased/Features-20220408-165459.yaml b/.changes/unreleased/Features-20220408-165459.yaml index c29cfc617c7..12cdf74c757 100644 --- a/.changes/unreleased/Features-20220408-165459.yaml +++ b/.changes/unreleased/Features-20220408-165459.yaml @@ -5,4 +5,3 @@ time: 2022-04-08T16:54:59.696564+01:00 custom: Author: daniel-murray josephberni Issue: "2968" - PR: "5859" diff --git a/.changes/unreleased/Features-20220817-154857.yaml b/.changes/unreleased/Features-20220817-154857.yaml index f22e48b91a0..ad53df05a3f 100644 --- a/.changes/unreleased/Features-20220817-154857.yaml +++ b/.changes/unreleased/Features-20220817-154857.yaml @@ -4,4 +4,3 @@ time: 2022-08-17T15:48:57.225267-04:00 custom: Author: gshank Issue: "5610" - PR: "5643" diff --git a/.changes/unreleased/Features-20220912-125935.yaml b/.changes/unreleased/Features-20220912-125935.yaml index b0c1dd41a26..d49f35fd0af 100644 --- a/.changes/unreleased/Features-20220912-125935.yaml +++ b/.changes/unreleased/Features-20220912-125935.yaml @@ -4,4 +4,3 @@ time: 2022-09-12T12:59:35.121188+01:00 custom: Author: jared-rimmer Issue: "5486" - PR: "5812" diff --git a/.changes/unreleased/Features-20220914-095625.yaml b/.changes/unreleased/Features-20220914-095625.yaml index 51828084a0d..d46b1bfa8d8 100644 --- a/.changes/unreleased/Features-20220914-095625.yaml +++ b/.changes/unreleased/Features-20220914-095625.yaml @@ -4,4 +4,3 @@ time: 2022-09-14T09:56:25.97818-07:00 custom: Author: colin-rogers-dbt Issue: "5521" - PR: "5838" diff --git a/.changes/unreleased/Features-20220925-211651.yaml b/.changes/unreleased/Features-20220925-211651.yaml index 0f0f6e84213..d2c1911c720 100644 --- a/.changes/unreleased/Features-20220925-211651.yaml +++ b/.changes/unreleased/Features-20220925-211651.yaml @@ -4,4 +4,3 @@ time: 2022-09-25T21:16:51.051239654+02:00 custom: Author: pgoslatara Issue: "5929" - PR: "5930" diff --git a/.changes/unreleased/Features-20221003-110705.yaml b/.changes/unreleased/Features-20221003-110705.yaml index f8142666c3b..637d8be58c6 100644 --- a/.changes/unreleased/Features-20221003-110705.yaml +++ b/.changes/unreleased/Features-20221003-110705.yaml @@ -4,4 +4,3 @@ time: 2022-10-03T11:07:05.381632-05:00 custom: Author: dave-connors-3 Issue: "5990" - PR: "5991" diff --git a/.changes/unreleased/Features-20221102-150003.yaml b/.changes/unreleased/Features-20221102-150003.yaml index ca45893dfe9..9d8ba192687 100644 --- a/.changes/unreleased/Features-20221102-150003.yaml +++ b/.changes/unreleased/Features-20221102-150003.yaml @@ -5,4 +5,3 @@ time: 2022-11-02T15:00:03.000805-05:00 custom: Author: racheldaniel Issue: "6201" - PR: "6202" diff --git a/.changes/unreleased/Features-20221114-185207.yaml b/.changes/unreleased/Features-20221114-185207.yaml index 16ea3fb5918..459bc8ce234 100644 --- a/.changes/unreleased/Features-20221114-185207.yaml +++ b/.changes/unreleased/Features-20221114-185207.yaml @@ -4,4 +4,3 @@ time: 2022-11-14T18:52:07.788593+02:00 custom: Author: haritamar Issue: "6246" - PR: "6247" diff --git a/.changes/unreleased/Features-20221130-112913.yaml b/.changes/unreleased/Features-20221130-112913.yaml index b640ab3e690..64832de2f68 100644 --- a/.changes/unreleased/Features-20221130-112913.yaml +++ 
b/.changes/unreleased/Features-20221130-112913.yaml @@ -4,4 +4,3 @@ time: 2022-11-30T11:29:13.256034-05:00 custom: Author: michelleark Issue: "6057" - PR: "6342" diff --git a/.changes/unreleased/Fixes-20220916-104854.yaml b/.changes/unreleased/Fixes-20220916-104854.yaml index 64e76c43a3f..bd9af0469a7 100644 --- a/.changes/unreleased/Fixes-20220916-104854.yaml +++ b/.changes/unreleased/Fixes-20220916-104854.yaml @@ -4,4 +4,3 @@ time: 2022-09-16T10:48:54.162273-05:00 custom: Author: emmyoop Issue: "3992" - PR: "5868" diff --git a/.changes/unreleased/Fixes-20221010-113218.yaml b/.changes/unreleased/Fixes-20221010-113218.yaml index 73f128ec5b7..5b73b8d9ccd 100644 --- a/.changes/unreleased/Fixes-20221010-113218.yaml +++ b/.changes/unreleased/Fixes-20221010-113218.yaml @@ -4,4 +4,3 @@ time: 2022-10-10T11:32:18.752322-05:00 custom: Author: emmyoop Issue: "6030" - PR: "6038" diff --git a/.changes/unreleased/Fixes-20221011-160715.yaml b/.changes/unreleased/Fixes-20221011-160715.yaml index 273e1398bdd..936546a5232 100644 --- a/.changes/unreleased/Fixes-20221011-160715.yaml +++ b/.changes/unreleased/Fixes-20221011-160715.yaml @@ -4,4 +4,3 @@ time: 2022-10-11T16:07:15.464093-04:00 custom: Author: chamini2 Issue: "6041" - PR: "6042" diff --git a/.changes/unreleased/Fixes-20221016-173742.yaml b/.changes/unreleased/Fixes-20221016-173742.yaml index 11d4a8c85f4..c7b00dddba8 100644 --- a/.changes/unreleased/Fixes-20221016-173742.yaml +++ b/.changes/unreleased/Fixes-20221016-173742.yaml @@ -5,4 +5,3 @@ time: 2022-10-16T17:37:42.846683-07:00 custom: Author: versusfacit Issue: "5436" - PR: "5874" diff --git a/.changes/unreleased/Fixes-20221107-095314.yaml b/.changes/unreleased/Fixes-20221107-095314.yaml index f3763b7d039..99da9c44522 100644 --- a/.changes/unreleased/Fixes-20221107-095314.yaml +++ b/.changes/unreleased/Fixes-20221107-095314.yaml @@ -4,4 +4,3 @@ time: 2022-11-07T09:53:14.340257-06:00 custom: Author: ChenyuLInx Issue: "5625" - PR: "6059" diff --git a/.changes/unreleased/Fixes-20221115-081021.yaml b/.changes/unreleased/Fixes-20221115-081021.yaml index d995a2c4fa7..40c81fabacb 100644 --- a/.changes/unreleased/Fixes-20221115-081021.yaml +++ b/.changes/unreleased/Fixes-20221115-081021.yaml @@ -4,4 +4,3 @@ time: 2022-11-15T08:10:21.527884-05:00 custom: Author: justbldwn Issue: "6245" - PR: "6251" diff --git a/.changes/unreleased/Fixes-20221202-164859.yaml b/.changes/unreleased/Fixes-20221202-164859.yaml index 65d17625ac0..6aad4ced192 100644 --- a/.changes/unreleased/Fixes-20221202-164859.yaml +++ b/.changes/unreleased/Fixes-20221202-164859.yaml @@ -4,4 +4,3 @@ time: 2022-12-02T16:48:59.029519-05:00 custom: Author: gshank Issue: "6055" - PR: "6081" diff --git a/.changes/unreleased/Under the Hood-20220927-194259.yaml b/.changes/unreleased/Under the Hood-20220927-194259.yaml index dbd85165e2c..b6cb64b0155 100644 --- a/.changes/unreleased/Under the Hood-20220927-194259.yaml +++ b/.changes/unreleased/Under the Hood-20220927-194259.yaml @@ -4,4 +4,3 @@ time: 2022-09-27T19:42:59.241433-07:00 custom: Author: max-sixty Issue: "5946" - PR: "5947" diff --git a/.changes/unreleased/Under the Hood-20220929-134406.yaml b/.changes/unreleased/Under the Hood-20220929-134406.yaml index ce69bdf322a..b0175190747 100644 --- a/.changes/unreleased/Under the Hood-20220929-134406.yaml +++ b/.changes/unreleased/Under the Hood-20220929-134406.yaml @@ -4,4 +4,3 @@ time: 2022-09-29T13:44:06.275941-04:00 custom: Author: peterallenwebb Issue: "5809" - PR: "5975" diff --git a/.changes/unreleased/Under the Hood-20221005-120310.yaml 
b/.changes/unreleased/Under the Hood-20221005-120310.yaml index eb87a14fedc..797be31c319 100644 --- a/.changes/unreleased/Under the Hood-20221005-120310.yaml +++ b/.changes/unreleased/Under the Hood-20221005-120310.yaml @@ -4,4 +4,3 @@ time: 2022-10-05T12:03:10.061263-07:00 custom: Author: max-sixty Issue: "5983" - PR: "5983" diff --git a/.changes/unreleased/Under the Hood-20221007-094627.yaml b/.changes/unreleased/Under the Hood-20221007-094627.yaml index 950c20577ed..d3a5da61566 100644 --- a/.changes/unreleased/Under the Hood-20221007-094627.yaml +++ b/.changes/unreleased/Under the Hood-20221007-094627.yaml @@ -4,4 +4,3 @@ time: 2022-10-07T09:46:27.682872-05:00 custom: Author: emmyoop Issue: "6023" - PR: "6024" diff --git a/.changes/unreleased/Under the Hood-20221007-140044.yaml b/.changes/unreleased/Under the Hood-20221007-140044.yaml index b41e3f6eb5a..971d5a40ce8 100644 --- a/.changes/unreleased/Under the Hood-20221007-140044.yaml +++ b/.changes/unreleased/Under the Hood-20221007-140044.yaml @@ -4,4 +4,3 @@ time: 2022-10-07T14:00:44.227644-07:00 custom: Author: max-sixty Issue: "6028" - PR: "5978" diff --git a/.changes/unreleased/Under the Hood-20221013-181912.yaml b/.changes/unreleased/Under the Hood-20221013-181912.yaml index 2f03b9b29ff..4f5218891b4 100644 --- a/.changes/unreleased/Under the Hood-20221013-181912.yaml +++ b/.changes/unreleased/Under the Hood-20221013-181912.yaml @@ -4,4 +4,3 @@ time: 2022-10-13T18:19:12.167548-04:00 custom: Author: peterallenwebb Issue: "5229" - PR: "6025" diff --git a/.changes/unreleased/Under the Hood-20221017-151511.yaml b/.changes/unreleased/Under the Hood-20221017-151511.yaml index cbdcf04beb3..94f4d27d6de 100644 --- a/.changes/unreleased/Under the Hood-20221017-151511.yaml +++ b/.changes/unreleased/Under the Hood-20221017-151511.yaml @@ -4,4 +4,3 @@ time: 2022-10-17T15:15:11.499246-05:00 custom: Author: luke-bassett Issue: "1350" - PR: "6086" diff --git a/.changes/unreleased/Under the Hood-20221017-155844.yaml b/.changes/unreleased/Under the Hood-20221017-155844.yaml index 84e6675351c..c46ef040410 100644 --- a/.changes/unreleased/Under the Hood-20221017-155844.yaml +++ b/.changes/unreleased/Under the Hood-20221017-155844.yaml @@ -4,4 +4,3 @@ time: 2022-10-17T15:58:44.676549-04:00 custom: Author: eve-johns Issue: "6068" - PR: "6082" diff --git a/.changes/unreleased/Under the Hood-20221028-104837.yaml b/.changes/unreleased/Under the Hood-20221028-104837.yaml index 22ad4901794..446d4898920 100644 --- a/.changes/unreleased/Under the Hood-20221028-104837.yaml +++ b/.changes/unreleased/Under the Hood-20221028-104837.yaml @@ -4,4 +4,3 @@ time: 2022-10-28T10:48:37.687886-04:00 custom: Author: gshank Issue: "6171" - PR: "6172" diff --git a/.changes/unreleased/Under the Hood-20221028-110344.yaml b/.changes/unreleased/Under the Hood-20221028-110344.yaml index 4ee0a7dc214..cbe8dacb3d5 100644 --- a/.changes/unreleased/Under the Hood-20221028-110344.yaml +++ b/.changes/unreleased/Under the Hood-20221028-110344.yaml @@ -4,4 +4,3 @@ time: 2022-10-28T11:03:44.887836-04:00 custom: Author: gshank Issue: "6173" - PR: "6174" diff --git a/.changes/unreleased/Under the Hood-20221108-074550.yaml b/.changes/unreleased/Under the Hood-20221108-074550.yaml index 351887f767a..a8fbc7e208b 100644 --- a/.changes/unreleased/Under the Hood-20221108-074550.yaml +++ b/.changes/unreleased/Under the Hood-20221108-074550.yaml @@ -4,4 +4,3 @@ time: 2022-11-08T07:45:50.589147-06:00 custom: Author: stu-k Issue: "5942" - PR: "6226" diff --git a/.changes/unreleased/Under the 
Hood-20221108-115633.yaml b/.changes/unreleased/Under the Hood-20221108-115633.yaml index 2ba10536728..ea073719cda 100644 --- a/.changes/unreleased/Under the Hood-20221108-115633.yaml +++ b/.changes/unreleased/Under the Hood-20221108-115633.yaml @@ -4,4 +4,3 @@ time: 2022-11-08T11:56:33.743042-06:00 custom: Author: stu-k Issue: "5770" - PR: "6228" diff --git a/.changes/unreleased/Under the Hood-20221108-133104.yaml b/.changes/unreleased/Under the Hood-20221108-133104.yaml index 4aea5ee8cd9..6829dc097eb 100644 --- a/.changes/unreleased/Under the Hood-20221108-133104.yaml +++ b/.changes/unreleased/Under the Hood-20221108-133104.yaml @@ -4,4 +4,3 @@ time: 2022-11-08T13:31:04.788547-06:00 custom: Author: stu-k Issue: "5771" - PR: "6230" diff --git a/.changes/unreleased/Under the Hood-20221116-130037.yaml b/.changes/unreleased/Under the Hood-20221116-130037.yaml index b7ed5e750d6..ecdedd6bd2d 100644 --- a/.changes/unreleased/Under the Hood-20221116-130037.yaml +++ b/.changes/unreleased/Under the Hood-20221116-130037.yaml @@ -4,4 +4,3 @@ time: 2022-11-16T13:00:37.916202-06:00 custom: Author: stu-k Issue: "5942" - PR: "6187" diff --git a/.changie.yaml b/.changie.yaml index 0744c5bb9c7..0571d809681 100644 --- a/.changie.yaml +++ b/.changie.yaml @@ -6,19 +6,67 @@ changelogPath: CHANGELOG.md versionExt: md versionFormat: '## dbt-core {{.Version}} - {{.Time.Format "January 02, 2006"}}' kindFormat: '### {{.Kind}}' -changeFormat: '- {{.Body}} ([#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), [#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))' +changeFormat: |- + {{- $IssueList := list }} + {{- $changes := splitList " " $.Custom.Issue }} + {{- range $issueNbr := $changes }} + {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }} + {{- $IssueList = append $IssueList $changeLink }} + {{- end -}} + - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}}) kinds: - label: Breaking Changes - label: Features - label: Fixes - label: Docs - changeFormat: '- {{.Body}} ([dbt-docs/#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-docs/issues/{{.Custom.Issue}}), [dbt-docs/#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-docs/pull/{{.Custom.PR}}))' + changeFormat: |- + {{- $IssueList := list }} + {{- $changes := splitList " " $.Custom.Issue }} + {{- range $issueNbr := $changes }} + {{- $changeLink := "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $issueNbr }} + {{- $IssueList = append $IssueList $changeLink }} + {{- end -}} + - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}}) - label: Under the Hood - label: Dependencies - changeFormat: '- {{.Body}} ({{if ne .Custom.Issue ""}}[#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), {{end}}[#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))' + changeFormat: |- + {{- $PRList := list }} + {{- $changes := splitList " " $.Custom.PR }} + {{- range $pullrequest := $changes }} + {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }} + {{- $PRList = append $PRList $changeLink }} + {{- end -}} + - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}}) + skipGlobalChoices: true + additionalChoices: + - key: Author + label: GitHub Username(s) (separated by a single space if multiple) + type: string + 
minLength: 3 + - key: PR + label: GitHub Pull Request Number (separated by a single space if multiple) + type: string + minLength: 1 - label: Security - changeFormat: '- {{.Body}} ({{if ne .Custom.Issue ""}}[#{{.Custom.Issue}}](https://github.com/dbt-labs/dbt-core/issues/{{.Custom.Issue}}), {{end}}[#{{.Custom.PR}}](https://github.com/dbt-labs/dbt-core/pull/{{.Custom.PR}}))' + changeFormat: |- + {{- $PRList := list }} + {{- $changes := splitList " " $.Custom.PR }} + {{- range $pullrequest := $changes }} + {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $pullrequest }} + {{- $PRList = append $PRList $changeLink }} + {{- end -}} + - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}}) + skipGlobalChoices: true + additionalChoices: + - key: Author + label: GitHub Username(s) (separated by a single space if multiple) + type: string + minLength: 3 + - key: PR + label: GitHub Pull Request Number (separated by a single space if multiple) + type: string + minLength: 1 newlines: afterChangelogHeader: 1 @@ -33,13 +81,9 @@ custom: type: string minLength: 3 - key: Issue - label: GitHub Issue Number - type: int - minInt: 1 -- key: PR - label: GitHub Pull Request Number - type: int - minInt: 1 + label: GitHub Issue Number (separated by a single space if multiple) + type: string + minLength: 1 footerFormat: | {{- $contributorDict := dict }} @@ -47,28 +91,31 @@ footerFormat: | {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} {{- range $change := .Changes }} {{- $authorList := splitList " " $change.Custom.Author }} - {{- /* loop through all authors for a PR */}} + {{- /* loop through all authors for a single changelog */}} {{- range $author := $authorList }} {{- $authorLower := lower $author }} {{- /* we only want to include non-core team contributors */}} {{- if not (has $authorLower $core_team)}} - {{- /* Docs kind link back to dbt-docs instead of dbt-core PRs */}} - {{- $prLink := $change.Kind }} - {{- if eq $change.Kind "Docs" }} - {{- $prLink = "[dbt-docs/#pr](https://github.com/dbt-labs/dbt-docs/pull/pr)" | replace "pr" $change.Custom.PR }} - {{- else }} - {{- $prLink = "[#pr](https://github.com/dbt-labs/dbt-core/pull/pr)" | replace "pr" $change.Custom.PR }} - {{- end }} - {{- /* check if this contributor has other PRs associated with them already */}} - {{- if hasKey $contributorDict $author }} - {{- $prList := get $contributorDict $author }} - {{- $prList = append $prList $prLink }} - {{- $contributorDict := set $contributorDict $author $prList }} - {{- else }} - {{- $prList := list $prLink }} - {{- $contributorDict := set $contributorDict $author $prList }} - {{- end }} - {{- end}} + {{- $changeList := splitList " " $change.Custom.Author }} + {{- /* Docs kind link back to dbt-docs instead of dbt-core issues */}} + {{- $changeLink := $change.Kind }} + {{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }} + {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $change.Custom.PR }} + {{- else if eq $change.Kind "Docs"}} + {{- $changeLink = "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $change.Custom.Issue }} + {{- else }} + {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $change.Custom.Issue }} + {{- end }} + {{- 
/* check if this contributor has other changes associated with them already */}} + {{- if hasKey $contributorDict $author }} + {{- $contributionList := get $contributorDict $author }} + {{- $contributionList = append $contributionList $changeLink }} + {{- $contributorDict := set $contributorDict $author $contributionList }} + {{- else }} + {{- $contributionList := list $changeLink }} + {{- $contributorDict := set $contributorDict $author $contributionList }} + {{- end }} + {{- end}} {{- end}} {{- end }} {{- /* no indentation here for formatting so the final markdown doesn't have unneeded indentations */}} diff --git a/.github/workflows/bot-changelog.yml b/.github/workflows/bot-changelog.yml index 2d06fafe682..c6d2a1507a3 100644 --- a/.github/workflows/bot-changelog.yml +++ b/.github/workflows/bot-changelog.yml @@ -40,7 +40,7 @@ jobs: matrix: include: - label: "dependencies" - changie_kind: "Dependency" + changie_kind: "Dependencies" - label: "snyk" changie_kind: "Security" runs-on: ubuntu-latest @@ -58,4 +58,4 @@ jobs: commit_message: "Add automated changelog yaml from template for bot PR" changie_kind: ${{ matrix.changie_kind }} label: ${{ matrix.label }} - custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n Issue: 4904\n PR: ${{ github.event.pull_request.number }}" + custom_changelog_string: "custom:\n Author: ${{ github.event.pull_request.user.login }}\n PR: ${{ github.event.pull_request.number }}" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d06170f9c55..3bbd8d14d5f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -201,13 +201,21 @@ Here are some general rules for adding tests: * Sometimes flake8 complains about lines that are actually fine, in which case you can put a comment on the line such as: # noqa or # noqa: ANNN, where ANNN is the error code that flake8 issues. * To collect output for `CProfile`, run dbt with the `-r` option and the name of an output file, i.e. `dbt -r dbt.cprof run`. If you just want to profile parsing, you can do: `dbt -r dbt.cprof parse`. `pip` install `snakeviz` to view the output. Run `snakeviz dbt.cprof` and output will be rendered in a browser window. -## Adding a CHANGELOG Entry +## Adding or modifying a CHANGELOG Entry We use [changie](https://changie.dev) to generate `CHANGELOG` entries. **Note:** Do not edit the `CHANGELOG.md` directly. Your modifications will be lost. Follow the steps to [install `changie`](https://changie.dev/guide/installation/) for your system. -Once changie is installed and your PR is created, simply run `changie new` and changie will walk you through the process of creating a changelog entry. Commit the file that's created and your changelog entry is complete! +Once changie is installed and your PR is created for a new feature, simply run the following command and changie will walk you through the process of creating a changelog entry: + +```shell +changie new +``` + +Commit the file that's created and your changelog entry is complete! + +If you are contributing to a feature already in progress, you will modify the changie yaml file in dbt/.changes/unreleased/ related to your change. If you need help finding this file, please ask within the discussion for the pull request! You don't need to worry about which `dbt-core` version your change will go into. Just create the changelog entry with `changie`, and open your PR against the `main` branch. All merged changes will be included in the next minor version of `dbt-core`. 
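To make the multi-value fields concrete: given a hypothetical entry whose `Issue` value is `"1234 5678"`, the reworked `changeFormat` template above emits a single bullet with one link per number. The same transformation, transliterated to Python purely for illustration (the body text and issue numbers are made up):

```python
def render_change(body: str, issues: str) -> str:
    # Mirrors the Go template: split on single spaces, link each number,
    # then join the links with commas inside one changelog bullet.
    links = [
        f"[#{n}](https://github.com/dbt-labs/dbt-core/issues/{n})"
        for n in issues.split(" ")
    ]
    return f"- {body} ({', '.join(links)})"


print(render_change("Support multiple linked issues", "1234 5678"))
# - Support multiple linked issues ([#1234](https://github.com/dbt-labs/dbt-core/issues/1234), [#5678](https://github.com/dbt-labs/dbt-core/issues/5678))
```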
The Core maintainers _may_ choose to "backport" specific changes in order to patch older minor versions. In that case, a maintainer will take care of that backport after merging your PR, before releasing the new version of `dbt-core`. From 83b1fee06228055f6fe573d778775160c7127454 Mon Sep 17 00:00:00 2001 From: Kshitij Aranke Date: Mon, 12 Dec 2022 15:13:03 -0800 Subject: [PATCH 061/156] Add aranke to core committers (#6431) --- .changie.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changie.yaml b/.changie.yaml index 0571d809681..e417244506b 100644 --- a/.changie.yaml +++ b/.changie.yaml @@ -88,7 +88,7 @@ custom: footerFormat: | {{- $contributorDict := dict }} {{- /* any names added to this list should be all lowercase for later matching purposes */}} - {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} + {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }} {{- range $change := .Changes }} {{- $authorList := splitList " " $change.Custom.Author }} {{- /* loop through all authors for a single changelog */}} From 84782625805f4457e0169c474159832ef01e4648 Mon Sep 17 00:00:00 2001 From: Josh Devlin Date: Wed, 14 Dec 2022 03:12:34 +1100 Subject: [PATCH 062/156] Update docker README (#6423) --- docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/README.md b/docker/README.md index 4adde533d37..7a48010b7d3 100644 --- a/docker/README.md +++ b/docker/README.md @@ -105,7 +105,7 @@ The `ENTRYPOINT` for this Dockerfile is the command `dbt` so you can bind-mount docker run \ --network=host --mount type=bind,source=path/to/project,target=/usr/app \ ---mount type=bind,source=path/to/profiles.yml,target=/root/.dbt/ \ +--mount type=bind,source=path/to/profiles.yml,target=/root/.dbt/profiles.yml \ my-dbt \ ls ``` From fafd5edbda2e94e6452bf2487fffee6e267736b1 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Tue, 13 Dec 2022 12:39:35 -0500 Subject: [PATCH 063/156] CT 1644 node cleanup (#6427) * Remove unneeded SQL compilation attributes from SeedNode * Fix various places that referenced removed attributes * Cleanup a few Unions * More formatting in nodes.py * Mypy passing. Untested. * Unit tests working * use "doc" in documentation unique_ids * update some doc_ids * Fix some artifact tests. Still need previous version. * Update manifest/v8.json * Move relation_names to parsing * Fix a couple of tests * Update some artifacts. snapshot_seed has wrong schema. 
* Changie * Tweak NodeType.Documentation * Put store_failures property in the right place * Fix setting relation_name --- .../Under the Hood-20221211-214240.yaml | 7 + core/dbt/adapters/base/relation.py | 15 +- core/dbt/adapters/protocol.py | 9 +- core/dbt/compilation.py | 40 +- core/dbt/contracts/graph/manifest.py | 10 +- core/dbt/contracts/graph/nodes.py | 619 ++- core/dbt/contracts/util.py | 38 +- core/dbt/events/types.py | 2 +- core/dbt/graph/selector.py | 4 +- core/dbt/graph/selector_methods.py | 8 +- core/dbt/node_types.py | 2 +- core/dbt/parser/base.py | 24 +- core/dbt/parser/docs.py | 6 +- core/dbt/parser/manifest.py | 19 +- core/dbt/parser/schemas.py | 3 + core/dbt/parser/seeds.py | 3 + core/dbt/parser/snapshots.py | 2 + schemas/dbt/manifest/v8.json | 4037 ++++------------- test/unit/test_contracts_graph_parsed.py | 34 +- test/unit/test_graph_selector_methods.py | 5 +- test/unit/test_manifest.py | 7 +- test/unit/test_node_types.py | 2 +- test/unit/test_parser.py | 5 + .../artifacts/data/state/v8/manifest.json | 2 +- .../functional/artifacts/expected_manifest.py | 91 +- tests/functional/list/test_list.py | 1 - .../partial_parsing/test_pp_docs.py | 4 +- 27 files changed, 1507 insertions(+), 3492 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221211-214240.yaml diff --git a/.changes/unreleased/Under the Hood-20221211-214240.yaml b/.changes/unreleased/Under the Hood-20221211-214240.yaml new file mode 100644 index 00000000000..adeaefba257 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221211-214240.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Refactor and clean up manifest nodes +time: 2022-12-11T21:42:40.560074-05:00 +custom: + Author: gshank + Issue: "6426" + PR: "6427" diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index cd69b80a579..0461990c92d 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -1,8 +1,8 @@ from collections.abc import Hashable from dataclasses import dataclass, field -from typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set +from typing import Optional, TypeVar, Any, Type, Dict, Iterator, Tuple, Set -from dbt.contracts.graph.nodes import SourceDefinition, ParsedNode +from dbt.contracts.graph.nodes import SourceDefinition, ManifestNode, ResultNode, ParsedNode from dbt.contracts.relation import ( RelationType, ComponentName, @@ -210,7 +210,7 @@ def add_ephemeral_prefix(name: str): def create_ephemeral_from_node( cls: Type[Self], config: HasQuoting, - node: ParsedNode, + node: ManifestNode, ) -> Self: # Note that ephemeral models are based on the name. 
identifier = cls.add_ephemeral_prefix(node.name) @@ -223,7 +223,7 @@ def create_ephemeral_from_node( def create_from_node( cls: Type[Self], config: HasQuoting, - node: ParsedNode, + node: ManifestNode, quote_policy: Optional[Dict[str, bool]] = None, **kwargs: Any, ) -> Self: @@ -244,7 +244,7 @@ def create_from_node( def create_from( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, SourceDefinition], + node: ResultNode, **kwargs: Any, ) -> Self: if node.resource_type == NodeType.Source: @@ -254,8 +254,11 @@ def create_from( ) return cls.create_from_source(node, **kwargs) else: + # Can't use ManifestNode here because of parameterized generics if not isinstance(node, (ParsedNode)): - raise InternalException(f"type mismatch, expected ParsedNode but got {type(node)}") + raise InternalException( + f"type mismatch, expected ManifestNode but got {type(node)}" + ) return cls.create_from_node(config, node, **kwargs) @classmethod diff --git a/core/dbt/adapters/protocol.py b/core/dbt/adapters/protocol.py index 0cc3b3c96ce..13b9bd79968 100644 --- a/core/dbt/adapters/protocol.py +++ b/core/dbt/adapters/protocol.py @@ -8,7 +8,6 @@ Generic, TypeVar, Tuple, - Union, Dict, Any, ) @@ -17,7 +16,7 @@ import agate from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse -from dbt.contracts.graph.nodes import ParsedNode, SourceDefinition, ManifestNode +from dbt.contracts.graph.nodes import ResultNode, ManifestNode from dbt.contracts.graph.model_config import BaseConfig from dbt.contracts.graph.manifest import Manifest from dbt.contracts.relation import Policy, HasQuoting @@ -47,11 +46,7 @@ def get_default_quote_policy(cls) -> Policy: ... @classmethod - def create_from( - cls: Type[Self], - config: HasQuoting, - node: Union[ParsedNode, SourceDefinition], - ) -> Self: + def create_from(cls: Type[Self], config: HasQuoting, node: ResultNode) -> Self: ... 
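The retyped `create_from` boils down to a two-way dispatch over `ResultNode`, which is roughly `Union[ManifestNode, SourceDefinition]`. A stand-in sketch of that control flow, using fake node types rather than dbt's real classes:

```python
from dataclasses import dataclass


@dataclass
class FakeSource:
    resource_type: str = "source"
    name: str = "raw_events"


@dataclass
class FakeModel:
    resource_type: str = "model"
    name: str = "stg_events"


def create_from(config: dict, node) -> str:
    if node.resource_type == "source":
        # Sources carry their own quoting and skip the project config merge.
        return f"<source relation {node.name}>"
    # Models, seeds, snapshots, and tests all flow through create_from_node,
    # merging the project's quoting config; anything else is a type mismatch.
    return f"<relation {node.name}, quoting={config['quoting']}>"


print(create_from({"quoting": {"identifier": True}}, FakeSource()))
print(create_from({"quoting": {"identifier": True}}, FakeModel()))
```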
diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 7cd6f49a5e6..fcf98b4e914 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -13,11 +13,12 @@ from dbt.context.providers import generate_runtime_model_context from dbt.contracts.graph.manifest import Manifest, UniqueID from dbt.contracts.graph.nodes import ( - ParsedNode, ManifestNode, + ManifestSQLNode, GenericTestNode, GraphMemberNode, InjectedCTE, + SeedNode, ) from dbt.exceptions import ( dependency_not_found, @@ -167,7 +168,7 @@ def initialize(self): # a dict for jinja rendering of SQL def _create_node_context( self, - node: ManifestNode, + node: ManifestSQLNode, manifest: Manifest, extra_context: Dict[str, Any], ) -> Dict[str, Any]: @@ -186,14 +187,6 @@ def add_ephemeral_prefix(self, name: str): relation_cls = adapter.Relation return relation_cls.add_ephemeral_prefix(name) - def _get_relation_name(self, node: ParsedNode): - relation_name = None - if node.is_relational and not node.is_ephemeral_model: - adapter = get_adapter(self.config) - relation_cls = adapter.Relation - relation_name = str(relation_cls.create_from(self.config, node)) - return relation_name - def _inject_ctes_into_sql(self, sql: str, ctes: List[InjectedCTE]) -> str: """ `ctes` is a list of InjectedCTEs like: @@ -252,10 +245,10 @@ def _inject_ctes_into_sql(self, sql: str, ctes: List[InjectedCTE]) -> str: def _recursively_prepend_ctes( self, - model: ManifestNode, + model: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]], - ) -> Tuple[ManifestNode, List[InjectedCTE]]: + ) -> Tuple[ManifestSQLNode, List[InjectedCTE]]: """This method is called by the 'compile_node' method. Starting from the node that it is passed in, it will recursively call itself using the 'extra_ctes'. The 'ephemeral' models do @@ -270,7 +263,8 @@ def _recursively_prepend_ctes( # Just to make it plain that nothing is actually injected for this case if not model.extra_ctes: - model.extra_ctes_injected = True + if not isinstance(model, SeedNode): + model.extra_ctes_injected = True manifest.update_node(model) return (model, model.extra_ctes) @@ -289,6 +283,7 @@ def _recursively_prepend_ctes( f"could not be resolved: {cte.id}" ) cte_model = manifest.nodes[cte.id] + assert not isinstance(cte_model, SeedNode) if not cte_model.is_ephemeral_model: raise InternalException(f"{cte.id} is not ephemeral") @@ -332,16 +327,16 @@ def _recursively_prepend_ctes( return model, prepended_ctes - # Sets compiled fields in the ManifestNode passed in, + # Sets compiled fields in the ManifestSQLNode passed in, # creates a "context" dictionary for jinja rendering, # and then renders the "compiled_code" using the node, the # raw_code and the context. 
def _compile_node( self, - node: ManifestNode, + node: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, - ) -> ManifestNode: + ) -> ManifestSQLNode: if extra_context is None: extra_context = {} @@ -383,8 +378,6 @@ def _compile_node( node, ) - node.relation_name = self._get_relation_name(node) - node.compiled = True return node @@ -494,8 +487,11 @@ def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph return Graph(linker.graph) # writes the "compiled_code" into the target/compiled directory - def _write_node(self, node: ManifestNode) -> ManifestNode: - if not node.extra_ctes_injected or node.resource_type == NodeType.Snapshot: + def _write_node(self, node: ManifestSQLNode) -> ManifestSQLNode: + if not node.extra_ctes_injected or node.resource_type in ( + NodeType.Snapshot, + NodeType.Seed, + ): return node fire_event(WritingInjectedSQLForNode(node_info=get_node_info())) @@ -507,11 +503,11 @@ def _write_node(self, node: ManifestNode) -> ManifestNode: def compile_node( self, - node: ManifestNode, + node: ManifestSQLNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, write: bool = True, - ) -> ManifestNode: + ) -> ManifestSQLNode: """This is the main entry point into this code. It's called by CompileRunner.compile, GenericRPCRunner.compile, and RunTask.get_hook_sql. It calls '_compile_node' to convert diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index 05c856f461f..cd1eb561fcc 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -29,11 +29,11 @@ GenericTestNode, Exposure, Metric, - HasUniqueID, UnpatchedSourceDefinition, ManifestNode, GraphMemberNode, ResultNode, + BaseNode, ) from dbt.contracts.graph.unparsed import SourcePatch from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile @@ -320,7 +320,7 @@ def _sort_values(dct): def build_node_edges(nodes: List[ManifestNode]): - """Build the forward and backward edges on the given list of ParsedNodes + """Build the forward and backward edges on the given list of ManifestNodes and return them as two separate dictionaries, each mapping unique IDs to lists of edges. 
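`_recursively_prepend_ctes` is the heart of ephemeral materialization: an ephemeral model is never built in the warehouse; its compiled SQL is spliced into each consumer as a named CTE (the `__dbt__cte__` prefix comes from `add_ephemeral_prefix`). That is also why `SeedNode` is special-cased above, since seeds have no SQL to splice. A heavily simplified sketch of the splice; the real pass also merges into an existing `with` clause and recurses through nested `extra_ctes`:

```python
from typing import List, Tuple


def inject_ctes(sql: str, ctes: List[Tuple[str, str]]) -> str:
    # Each entry pairs an ephemeral model's CTE name with its compiled SQL.
    if not ctes:
        return sql
    with_block = ", ".join(f"{name} as ({body})" for name, body in ctes)
    return f"with {with_block}\n{sql}"


print(
    inject_ctes(
        "select * from __dbt__cte__stg_events",
        [("__dbt__cte__stg_events", "select * from raw.events")],
    )
)
# with __dbt__cte__stg_events as (select * from raw.events)
# select * from __dbt__cte__stg_events
```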
""" @@ -338,10 +338,10 @@ def build_node_edges(nodes: List[ManifestNode]): # Build a map of children of macros and generic tests def build_macro_edges(nodes: List[Any]): forward_edges: Dict[str, List[str]] = { - n.unique_id: [] for n in nodes if n.unique_id.startswith("macro") or n.depends_on.macros + n.unique_id: [] for n in nodes if n.unique_id.startswith("macro") or n.depends_on_macros } for node in nodes: - for unique_id in node.depends_on.macros: + for unique_id in node.depends_on_macros: if unique_id in forward_edges.keys(): forward_edges[unique_id].append(node.unique_id) return _sort_values(forward_edges) @@ -1235,7 +1235,7 @@ def __post_serialize__(self, dct): return dct -def _check_duplicates(value: HasUniqueID, src: Mapping[str, HasUniqueID]): +def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]): if value.unique_id in src: raise_duplicate_resource_name(value, src[value.unique_id]) diff --git a/core/dbt/contracts/graph/nodes.py b/core/dbt/contracts/graph/nodes.py index a908167f49e..bc955e9503e 100644 --- a/core/dbt/contracts/graph/nodes.py +++ b/core/dbt/contracts/graph/nodes.py @@ -2,7 +2,6 @@ import time from dataclasses import dataclass, field from mashumaro.types import SerializableType -from pathlib import Path from typing import ( Optional, Union, @@ -12,7 +11,6 @@ Sequence, Tuple, Iterator, - TypeVar, ) from dbt.dataclass_schema import dbtClassMixin, ExtensibleDbtClassMixin @@ -20,11 +18,8 @@ from dbt.clients.system import write_file from dbt.contracts.files import FileHash from dbt.contracts.graph.unparsed import ( - UnparsedNode, - UnparsedDocumentation, Quoting, Docs, - UnparsedBaseNode, FreshnessThreshold, ExternalTable, HasYamlMetadata, @@ -64,96 +59,100 @@ SnapshotConfig, ) - -@dataclass -class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable): - name: str - description: str = "" - meta: Dict[str, Any] = field(default_factory=dict) - data_type: Optional[str] = None - quote: Optional[bool] = None - tags: List[str] = field(default_factory=list) - _extra: Dict[str, Any] = field(default_factory=dict) +# ===================================================================== +# This contains the classes for all of the nodes and node-like objects +# in the manifest. In the "nodes" dictionary of the manifest we find +# all of the objects in the ManifestNode union below. In addition the +# manifest contains "macros", "sources", "metrics", "exposures", "docs", +# and "disabled" dictionaries. +# +# The SeedNode is a ManifestNode, but can't be compiled because it has +# no SQL. +# +# All objects defined in this file should have BaseNode as a parent +# class. +# +# The two objects which do not show up in the DAG are Macro and +# Documentation. 
+# ===================================================================== + + +# ================================================== +# Various parent classes and node attribute classes +# ================================================== @dataclass -class HasFqn(dbtClassMixin, Replaceable): - fqn: List[str] +class BaseNode(dbtClassMixin, Replaceable): + """All nodes or node-like objects in this file should have this as a base class""" - def same_fqn(self, other: "HasFqn") -> bool: - return self.fqn == other.fqn + name: str + resource_type: NodeType + package_name: str + path: str + original_file_path: str + unique_id: str + @property + def search_name(self): + return self.name -@dataclass -class HasUniqueID(dbtClassMixin, Replaceable): - unique_id: str + @property + def file_id(self): + return f"{self.package_name}://{self.original_file_path}" + @property + def is_refable(self): + return self.resource_type in NodeType.refable() -@dataclass -class MacroDependsOn(dbtClassMixin, Replaceable): - macros: List[str] = field(default_factory=list) + @property + def should_store_failures(self): + return False - # 'in' on lists is O(n) so this is O(n^2) for # of macros - def add_macro(self, value: str): - if value not in self.macros: - self.macros.append(value) + # will this node map to an object in the database? + @property + def is_relational(self): + return self.resource_type in NodeType.refable() + @property + def is_ephemeral(self): + return self.config.materialized == "ephemeral" -@dataclass -class InjectedCTE(dbtClassMixin, Replaceable): - id: str - sql: str + @property + def is_ephemeral_model(self): + return self.is_refable and self.is_ephemeral + + def get_materialization(self): + return self.config.materialized @dataclass -class CompiledNode: - compiled: bool = False - compiled_code: Optional[str] = None - extra_ctes_injected: bool = False - extra_ctes: List[InjectedCTE] = field(default_factory=list) - relation_name: Optional[str] = None - _pre_injected_sql: Optional[str] = None +class GraphNode(BaseNode): + """Nodes in the DAG. 
Macro and Documentation don't have fqn.""" - def set_cte(self, cte_id: str, sql: str): - """This is the equivalent of what self.extra_ctes[cte_id] = sql would - do if extra_ctes were an OrderedDict - """ - for cte in self.extra_ctes: - if cte.id == cte_id: - cte.sql = sql - break - else: - self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) + fqn: List[str] - def __post_serialize__(self, dct): - dct = super().__post_serialize__(dct) - if "_pre_injected_sql" in dct: - del dct["_pre_injected_sql"] - # Remove compiled attributes - if "compiled" in dct and dct["compiled"] is False: - del dct["compiled"] - del dct["extra_ctes_injected"] - del dct["extra_ctes"] - # "omit_none" means these might not be in the dictionary - if "compiled_code" in dct: - del dct["compiled_code"] - if "relation_name" in dct: - del dct["relation_name"] - return dct + def same_fqn(self, other) -> bool: + return self.fqn == other.fqn @dataclass -class DependsOn(MacroDependsOn): - nodes: List[str] = field(default_factory=list) +class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable): + """Used in all ManifestNodes and SourceDefinition""" - def add_node(self, value: str): - if value not in self.nodes: - self.nodes.append(value) + name: str + description: str = "" + meta: Dict[str, Any] = field(default_factory=dict) + data_type: Optional[str] = None + quote: Optional[bool] = None + tags: List[str] = field(default_factory=list) + _extra: Dict[str, Any] = field(default_factory=dict) +# Metrics, exposures, @dataclass class HasRelationMetadata(dbtClassMixin, Replaceable): - database: Optional[str] + database: str schema: str # Can't set database to None like it ought to be @@ -167,57 +166,29 @@ def __pre_deserialize__(cls, data): return data -class ParsedNodeMixins(dbtClassMixin): - resource_type: NodeType - depends_on: DependsOn - config: NodeConfig - - @property - def is_refable(self): - return self.resource_type in NodeType.refable() - - @property - def should_store_failures(self): - return self.resource_type == NodeType.Test and ( - self.config.store_failures - if self.config.store_failures is not None - else flags.STORE_FAILURES - ) - - # will this node map to an object in the database? 
- @property - def is_relational(self): - return self.resource_type in NodeType.refable() or self.should_store_failures +@dataclass +class MacroDependsOn(dbtClassMixin, Replaceable): + """Used only in the Macro class""" - @property - def is_ephemeral(self): - return self.config.materialized == "ephemeral" + macros: List[str] = field(default_factory=list) - @property - def is_ephemeral_model(self): - return self.is_refable and self.is_ephemeral + # 'in' on lists is O(n) so this is O(n^2) for # of macros + def add_macro(self, value: str): + if value not in self.macros: + self.macros.append(value) - @property - def depends_on_nodes(self): - return self.depends_on.nodes - def patch(self, patch: "ParsedNodePatch"): - """Given a ParsedNodePatch, add the new information to the node.""" - # explicitly pick out the parts to update so we don't inadvertently - # step on the model name or anything - # Note: config should already be updated - self.patch_path: Optional[str] = patch.file_id - # update created_at so process_docs will run in partial parsing - self.created_at = time.time() - self.description = patch.description - self.columns = patch.columns +@dataclass +class DependsOn(MacroDependsOn): + nodes: List[str] = field(default_factory=list) - def get_materialization(self): - return self.config.materialized + def add_node(self, value: str): + if value not in self.nodes: + self.nodes.append(value) @dataclass -class ParsedNodeMandatory(UnparsedNode, HasUniqueID, HasFqn, HasRelationMetadata, Replaceable): +class ParsedNodeMandatory(GraphNode, HasRelationMetadata, Replaceable): alias: str checksum: FileHash config: NodeConfig = field(default_factory=NodeConfig) @@ -227,6 +198,8 @@ def identifier(self): return self.alias +# This needs to be in all ManifestNodes and also in SourceDefinition, +# because of "source freshness" @dataclass class NodeInfoMixin: _event_status: Dict[str, Any] = field(default_factory=dict) @@ -256,23 +229,20 @@ def clear_event_status(self): @dataclass -class ParsedNodeDefaults(NodeInfoMixin, CompiledNode, ParsedNodeMandatory): +class ParsedNode(NodeInfoMixin, ParsedNodeMandatory, SerializableType): tags: List[str] = field(default_factory=list) - refs: List[List[str]] = field(default_factory=list) - sources: List[List[str]] = field(default_factory=list) - metrics: List[List[str]] = field(default_factory=list) - depends_on: DependsOn = field(default_factory=DependsOn) description: str = field(default="") columns: Dict[str, ColumnInfo] = field(default_factory=dict) meta: Dict[str, Any] = field(default_factory=dict) docs: Docs = field(default_factory=Docs) patch_path: Optional[str] = None - compiled_path: Optional[str] = None build_path: Optional[str] = None deferred: bool = False unrendered_config: Dict[str, Any] = field(default_factory=dict) created_at: float = field(default_factory=lambda: time.time()) config_call_dict: Dict[str, Any] = field(default_factory=dict) + relation_name: Optional[str] = None + raw_code: str = "" def write_node(self, target_path: str, subdirectory: str, payload: str): if os.path.basename(self.path) == os.path.basename(self.original_file_path): @@ -286,12 +256,6 @@ def write_node(self, target_path: str, subdirectory: str, payload: str): write_file(full_path, payload) return full_path - -T = TypeVar("T", bound="ParsedNode") - - -@dataclass -class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType): def _serialize(self): return self.to_dict() @@ -343,10 +307,7 @@ def _persist_relation_docs(self) -> bool: return 
bool(self.config.persist_docs.get("relation")) return False - def same_body(self: T, other: T) -> bool: - return self.raw_code == other.raw_code - - def same_persisted_description(self: T, other: T) -> bool: + def same_persisted_description(self, other) -> bool: # the check on configs will handle the case where we have different # persist settings, so we only have to care about the cases where they # are the same.. @@ -363,7 +324,10 @@ def same_persisted_description(self: T, other: T) -> bool: return True - def same_database_representation(self, other: T) -> bool: + def same_body(self, other) -> bool: + return self.raw_code == other.raw_code + + def same_database_representation(self, other) -> bool: # compare the config representation, not the node's config value. This # compares the configured value, rather than the ultimate value (so # generate_*_name and unset values derived from the target are @@ -376,13 +340,24 @@ def same_database_representation(self, other: T) -> bool: return False return True - def same_config(self, old: T) -> bool: + def same_config(self, old) -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self: T, old: Optional[T]) -> bool: + def patch(self, patch: "ParsedNodePatch"): + """Given a ParsedNodePatch, add the new information to the node.""" + # explicitly pick out the parts to update so we don't inadvertently + # step on the model name or anything + # Note: config should already be updated + self.patch_path: Optional[str] = patch.file_id + # update created_at so process_docs will run in partial parsing + self.created_at = time.time() + self.description = patch.description + self.columns = patch.columns + + def same_contents(self, old) -> bool: if old is None: return False @@ -397,102 +372,198 @@ def same_contents(self: T, old: Optional[T]) -> bool: @dataclass -class AnalysisNode(ParsedNode): +class InjectedCTE(dbtClassMixin, Replaceable): + """Used in CompiledNodes as part of ephemeral model processing""" + + id: str + sql: str + + +@dataclass +class CompiledNode(ParsedNode): + """Contains attributes necessary for SQL files and nodes with refs, sources, etc, + so all ManifestNodes except SeedNode.""" + + language: str = "sql" + refs: List[List[str]] = field(default_factory=list) + sources: List[List[str]] = field(default_factory=list) + metrics: List[List[str]] = field(default_factory=list) + depends_on: DependsOn = field(default_factory=DependsOn) + compiled_path: Optional[str] = None + compiled: bool = False + compiled_code: Optional[str] = None + extra_ctes_injected: bool = False + extra_ctes: List[InjectedCTE] = field(default_factory=list) + _pre_injected_sql: Optional[str] = None + + @property + def empty(self): + return not self.raw_code.strip() + + def set_cte(self, cte_id: str, sql: str): + """This is the equivalent of what self.extra_ctes[cte_id] = sql would + do if extra_ctes were an OrderedDict + """ + for cte in self.extra_ctes: + if cte.id == cte_id: + cte.sql = sql + break + else: + self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) + + def __post_serialize__(self, dct): + dct = super().__post_serialize__(dct) + if "_pre_injected_sql" in dct: + del dct["_pre_injected_sql"] + # Remove compiled attributes + if "compiled" in dct and dct["compiled"] is False: + del dct["compiled"] + del dct["extra_ctes_injected"] + del dct["extra_ctes"] + # "omit_none" means these might not be in the dictionary + if "compiled_code" in dct: + del dct["compiled_code"] + return dct + + @property + def 
depends_on_nodes(self): + return self.depends_on.nodes + + @property + def depends_on_macros(self): + return self.depends_on.macros + + +# ==================================== +# CompiledNode subclasses +# ==================================== + + +@dataclass +class AnalysisNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]}) @dataclass -class HookNode(ParsedNode): +class HookNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]}) index: Optional[int] = None @dataclass -class ModelNode(ParsedNode): +class ModelNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]}) # TODO: rm? @dataclass -class RPCNode(ParsedNode): +class RPCNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]}) @dataclass -class SqlNode(ParsedNode): +class SqlNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]}) -def same_seeds(first: ParsedNode, second: ParsedNode) -> bool: - # for seeds, we check the hashes. If the hashes are different types, - # no match. If the hashes are both the same 'path', log a warning and - # assume they are the same - # if the current checksum is a path, we want to log a warning. - result = first.checksum == second.checksum - - if first.checksum.name == "path": - msg: str - if second.checksum.name != "path": - warn_or_error( - SeedIncreased(package_name=first.package_name, name=first.name), node=first - ) - elif result: - warn_or_error( - SeedExceedsLimitSamePath(package_name=first.package_name, name=first.name), - node=first, - ) - elif not result: - warn_or_error( - SeedExceedsLimitAndPathChanged(package_name=first.package_name, name=first.name), - node=first, - ) - else: - warn_or_error( - SeedExceedsLimitChecksumChanged( - package_name=first.package_name, - name=first.name, - checksum_name=second.checksum.name, - ), - node=first, - ) - - return result +# ==================================== +# Seed node +# ==================================== @dataclass -class SeedNode(ParsedNode): +class SeedNode(ParsedNode): # No SQLDefaults! resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) config: SeedConfig = field(default_factory=SeedConfig) # seeds need the root_path because the contents are not loaded initially # and we need the root_path to load the seed later root_path: Optional[str] = None + def same_seeds(self, other: "SeedNode") -> bool: + # for seeds, we check the hashes. If the hashes are different types, + # no match. If the hashes are both the same 'path', log a warning and + # assume they are the same + # if the current checksum is a path, we want to log a warning. 
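+ # Illustrative aside, not part of this patch: a small seed normally gets a
+ # real content hash (e.g. FileHash(name="sha256", checksum="3b2a...")), so
+ # equality below means the file contents actually match. A seed over dbt's
+ # size limit is checksummed as FileHash(name="path", checksum=<its path>),
+ # which can only detect renames; that is what the warnings below are about.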
+ result = self.checksum == other.checksum + + if self.checksum.name == "path": + msg: str + if other.checksum.name != "path": + warn_or_error( + SeedIncreased(package_name=self.package_name, name=self.name), node=self + ) + elif result: + warn_or_error( + SeedExceedsLimitSamePath(package_name=self.package_name, name=self.name), + node=self, + ) + elif not result: + warn_or_error( + SeedExceedsLimitAndPathChanged(package_name=self.package_name, name=self.name), + node=self, + ) + else: + warn_or_error( + SeedExceedsLimitChecksumChanged( + package_name=self.package_name, + name=self.name, + checksum_name=other.checksum.name, + ), + node=self, + ) + + return result + @property def empty(self): """Seeds are never empty""" return False - def same_body(self: T, other: T) -> bool: - return same_seeds(self, other) + def same_body(self, other) -> bool: + return self.same_seeds(other) + @property + def depends_on_nodes(self): + return [] -@dataclass -class TestMetadata(dbtClassMixin, Replaceable): - name: str - # kwargs are the args that are left in the test builder after - # removing configs. They are set from the test builder when - # the test node is created. - kwargs: Dict[str, Any] = field(default_factory=dict) - namespace: Optional[str] = None + @property + def depends_on_macros(self): + return [] + @property + def extra_ctes(self): + return [] -@dataclass -class HasTestMetadata(dbtClassMixin): - test_metadata: TestMetadata + @property + def extra_ctes_injected(self): + return False + + @property + def language(self): + return "sql" + + +# ==================================== +# Singular Test node +# ==================================== + + +class TestShouldStoreFailures: + @property + def should_store_failures(self): + if self.config.store_failures: + return self.config.store_failures + return flags.STORE_FAILURES + + @property + def is_relational(self): + if self.should_store_failures: + return True + return False @dataclass -class SingularTestNode(ParsedNode): +class SingularTestNode(TestShouldStoreFailures, CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) # Was not able to make mypy happy and keep the code working. We need to # refactor the various configs. @@ -503,8 +574,30 @@ def test_node_type(self): return "singular" +# ==================================== +# Generic Test node +# ==================================== + + @dataclass -class GenericTestNode(ParsedNode, HasTestMetadata): +class TestMetadata(dbtClassMixin, Replaceable): + name: str + # kwargs are the args that are left in the test builder after + # removing configs. They are set from the test builder when + # the test node is created. 
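+ # Illustrative aside, not part of this patch: for a generic test such as
+ # `unique` on column `id`, kwargs typically ends up looking something like
+ # {"column_name": "id", "model": "{{ get_where_subquery(ref('my_model')) }}"},
+ # where 'my_model' stands in for whichever model the test is attached to.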
+ kwargs: Dict[str, Any] = field(default_factory=dict) + namespace: Optional[str] = None + + +# This has to be separated out because it has no default and so +# has to be included as a superclass, not an attribute +@dataclass +class HasTestMetadata(dbtClassMixin): + test_metadata: TestMetadata + + +@dataclass +class GenericTestNode(TestShouldStoreFailures, CompiledNode, HasTestMetadata): resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) column_name: Optional[str] = None file_key_name: Optional[str] = None @@ -523,54 +616,39 @@ def test_node_type(self): return "generic" +# ==================================== +# Snapshot node +# ==================================== + + @dataclass -class IntermediateSnapshotNode(ParsedNode): +class IntermediateSnapshotNode(CompiledNode): # at an intermediate stage in parsing, where we've built something better # than an unparsed node for rendering in parse mode, it's pretty possible # that we won't have critical snapshot-related information that is only # defined in config blocks. To fix that, we have an intermediate type that # uses a regular node config, which the snapshot parser will then convert - # into a full ParsedSnapshotNode after rendering. + # into a full ParsedSnapshotNode after rendering. Note: it currently does + # not work to set snapshot config in schema files because of the validation. resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) config: EmptySnapshotConfig = field(default_factory=EmptySnapshotConfig) @dataclass -class SnapshotNode(ParsedNode): +class SnapshotNode(CompiledNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) config: SnapshotConfig -@dataclass -class ParsedPatch(HasYamlMetadata, Replaceable): - name: str - description: str - meta: Dict[str, Any] - docs: Docs - config: Dict[str, Any] +# ==================================== +# Macro +# ==================================== -# The parsed node update is only the 'patch', not the test. The test became a -# regular parsed node. Note that description and columns must be present, but -# may be empty. @dataclass -class ParsedNodePatch(ParsedPatch): - columns: Dict[str, ColumnInfo] - - -@dataclass -class ParsedMacroPatch(ParsedPatch): - arguments: List[MacroArgument] = field(default_factory=list) - - -@dataclass -class Macro(UnparsedBaseNode, HasUniqueID): - name: str +class Macro(BaseNode): macro_sql: str resource_type: NodeType = field(metadata={"restrict": [NodeType.Macro]}) - # TODO: can macros even have tags? - tags: List[str] = field(default_factory=list) - # TODO: is this ever populated? 
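+ # Illustrative aside, not part of this patch: MacroDependsOn (defined above)
+ # tracks only macro-to-macro edges, so a Macro's depends_on never lists
+ # models or sources; node-level edges live on DependsOn for ManifestNodes.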
depends_on: MacroDependsOn = field(default_factory=MacroDependsOn) description: str = "" meta: Dict[str, Any] = field(default_factory=dict) @@ -580,7 +658,7 @@ class Macro(UnparsedBaseNode, HasUniqueID): created_at: float = field(default_factory=lambda: time.time()) supported_languages: Optional[List[ModelLanguage]] = None - def patch(self, patch: ParsedMacroPatch): + def patch(self, patch: "ParsedMacroPatch"): self.patch_path: Optional[str] = patch.file_id self.description = patch.description self.created_at = time.time() @@ -595,11 +673,20 @@ def same_contents(self, other: Optional["Macro"]) -> bool: # same name/package is its content return self.macro_sql == other.macro_sql + @property + def depends_on_macros(self): + return self.depends_on.macros + + +# ==================================== +# Documentation node +# ==================================== + @dataclass -class Documentation(UnparsedDocumentation, HasUniqueID): - name: str +class Documentation(BaseNode): block_contents: str + resource_type: NodeType = field(metadata={"restrict": [NodeType.Documentation]}) @property def search_name(self): @@ -613,6 +700,11 @@ def same_contents(self, other: Optional["Documentation"]) -> bool: return self.block_contents == other.block_contents +# ==================================== +# Source node +# ==================================== + + def normalize_test(testdef: TestDef) -> Dict[str, Any]: if isinstance(testdef, str): return {testdef: {}} @@ -621,11 +713,12 @@ def normalize_test(testdef: TestDef) -> Dict[str, Any]: @dataclass -class UnpatchedSourceDefinition(UnparsedBaseNode, HasUniqueID, HasFqn): +class UnpatchedSourceDefinition(BaseNode): source: UnparsedSourceDefinition table: UnparsedSourceTableDefinition + fqn: List[str] resource_type: NodeType = field(metadata={"restrict": [NodeType.Source]}) - patch_path: Optional[Path] = None + patch_path: Optional[str] = None def get_full_source_name(self): return f"{self.source.name}_{self.table.name}" @@ -633,10 +726,6 @@ def get_full_source_name(self): def get_source_representation(self): return f'source("{self.source.name}", "{self.table.name}")' - @property - def name(self) -> str: - return self.get_full_source_name() - @property def quote_columns(self) -> Optional[bool]: result = None @@ -668,13 +757,7 @@ def tests(self) -> List[TestDef]: @dataclass -class ParsedSourceMandatory( - UnparsedBaseNode, - HasUniqueID, - HasRelationMetadata, - HasFqn, -): - name: str +class ParsedSourceMandatory(GraphNode, HasRelationMetadata): source_name: str source_description: str loader: str @@ -694,7 +777,7 @@ class SourceDefinition(NodeInfoMixin, ParsedSourceMandatory): source_meta: Dict[str, Any] = field(default_factory=dict) tags: List[str] = field(default_factory=list) config: SourceConfig = field(default_factory=SourceConfig) - patch_path: Optional[Path] = None + patch_path: Optional[str] = None unrendered_config: Dict[str, Any] = field(default_factory=dict) relation_name: Optional[str] = None created_at: float = field(default_factory=lambda: time.time()) @@ -797,12 +880,16 @@ def search_name(self): return f"{self.source_name}.{self.name}" +# ==================================== +# Exposure node +# ==================================== + + @dataclass -class Exposure(UnparsedBaseNode, HasUniqueID, HasFqn): - name: str +class Exposure(GraphNode): type: ExposureType owner: ExposureOwner - resource_type: NodeType = NodeType.Exposure + resource_type: NodeType = field(metadata={"restrict": [NodeType.Exposure]}) description: str = "" label: Optional[str] = None 
maturity: Optional[MaturityType] = None @@ -872,6 +959,11 @@ def same_contents(self, old: Optional["Exposure"]) -> bool: ) +# ==================================== +# Metric node +# ==================================== + + @dataclass class MetricReference(dbtClassMixin, Replaceable): sql: Optional[Union[str, int]] @@ -879,7 +971,7 @@ class MetricReference(dbtClassMixin, Replaceable): @dataclass -class Metric(UnparsedBaseNode, HasUniqueID, HasFqn): +class Metric(GraphNode): name: str description: str label: str @@ -889,10 +981,10 @@ class Metric(UnparsedBaseNode, HasUniqueID, HasFqn): filters: List[MetricFilter] time_grains: List[str] dimensions: List[str] + resource_type: NodeType = field(metadata={"restrict": [NodeType.Metric]}) window: Optional[MetricTime] = None model: Optional[str] = None model_unique_id: Optional[str] = None - resource_type: NodeType = NodeType.Metric meta: Dict[str, Any] = field(default_factory=dict) tags: List[str] = field(default_factory=list) config: MetricConfig = field(default_factory=MetricConfig) @@ -969,7 +1061,41 @@ def same_contents(self, old: Optional["Metric"]) -> bool: ) -ManifestNode = Union[ +# ==================================== +# Patches +# ==================================== + + +@dataclass +class ParsedPatch(HasYamlMetadata, Replaceable): + name: str + description: str + meta: Dict[str, Any] + docs: Docs + config: Dict[str, Any] + + +# The parsed node update is only the 'patch', not the test. The test became a +# regular parsed node. Note that description and columns must be present, but +# may be empty. +@dataclass +class ParsedNodePatch(ParsedPatch): + columns: Dict[str, ColumnInfo] + + +@dataclass +class ParsedMacroPatch(ParsedPatch): + arguments: List[MacroArgument] = field(default_factory=list) + + +# ==================================== +# Node unions/categories +# ==================================== + + +# ManifestNode without SeedNode, which doesn't have the +# SQL related attributes +ManifestSQLNode = Union[ AnalysisNode, SingularTestNode, HookNode, @@ -977,29 +1103,32 @@ def same_contents(self, old: Optional["Metric"]) -> bool: RPCNode, SqlNode, GenericTestNode, - SeedNode, SnapshotNode, ] +# All SQL nodes plus SeedNode (csv files) +ManifestNode = Union[ + ManifestSQLNode, + SeedNode, +] + ResultNode = Union[ ManifestNode, SourceDefinition, ] +# All nodes that can be in the DAG GraphMemberNode = Union[ ResultNode, Exposure, Metric, ] - +# All "nodes" (or node-like objects) in this file Resource = Union[ + GraphMemberNode, Documentation, Macro, - ParsedNode, - Exposure, - Metric, - SourceDefinition, ] TestNode = Union[ diff --git a/core/dbt/contracts/util.py b/core/dbt/contracts/util.py index 354052b67e4..99f7a35c66d 100644 --- a/core/dbt/contracts/util.py +++ b/core/dbt/contracts/util.py @@ -237,18 +237,43 @@ def rename_sql_attr(node_content: dict) -> dict: return node_content +def upgrade_node_content(node_content): + rename_sql_attr(node_content) + if node_content["resource_type"] != "seed" and "root_path" in node_content: + del node_content["root_path"] + + +def upgrade_seed_content(node_content): + # Remove compilation related attributes + for attr_name in ( + "language", + "refs", + "sources", + "metrics", + "depends_on", + "compiled_path", + "compiled", + "compiled_code", + "extra_ctes_injected", + "extra_ctes", + "relation_name", + ): + if attr_name in node_content: + del node_content[attr_name] + + def upgrade_manifest_json(manifest: dict) -> dict: for node_content in manifest.get("nodes", {}).values(): - node_content = 
rename_sql_attr(node_content) - if node_content["resource_type"] != "seed" and "root_path" in node_content: - del node_content["root_path"] + upgrade_node_content(node_content) + if node_content["resource_type"] == "seed": + upgrade_seed_content(node_content) for disabled in manifest.get("disabled", {}).values(): # There can be multiple disabled nodes for the same unique_id # so make sure all the nodes get the attr renamed for node_content in disabled: - rename_sql_attr(node_content) - if node_content["resource_type"] != "seed" and "root_path" in node_content: - del node_content["root_path"] + upgrade_node_content(node_content) + if node_content["resource_type"] == "seed": + upgrade_seed_content(node_content) for metric_content in manifest.get("metrics", {}).values(): # handle attr renames + value translation ("expression" -> "derived") metric_content = rename_metric_attr(metric_content) @@ -266,6 +291,7 @@ def upgrade_manifest_json(manifest: dict) -> dict: for doc_content in manifest.get("docs", {}).values(): if "root_path" in doc_content: del doc_content["root_path"] + doc_content["resource_type"] = "doc" return manifest diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index f5f93cf5886..0a0cd04fe1d 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -2400,7 +2400,7 @@ def code(self): return "Z011" def message(self) -> str: - return "" + return f"{self.exc_info}" # We don't write "clean" events to the log, because the clean command diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index 0ed8ac50b0a..ed91596712b 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -217,7 +217,7 @@ def expand_selection( if can_select_indirectly(node): # should we add it in directly? if indirect_selection == IndirectSelection.Eager or set( - node.depends_on.nodes + node.depends_on_nodes ) <= set(selected): direct_nodes.add(unique_id) # if not: @@ -241,7 +241,7 @@ def incorporate_indirect_nodes( for unique_id in indirect_nodes: if unique_id in self.manifest.nodes: node = self.manifest.nodes[unique_id] - if set(node.depends_on.nodes) <= set(selected): + if set(node.depends_on_nodes) <= set(selected): selected.add(unique_id) return selected diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py index f7044ecaf32..c77625649bc 100644 --- a/core/dbt/graph/selector_methods.py +++ b/core/dbt/graph/selector_methods.py @@ -9,7 +9,6 @@ from dbt.contracts.graph.manifest import Manifest, WritableManifest from dbt.contracts.graph.nodes import ( - HasTestMetadata, SingularTestNode, Exposure, Metric, @@ -377,8 +376,8 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu class TestNameSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: for node, real_node in self.parsed_nodes(included_nodes): - if isinstance(real_node, HasTestMetadata): - if real_node.test_metadata.name == selector: + if real_node.resource_type == NodeType.Test and hasattr(real_node, "test_metadata"): + if real_node.test_metadata.name == selector: # type: ignore[union-attr] yield node @@ -428,6 +427,9 @@ def _macros_modified(self) -> List[str]: return modified def recursively_check_macros_modified(self, node, visited_macros): + if not hasattr(node, "depends_on"): + return False + for macro_uid in node.depends_on.macros: if macro_uid in visited_macros: continue diff --git a/core/dbt/node_types.py b/core/dbt/node_types.py index a6fa5ff4f84..ec7517d2029 100644 
--- a/core/dbt/node_types.py +++ b/core/dbt/node_types.py @@ -13,7 +13,7 @@ class NodeType(StrEnum): # TODO: rm? RPCCall = "rpc" SqlOperation = "sql operation" - Documentation = "docs block" + Documentation = "doc" Source = "source" Macro = "macro" Exposure = "exposure" diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 7fff4daebf4..bfcd3b20e14 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -16,7 +16,7 @@ from dbt.config import Project, RuntimeConfig from dbt.context.context_config import ContextConfig from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.nodes import HasUniqueID, ManifestNode +from dbt.contracts.graph.nodes import ManifestNode, BaseNode from dbt.contracts.graph.unparsed import UnparsedNode, Docs from dbt.exceptions import ParsingException, validator_error_message, InternalException from dbt import hooks @@ -26,8 +26,8 @@ # internally, the parser may store a less-restrictive type that will be # transformed into the final type. But it will have to be derived from # ParsedNode to be operable. -FinalValue = TypeVar("FinalValue", bound=HasUniqueID) -IntermediateValue = TypeVar("IntermediateValue", bound=HasUniqueID) +FinalValue = TypeVar("FinalValue", bound=BaseNode) +IntermediateValue = TypeVar("IntermediateValue", bound=BaseNode) IntermediateNode = TypeVar("IntermediateNode", bound=Any) FinalNode = TypeVar("FinalNode", bound=ManifestNode) @@ -253,12 +253,13 @@ def update_parsed_node_config_dict( self._mangle_hooks(final_config_dict) parsed_node.config = parsed_node.config.from_dict(final_config_dict) - def update_parsed_node_name( + def update_parsed_node_relation_names( self, parsed_node: IntermediateNode, config_dict: Dict[str, Any] ) -> None: self._update_node_database(parsed_node, config_dict) self._update_node_schema(parsed_node, config_dict) self._update_node_alias(parsed_node, config_dict) + self._update_node_relation_name(parsed_node) def update_parsed_node_config( self, @@ -317,7 +318,7 @@ def update_parsed_node_config( # parsed_node.config is what it would be if they did nothing self.update_parsed_node_config_dict(parsed_node, config_dict) # This updates the node database/schema/alias - self.update_parsed_node_name(parsed_node, config_dict) + self.update_parsed_node_relation_names(parsed_node, config_dict) # tests don't have hooks if parsed_node.resource_type == NodeType.Test: @@ -388,6 +389,19 @@ def parse_node(self, block: ConfiguredBlockType) -> FinalNode: self.add_result_node(block, result) return result + def _update_node_relation_name(self, node: ManifestNode): + # Seed and Snapshot nodes and Models that are not ephemeral, + # and TestNodes that store_failures. + # TestNodes do not get a relation_name without store failures + # because no schema is created. 
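+ # Illustrative aside, not part of this patch: a non-ephemeral table model
+ # typically ends up with relation_name rendered like
+ # '"analytics"."dbt_prod"."my_model"' (hypothetical names), while an
+ # ephemeral model or a test without store_failures keeps relation_name
+ # as None via the else branch below.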
+ if node.is_relational and not node.is_ephemeral_model: + adapter = get_adapter(self.root_project) + relation_cls = adapter.Relation + node.relation_name = str(relation_cls.create_from(self.root_project, node)) + else: + # Set it to None in case it changed with a config update + node.relation_name = None + @abc.abstractmethod def parse_file(self, file_block: FileBlock) -> None: pass diff --git a/core/dbt/parser/docs.py b/core/dbt/parser/docs.py index fb9b488276e..edc7f83acfc 100644 --- a/core/dbt/parser/docs.py +++ b/core/dbt/parser/docs.py @@ -23,9 +23,8 @@ def get_compiled_path(cls, block: FileBlock): return block.path.relative_path def generate_unique_id(self, resource_name: str, _: Optional[str] = None) -> str: - # because docs are in their own graph namespace, node type doesn't - # need to be part of the unique ID. - return "{}.{}".format(self.project.project_name, resource_name) + # For consistency, use the same format for doc unique_ids + return f"doc.{self.project.project_name}.{resource_name}" def parse_block(self, block: BlockContents) -> Iterable[Documentation]: unique_id = self.generate_unique_id(block.name) @@ -38,6 +37,7 @@ def parse_block(self, block: BlockContents) -> Iterable[Documentation]: unique_id=unique_id, name=block.name, block_contents=contents, + resource_type=NodeType.Documentation, ) return [doc] diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 21594a93318..2e284b43cfa 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -62,12 +62,13 @@ ) from dbt.contracts.graph.nodes import ( SourceDefinition, - ParsedNode, Macro, ColumnInfo, Exposure, Metric, + SeedNode, ManifestNode, + ResultNode, ) from dbt.contracts.util import Writable from dbt.exceptions import ( @@ -1040,7 +1041,7 @@ def _check_manifest(manifest: Manifest, config: RuntimeConfig) -> None: def _get_node_column(node, column_name): - """Given a ParsedNode, add some fields that might be missing. Return a + """Given a ManifestNode, add some fields that might be missing. Return a reference to the dict that refers to the given column, creating it if it doesn't yet exist. 
""" @@ -1053,7 +1054,7 @@ def _get_node_column(node, column_name): return column -DocsContextCallback = Callable[[Union[ParsedNode, SourceDefinition]], Dict[str, Any]] +DocsContextCallback = Callable[[ResultNode], Dict[str, Any]] # node and column descriptions @@ -1191,6 +1192,10 @@ def _process_metrics_for_node( node: Union[ManifestNode, Metric, Exposure], ): """Given a manifest and a node in that manifest, process its metrics""" + + if isinstance(node, SeedNode): + return + for metric in node.metrics: target_metric: Optional[Union[Disabled, Metric]] = None target_metric_name: str @@ -1232,6 +1237,10 @@ def _process_metrics_for_node( def _process_refs_for_node(manifest: Manifest, current_project: str, node: ManifestNode): """Given a manifest and a node in that manifest, process its refs""" + + if isinstance(node, SeedNode): + return + for ref in node.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None target_model_name: str @@ -1323,6 +1332,10 @@ def _process_sources_for_metric(manifest: Manifest, current_project: str, metric def _process_sources_for_node(manifest: Manifest, current_project: str, node: ManifestNode): + + if isinstance(node, SeedNode): + return + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in node.sources: target_source = manifest.resolve_source( diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 5756ed4ba02..831647d0322 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -731,6 +731,7 @@ def add_source_definitions(self, source: UnparsedSourceDefinition) -> None: unique_id=unique_id, resource_type=NodeType.Source, fqn=fqn, + name=f"{source.name}_{table.name}", ) self.manifest.add_source(self.yaml.file, source_def) @@ -1028,6 +1029,7 @@ def parse_exposure(self, unparsed: UnparsedExposure): ) parsed = Exposure( + resource_type=NodeType.Exposure, package_name=package_name, path=path, original_file_path=self.yaml.path.original_file_path, @@ -1131,6 +1133,7 @@ def parse_metric(self, unparsed: UnparsedMetric): ) parsed = Metric( + resource_type=NodeType.Metric, package_name=package_name, path=path, original_file_path=self.yaml.path.original_file_path, diff --git a/core/dbt/parser/seeds.py b/core/dbt/parser/seeds.py index 02c20df7cf5..23c77e1ed7c 100644 --- a/core/dbt/parser/seeds.py +++ b/core/dbt/parser/seeds.py @@ -9,6 +9,9 @@ class SeedParser(SimpleSQLParser[SeedNode]): def parse_from_dict(self, dct, validate=True) -> SeedNode: # seeds need the root_path because the contents are not loaded dct["root_path"] = self.project.project_root + if "language" in dct: + del dct["language"] + # raw_code is not currently used, but it might be in the future if validate: SeedNode.validate(dct) return SeedNode.from_dict(dct) diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py index 754a0341e69..7fc46d1a05a 100644 --- a/core/dbt/parser/snapshots.py +++ b/core/dbt/parser/snapshots.py @@ -38,6 +38,8 @@ def set_snapshot_attributes(self, node): # the target schema must be set if we got here, so overwrite the node's # schema node.schema = node.config.target_schema + # We need to set relation_name again, since database/schema might have changed + self._update_node_relation_name(node) return node diff --git a/schemas/dbt/manifest/v8.json b/schemas/dbt/manifest/v8.json index 9bf00fdc7f2..d92dc46b79c 100644 --- a/schemas/dbt/manifest/v8.json +++ b/schemas/dbt/manifest/v8.json @@ -20,58 +20,31 @@ "additionalProperties": { "oneOf": [ { - "$ref": 
"#/definitions/CompiledAnalysisNode" + "$ref": "#/definitions/AnalysisNode" }, { - "$ref": "#/definitions/CompiledSingularTestNode" + "$ref": "#/definitions/SingularTestNode" }, { - "$ref": "#/definitions/CompiledModelNode" + "$ref": "#/definitions/HookNode" }, { - "$ref": "#/definitions/CompiledHookNode" + "$ref": "#/definitions/ModelNode" }, { - "$ref": "#/definitions/CompiledRPCNode" + "$ref": "#/definitions/RPCNode" }, { - "$ref": "#/definitions/CompiledSqlNode" + "$ref": "#/definitions/SqlNode" }, { - "$ref": "#/definitions/CompiledGenericTestNode" + "$ref": "#/definitions/GenericTestNode" }, { - "$ref": "#/definitions/CompiledSeedNode" + "$ref": "#/definitions/SnapshotNode" }, { - "$ref": "#/definitions/CompiledSnapshotNode" - }, - { - "$ref": "#/definitions/ParsedAnalysisNode" - }, - { - "$ref": "#/definitions/ParsedSingularTestNode" - }, - { - "$ref": "#/definitions/ParsedHookNode" - }, - { - "$ref": "#/definitions/ParsedModelNode" - }, - { - "$ref": "#/definitions/ParsedRPCNode" - }, - { - "$ref": "#/definitions/ParsedSqlNode" - }, - { - "$ref": "#/definitions/ParsedGenericTestNode" - }, - { - "$ref": "#/definitions/ParsedSeedNode" - }, - { - "$ref": "#/definitions/ParsedSnapshotNode" + "$ref": "#/definitions/SeedNode" } ] }, @@ -80,35 +53,35 @@ "sources": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/ParsedSourceDefinition" + "$ref": "#/definitions/SourceDefinition" }, "description": "The sources defined in the dbt project and its dependencies" }, "macros": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/ParsedMacro" + "$ref": "#/definitions/Macro" }, "description": "The macros defined in the dbt project and its dependencies" }, "docs": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/ParsedDocumentation" + "$ref": "#/definitions/Documentation" }, "description": "The docs defined in the dbt project and its dependencies" }, "exposures": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/ParsedExposure" + "$ref": "#/definitions/Exposure" }, "description": "The exposures defined in the dbt project and its dependencies" }, "metrics": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/ParsedMetric" + "$ref": "#/definitions/Metric" }, "description": "The metrics defined in the dbt project and its dependencies" }, @@ -125,61 +98,34 @@ "items": { "oneOf": [ { - "$ref": "#/definitions/CompiledAnalysisNode" - }, - { - "$ref": "#/definitions/CompiledSingularTestNode" - }, - { - "$ref": "#/definitions/CompiledModelNode" - }, - { - "$ref": "#/definitions/CompiledHookNode" - }, - { - "$ref": "#/definitions/CompiledRPCNode" - }, - { - "$ref": "#/definitions/CompiledSqlNode" - }, - { - "$ref": "#/definitions/CompiledGenericTestNode" - }, - { - "$ref": "#/definitions/CompiledSeedNode" + "$ref": "#/definitions/AnalysisNode" }, { - "$ref": "#/definitions/CompiledSnapshotNode" + "$ref": "#/definitions/SingularTestNode" }, { - "$ref": "#/definitions/ParsedAnalysisNode" + "$ref": "#/definitions/HookNode" }, { - "$ref": "#/definitions/ParsedSingularTestNode" + "$ref": "#/definitions/ModelNode" }, { - "$ref": "#/definitions/ParsedHookNode" + "$ref": "#/definitions/RPCNode" }, { - "$ref": "#/definitions/ParsedModelNode" + "$ref": "#/definitions/SqlNode" }, { - "$ref": "#/definitions/ParsedRPCNode" + "$ref": "#/definitions/GenericTestNode" }, { - "$ref": "#/definitions/ParsedSqlNode" + "$ref": "#/definitions/SnapshotNode" }, { - "$ref": "#/definitions/ParsedGenericTestNode" + "$ref": 
"#/definitions/SeedNode" }, { - "$ref": "#/definitions/ParsedSeedNode" - }, - { - "$ref": "#/definitions/ParsedSnapshotNode" - }, - { - "$ref": "#/definitions/ParsedSourceDefinition" + "$ref": "#/definitions/SourceDefinition" } ] } @@ -227,7 +173,7 @@ } }, "additionalProperties": false, - "description": "WritableManifest(metadata: dbt.contracts.graph.manifest.ManifestMetadata, nodes: Mapping[str, Union[dbt.contracts.graph.compiled.CompiledAnalysisNode, dbt.contracts.graph.compiled.CompiledSingularTestNode, dbt.contracts.graph.compiled.CompiledModelNode, dbt.contracts.graph.compiled.CompiledHookNode, dbt.contracts.graph.compiled.CompiledRPCNode, dbt.contracts.graph.compiled.CompiledSqlNode, dbt.contracts.graph.compiled.CompiledGenericTestNode, dbt.contracts.graph.compiled.CompiledSeedNode, dbt.contracts.graph.compiled.CompiledSnapshotNode, dbt.contracts.graph.parsed.ParsedAnalysisNode, dbt.contracts.graph.parsed.ParsedSingularTestNode, dbt.contracts.graph.parsed.ParsedHookNode, dbt.contracts.graph.parsed.ParsedModelNode, dbt.contracts.graph.parsed.ParsedRPCNode, dbt.contracts.graph.parsed.ParsedSqlNode, dbt.contracts.graph.parsed.ParsedGenericTestNode, dbt.contracts.graph.parsed.ParsedSeedNode, dbt.contracts.graph.parsed.ParsedSnapshotNode]], sources: Mapping[str, dbt.contracts.graph.parsed.ParsedSourceDefinition], macros: Mapping[str, dbt.contracts.graph.parsed.ParsedMacro], docs: Mapping[str, dbt.contracts.graph.parsed.ParsedDocumentation], exposures: Mapping[str, dbt.contracts.graph.parsed.ParsedExposure], metrics: Mapping[str, dbt.contracts.graph.parsed.ParsedMetric], selectors: Mapping[str, Any], disabled: Optional[Mapping[str, List[Union[dbt.contracts.graph.compiled.CompiledAnalysisNode, dbt.contracts.graph.compiled.CompiledSingularTestNode, dbt.contracts.graph.compiled.CompiledModelNode, dbt.contracts.graph.compiled.CompiledHookNode, dbt.contracts.graph.compiled.CompiledRPCNode, dbt.contracts.graph.compiled.CompiledSqlNode, dbt.contracts.graph.compiled.CompiledGenericTestNode, dbt.contracts.graph.compiled.CompiledSeedNode, dbt.contracts.graph.compiled.CompiledSnapshotNode, dbt.contracts.graph.parsed.ParsedAnalysisNode, dbt.contracts.graph.parsed.ParsedSingularTestNode, dbt.contracts.graph.parsed.ParsedHookNode, dbt.contracts.graph.parsed.ParsedModelNode, dbt.contracts.graph.parsed.ParsedRPCNode, dbt.contracts.graph.parsed.ParsedSqlNode, dbt.contracts.graph.parsed.ParsedGenericTestNode, dbt.contracts.graph.parsed.ParsedSeedNode, dbt.contracts.graph.parsed.ParsedSnapshotNode, dbt.contracts.graph.parsed.ParsedSourceDefinition]]]], parent_map: Optional[Dict[str, List[str]]], child_map: Optional[Dict[str, List[str]]])", + "description": "WritableManifest(metadata: dbt.contracts.graph.manifest.ManifestMetadata, nodes: Mapping[str, Union[dbt.contracts.graph.nodes.AnalysisNode, dbt.contracts.graph.nodes.SingularTestNode, dbt.contracts.graph.nodes.HookNode, dbt.contracts.graph.nodes.ModelNode, dbt.contracts.graph.nodes.RPCNode, dbt.contracts.graph.nodes.SqlNode, dbt.contracts.graph.nodes.GenericTestNode, dbt.contracts.graph.nodes.SnapshotNode, dbt.contracts.graph.nodes.SeedNode]], sources: Mapping[str, dbt.contracts.graph.nodes.SourceDefinition], macros: Mapping[str, dbt.contracts.graph.nodes.Macro], docs: Mapping[str, dbt.contracts.graph.nodes.Documentation], exposures: Mapping[str, dbt.contracts.graph.nodes.Exposure], metrics: Mapping[str, dbt.contracts.graph.nodes.Metric], selectors: Mapping[str, Any], disabled: Optional[Mapping[str, List[Union[dbt.contracts.graph.nodes.AnalysisNode, 
dbt.contracts.graph.nodes.SingularTestNode, dbt.contracts.graph.nodes.HookNode, dbt.contracts.graph.nodes.ModelNode, dbt.contracts.graph.nodes.RPCNode, dbt.contracts.graph.nodes.SqlNode, dbt.contracts.graph.nodes.GenericTestNode, dbt.contracts.graph.nodes.SnapshotNode, dbt.contracts.graph.nodes.SeedNode, dbt.contracts.graph.nodes.SourceDefinition]]]], parent_map: Optional[Dict[str, List[str]]], child_map: Optional[Dict[str, List[str]]])", "definitions": { "ManifestMetadata": { "type": "object", @@ -244,7 +190,7 @@ "generated_at": { "type": "string", "format": "date-time", - "default": "2022-11-30T05:36:16.443035Z" + "default": "2022-12-13T03:30:15.966964Z" }, "invocation_id": { "oneOf": [ @@ -255,7 +201,7 @@ "type": "null" } ], - "default": "ff51bdcd-689d-45b3-8dbb-5a8016382eef" + "default": "4f2b967b-7e02-46de-a7ea-268a05e3fab1" }, "env": { "type": "object", @@ -313,54 +259,36 @@ "additionalProperties": false, "description": "Metadata for the manifest." }, - "CompiledAnalysisNode": { + "AnalysisNode": { "type": "object", "required": [ - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", "checksum" ], "properties": { - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "schema": { "type": "string" }, - "raw_code": { + "name": { "type": "string" }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "analysis" + ] }, "package_name": { "type": "string" @@ -371,14 +299,14 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "analysis" - ] + "fqn": { + "type": "array", + "items": { + "type": "string" + } }, "alias": { "type": "string" @@ -420,43 +348,6 @@ }, "default": [] }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, "description": { "type": "string", "default": "" @@ -489,16 +380,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -519,13 +400,13 @@ }, "created_at": { "type": "number", - "default": 1669786576.4447858 + "default": 1670902215.970579 }, "config_call_dict": { "type": "object", "default": {} }, - "compiled_code": { + "relation_name": { "oneOf": [ { "type": "string" @@ -535,18 +416,66 @@ } ] }, - "extra_ctes_injected": { - "type": "boolean", - "default": false + "raw_code": { + "type": "string", + "default": "" }, - "extra_ctes": { + "language": { + "type": "string", + "default": "sql" + }, + "refs": { "type": "array", "items": { - "$ref": "#/definitions/InjectedCTE" + "type": "array", + "items": { + "type": "string" + } }, "default": [] }, - "relation_name": { + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": 
"string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { "oneOf": [ { "type": "string" @@ -555,10 +484,21 @@ "type": "null" } ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] } }, "additionalProperties": false, - "description": "CompiledAnalysisNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + "description": "AnalysisNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" }, "FileHash": { "type": "object", @@ -781,28 +721,6 @@ "additionalProperties": false, "description": "Docs(show: bool = True, node_color: Optional[str] = None)" }, - "DependsOn": { - "type": "object", - "required": [], - "properties": { - "macros": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "nodes": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - } - }, - "additionalProperties": false, - "description": "DependsOn(macros: List[str] 
= , nodes: List[str] = )" - }, "ColumnInfo": { "type": "object", "required": [ @@ -849,7 +767,29 @@ } }, "additionalProperties": true, - "description": "ColumnInfo(name: str, description: str = '', meta: Dict[str, Any] = , data_type: Optional[str] = None, quote: Optional[bool] = None, tags: List[str] = , _extra: Dict[str, Any] = )" + "description": "Used in all ManifestNodes and SourceDefinition" + }, + "DependsOn": { + "type": "object", + "required": [], + "properties": { + "macros": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + }, + "nodes": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "additionalProperties": false, + "description": "DependsOn(macros: List[str] = , nodes: List[str] = )" }, "InjectedCTE": { "type": "object", @@ -866,56 +806,38 @@ } }, "additionalProperties": false, - "description": "InjectedCTE(id: str, sql: str)" + "description": "Used in CompiledNodes as part of ephemeral model processing" }, - "CompiledSingularTestNode": { + "SingularTestNode": { "type": "object", "required": [ - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", "checksum" ], "properties": { - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "schema": { "type": "string" }, - "raw_code": { + "name": { "type": "string" }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "test" + ] }, "package_name": { "type": "string" @@ -926,14 +848,14 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "test" - ] + "fqn": { + "type": "array", + "items": { + "type": "string" + } }, "alias": { "type": "string" @@ -967,43 +889,6 @@ }, "default": [] }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, "description": { "type": "string", "default": "" @@ -1036,16 +921,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -1066,13 +941,13 @@ }, "created_at": { "type": "number", - "default": 1669786576.445715 + "default": 1670902215.973521 }, "config_call_dict": { "type": "object", "default": {} }, - "compiled_code": { + "relation_name": { "oneOf": [ { "type": "string" @@ -1082,18 +957,66 @@ } ] }, - "extra_ctes_injected": { - "type": "boolean", - "default": false + "raw_code": { + "type": "string", + "default": "" }, - "extra_ctes": { + "language": { + "type": "string", + "default": "sql" + }, + "refs": { "type": "array", "items": { - "$ref": "#/definitions/InjectedCTE" + "type": "array", + "items": { + "type": "string" + } }, "default": [] }, - "relation_name": { + "sources": { + "type": "array", + "items": { + "type": "array", + 
"items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { "oneOf": [ { "type": "string" @@ -1102,10 +1025,21 @@ "type": "null" } ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] } }, "additionalProperties": false, - "description": "CompiledSingularTestNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + "description": "SingularTestNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" }, "TestConfig": { "type": "object", @@ -1219,54 +1153,36 @@ "additionalProperties": true, "description": "TestConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = 'dbt_test__audit', database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'test', severity: dbt.contracts.graph.model_config.Severity = 'ERROR', store_failures: Optional[bool] = None, where: 
Optional[str] = None, limit: Optional[int] = None, fail_calc: str = 'count(*)', warn_if: str = '!= 0', error_if: str = '!= 0')" }, - "CompiledModelNode": { + "HookNode": { "type": "object", "required": [ - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", "checksum" ], "properties": { - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "schema": { "type": "string" }, - "raw_code": { + "name": { "type": "string" }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "operation" + ] }, "package_name": { "type": "string" @@ -1277,14 +1193,14 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "model" - ] + "fqn": { + "type": "array", + "items": { + "type": "string" + } }, "alias": { "type": "string" @@ -1326,43 +1242,6 @@ }, "default": [] }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, "description": { "type": "string", "default": "" @@ -1395,16 +1274,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -1425,12 +1294,81 @@ }, "created_at": { "type": "number", - "default": 1669786576.4462662 + "default": 1670902215.975156 }, "config_call_dict": { "type": "object", "default": {} }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "language": { + "type": "string", + "default": "sql" + }, + "refs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, "compiled_code": { "oneOf": [ { @@ -1452,10 +1390,10 @@ }, "default": [] }, - "relation_name": { + "index": { "oneOf": [ { - "type": "string" + "type": "integer" }, { "type": "null" @@ -1464,56 +1402,38 @@ } }, "additionalProperties": false, - "description": "CompiledModelNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, 
config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + "description": "HookNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None, index: Optional[int] = None)" }, - "CompiledHookNode": { + "ModelNode": { "type": "object", "required": [ - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", "checksum" ], "properties": { - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "schema": { "type": "string" }, - "raw_code": { + "name": { "type": "string" }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "model" + ] }, "package_name": { "type": "string" @@ -1524,14 +1444,14 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "operation" - ] + "fqn": { + "type": "array", + "items": { + "type": "string" + } }, "alias": { "type": "string" @@ -1573,43 +1493,6 @@ }, "default": [] }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - 
"$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, "description": { "type": "string", "default": "" @@ -1642,16 +1525,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -1672,13 +1545,13 @@ }, "created_at": { "type": "number", - "default": 1669786576.446837 + "default": 1670902215.976732 }, "config_call_dict": { "type": "object", "default": {} }, - "compiled_code": { + "relation_name": { "oneOf": [ { "type": "string" @@ -1688,18 +1561,52 @@ } ] }, - "extra_ctes_injected": { - "type": "boolean", - "default": false + "raw_code": { + "type": "string", + "default": "" }, - "extra_ctes": { + "language": { + "type": "string", + "default": "sql" + }, + "refs": { "type": "array", "items": { - "$ref": "#/definitions/InjectedCTE" + "type": "array", + "items": { + "type": "string" + } }, "default": [] }, - "relation_name": { + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { "oneOf": [ { "type": "string" @@ -1709,68 +1616,65 @@ } ] }, - "index": { + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { "oneOf": [ { - "type": "integer" + "type": "string" }, { "type": "null" } ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] } }, "additionalProperties": false, - "description": "CompiledHookNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None, index: Optional[int] = None)" + "description": "ModelNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, 
deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" }, - "CompiledRPCNode": { + "RPCNode": { "type": "object", "required": [ - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", "checksum" ], "properties": { - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] + "type": "string" }, "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { + "name": { "type": "string" }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "rpc" + ] }, "package_name": { "type": "string" @@ -1781,14 +1685,14 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "rpc" - ] + "fqn": { + "type": "array", + "items": { + "type": "string" + } }, "alias": { "type": "string" @@ -1830,43 +1734,6 @@ }, "default": [] }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, "description": { "type": "string", "default": "" @@ -1899,16 +1766,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -1929,13 +1786,13 @@ }, "created_at": { "type": "number", - "default": 1669786576.447436 + "default": 1670902215.978195 }, "config_call_dict": { "type": "object", "default": {} }, - "compiled_code": { + "relation_name": { "oneOf": [ { "type": "string" @@ -1945,18 +1802,66 @@ } ] }, - "extra_ctes_injected": { - "type": "boolean", - "default": false + "raw_code": { + "type": "string", + "default": "" }, - "extra_ctes": { + "language": { + "type": "string", + "default": "sql" + }, + "refs": { "type": "array", "items": { - "$ref": "#/definitions/InjectedCTE" + "type": "array", + "items": { + "type": "string" + } }, "default": [] }, - "relation_name": { + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + 
"default": false + }, + "compiled_code": { "oneOf": [ { "type": "string" @@ -1965,59 +1870,52 @@ "type": "null" } ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] } }, "additionalProperties": false, - "description": "CompiledRPCNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" + "description": "RPCNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" }, - "CompiledSqlNode": { + "SqlNode": { "type": "object", "required": [ - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", "checksum" ], "properties": { - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "schema": { "type": "string" }, - "raw_code": { + "name": { "type": "string" }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "sql operation" + ] }, "package_name": { "type": "string" @@ -2028,14 +1926,14 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": 
"string" }, - "resource_type": { - "type": "string", - "enum": [ - "sql operation" - ] + "fqn": { + "type": "array", + "items": { + "type": "string" + } }, "alias": { "type": "string" @@ -2077,43 +1975,6 @@ }, "default": [] }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, "description": { "type": "string", "default": "" @@ -2146,16 +2007,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -2176,13 +2027,13 @@ }, "created_at": { "type": "number", - "default": 1669786576.448 + "default": 1670902215.979718 }, "config_call_dict": { "type": "object", "default": {} }, - "compiled_code": { + "relation_name": { "oneOf": [ { "type": "string" @@ -2192,18 +2043,66 @@ } ] }, - "extra_ctes_injected": { - "type": "boolean", - "default": false + "raw_code": { + "type": "string", + "default": "" }, - "extra_ctes": { + "language": { + "type": "string", + "default": "sql" + }, + "refs": { "type": "array", "items": { - "$ref": "#/definitions/InjectedCTE" + "type": "array", + "items": { + "type": "string" + } }, "default": [] }, - "relation_name": { + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { "oneOf": [ { "type": "string" @@ -2212,26 +2111,35 @@ "type": "null" } ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] } }, "additionalProperties": false, - "description": "CompiledSqlNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: 
Optional[str] = None)" + "description": "SqlNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" }, - "CompiledGenericTestNode": { + "GenericTestNode": { "type": "object", "required": [ "test_metadata", - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", "checksum" ], @@ -2239,36 +2147,20 @@ "test_metadata": { "$ref": "#/definitions/TestMetadata" }, - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "schema": { "type": "string" }, - "raw_code": { + "name": { "type": "string" }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "test" + ] }, "package_name": { "type": "string" @@ -2279,14 +2171,14 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "test" - ] + "fqn": { + "type": "array", + "items": { + "type": "string" + } }, "alias": { "type": "string" @@ -2320,43 +2212,6 @@ }, "default": [] }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, "description": { "type": "string", "default": "" @@ -2389,16 +2244,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -2419,13 +2264,13 @@ }, "created_at": { "type": "number", - "default": 1669786576.448638 + "default": 1670902215.981434 }, "config_call_dict": { "type": "object", "default": {} }, - "compiled_code": { + "relation_name": { "oneOf": [ { "type": "string" @@ -2435,18 +2280,66 @@ } ] }, - "extra_ctes_injected": { - "type": "boolean", - "default": false + "raw_code": { + "type": "string", + "default": "" }, - "extra_ctes": { + "language": { + "type": "string", + 
"default": "sql" + }, + "refs": { "type": "array", "items": { - "$ref": "#/definitions/InjectedCTE" + "type": "array", + "items": { + "type": "string" + } }, "default": [] }, - "relation_name": { + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { "oneOf": [ { "type": "string" @@ -2456,6 +2349,17 @@ } ] }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] + }, "column_name": { "oneOf": [ { @@ -2478,7 +2382,7 @@ } }, "additionalProperties": false, - "description": "CompiledGenericTestNode(test_metadata: dbt.contracts.graph.parsed.TestMetadata, compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None, column_name: Optional[str] = None, file_key_name: Optional[str] = None)" + "description": "GenericTestNode(test_metadata: dbt.contracts.graph.nodes.TestMetadata, database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: 
Optional[str] = None, column_name: Optional[str] = None, file_key_name: Optional[str] = None)" }, "TestMetadata": { "type": "object", @@ -2507,54 +2411,37 @@ "additionalProperties": false, "description": "TestMetadata(name: str, kwargs: Dict[str, Any] = , namespace: Optional[str] = None)" }, - "CompiledSeedNode": { + "SnapshotNode": { "type": "object", "required": [ - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", - "checksum" + "checksum", + "config" ], "properties": { - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] + "type": "string" }, "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "name": { "type": "string" }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "snapshot" + ] }, "package_name": { "type": "string" @@ -2565,14 +2452,14 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "seed" - ] + "fqn": { + "type": "array", + "items": { + "type": "string" + } }, "alias": { "type": "string" @@ -2581,32 +2468,7 @@ "$ref": "#/definitions/FileHash" }, "config": { - "$ref": "#/definitions/SeedConfig", - "default": { - "enabled": true, - "alias": null, - "schema": null, - "database": null, - "tags": [], - "meta": {}, - "materialized": "seed", - "incremental_strategy": null, - "persist_docs": {}, - "quoting": {}, - "column_types": {}, - "full_refresh": null, - "unique_key": null, - "on_schema_change": "ignore", - "grants": {}, - "packages": [], - "docs": { - "show": true, - "node_color": null - }, - "quote_columns": null, - "post-hook": [], - "pre-hook": [] - } + "$ref": "#/definitions/SnapshotConfig" }, "tags": { "type": "array", @@ -2615,43 +2477,6 @@ }, "default": [] }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, "description": { "type": "string", "default": "" @@ -2684,16 +2509,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -2714,13 +2529,13 @@ }, "created_at": { "type": "number", - "default": 1669786576.449645 + "default": 1670902215.984685 }, "config_call_dict": { "type": "object", "default": {} }, - "compiled_code": { + "relation_name": { "oneOf": [ { "type": "string" @@ -2730,18 +2545,52 @@ } ] }, - "extra_ctes_injected": { - "type": "boolean", - "default": false + "raw_code": { + "type": "string", + "default": "" }, - "extra_ctes": { + "language": { + "type": "string", + "default": "sql" + }, + "refs": { "type": "array", "items": { - "$ref": "#/definitions/InjectedCTE" + "type": "array", + "items": { + "type": "string" + } }, "default": [] }, - "relation_name": { + "sources": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": 
"string" + } + }, + "default": [] + }, + "metrics": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "string" + } + }, + "default": [] + }, + "depends_on": { + "$ref": "#/definitions/DependsOn", + "default": { + "macros": [], + "nodes": [] + } + }, + "compiled_path": { "oneOf": [ { "type": "string" @@ -2751,7 +2600,11 @@ } ] }, - "root_path": { + "compiled": { + "type": "boolean", + "default": false + }, + "compiled_code": { "oneOf": [ { "type": "string" @@ -2760,12 +2613,23 @@ "type": "null" } ] + }, + "extra_ctes_injected": { + "type": "boolean", + "default": false + }, + "extra_ctes": { + "type": "array", + "items": { + "$ref": "#/definitions/InjectedCTE" + }, + "default": [] } }, "additionalProperties": false, - "description": "CompiledSeedNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SeedConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None, root_path: Optional[str] = None)" + "description": "SnapshotNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SnapshotConfig, _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', language: str = 'sql', refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , compiled_path: Optional[str] = None, compiled: bool = False, compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.nodes.InjectedCTE] = , _pre_injected_sql: Optional[str] = None)" }, - "SeedConfig": { + "SnapshotConfig": { "type": "object", "required": [], "properties": { @@ -2823,7 +2687,7 @@ }, "materialized": { "type": "string", - "default": "seed" + "default": "snapshot" }, "incremental_strategy": { "oneOf": [ @@ -2876,12 +2740,6 @@ { "type": "string" }, - { - "type": "array", - "items": { - "type": "string" - } - }, { "type": "null" } @@ -2916,10 +2774,56 @@ "node_color": null } }, - "quote_columns": { + "strategy": { "oneOf": [ { - 
"type": "boolean" + "type": "string" + }, + { + "type": "null" + } + ] + }, + "target_schema": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "target_database": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "updated_at": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "check_cols": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } }, { "type": "null" @@ -2928,56 +2832,38 @@ } }, "additionalProperties": true, - "description": "SeedConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'seed', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Union[str, List[str], NoneType] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , quote_columns: Optional[bool] = None)" + "description": "SnapshotConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'snapshot', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Optional[str] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , strategy: Optional[str] = None, target_schema: Optional[str] = None, target_database: Optional[str] = None, updated_at: Optional[str] = None, check_cols: Union[str, List[str], NoneType] = None)" }, - "CompiledSnapshotNode": { + "SeedNode": { "type": "object", "required": [ - "compiled", + "database", "schema", - "fqn", - "unique_id", - "raw_code", - "language", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "resource_type", + "unique_id", + "fqn", "alias", "checksum" ], "properties": { - "compiled": { - "type": "boolean" - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] + "type": "string" }, "schema": { "type": "string" }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "name": { "type": "string" }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" + "resource_type": { + "type": "string", + "enum": [ + "seed" + ] }, "package_name": { "type": "string" @@ -2988,223 +2874,7 @@ "original_file_path": { "type": "string" }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "snapshot" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/NodeConfig", - "default": { - "enabled": true, - "alias": null, - "schema": null, - "database": null, - "tags": [], - "meta": {}, - "materialized": "view", - 
"incremental_strategy": null, - "persist_docs": {}, - "quoting": {}, - "column_types": {}, - "full_refresh": null, - "unique_key": null, - "on_schema_change": "ignore", - "grants": {}, - "packages": [], - "docs": { - "show": true, - "node_color": null - }, - "post-hook": [], - "pre-hook": [] - } - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - "default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.450196 - }, - "config_call_dict": { - "type": "object", - "default": {} - }, - "compiled_code": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "extra_ctes_injected": { - "type": "boolean", - "default": false - }, - "extra_ctes": { - "type": "array", - "items": { - "$ref": "#/definitions/InjectedCTE" - }, - "default": [] - }, - "relation_name": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false, - "description": "CompiledSnapshotNode(compiled: bool, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , compiled_code: Optional[str] = None, extra_ctes_injected: bool = False, extra_ctes: List[dbt.contracts.graph.compiled.InjectedCTE] = , relation_name: Optional[str] = None, _pre_injected_sql: Optional[str] = None)" - }, - "ParsedAnalysisNode": { - "type": "object", - "required": [ - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", 
- "alias", - "checksum" - ], - "properties": { - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { + "unique_id": { "type": "string" }, "fqn": { @@ -3213,33 +2883,6 @@ "type": "string" } }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "analysis" - ] - }, "alias": { "type": "string" }, @@ -3247,7 +2890,7 @@ "$ref": "#/definitions/FileHash" }, "config": { - "$ref": "#/definitions/NodeConfig", + "$ref": "#/definitions/SeedConfig", "default": { "enabled": true, "alias": null, @@ -3255,7 +2898,7 @@ "database": null, "tags": [], "meta": {}, - "materialized": "view", + "materialized": "seed", "incremental_strategy": null, "persist_docs": {}, "quoting": {}, @@ -3269,6 +2912,7 @@ "show": true, "node_color": null }, + "quote_columns": null, "post-hook": [], "pre-hook": [] } @@ -3276,1748 +2920,9 @@ "tags": { "type": "array", "items": { - "type": "string" - }, - "default": [] - }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - "default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.450721 - }, - "config_call_dict": { - "type": "object", - "default": {} - } - }, - "additionalProperties": false, - "description": "ParsedAnalysisNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: 
Dict[str, Any] = )" - }, - "ParsedSingularTestNode": { - "type": "object", - "required": [ - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", - "alias", - "checksum" - ], - "properties": { - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { - "type": "string" - }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "test" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/TestConfig", - "default": { - "enabled": true, - "alias": null, - "schema": "dbt_test__audit", - "database": null, - "tags": [], - "meta": {}, - "materialized": "test", - "severity": "ERROR", - "store_failures": null, - "where": null, - "limit": null, - "fail_calc": "count(*)", - "warn_if": "!= 0", - "error_if": "!= 0" - } - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - "default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.451207 - }, - "config_call_dict": { - "type": "object", - "default": {} - } - }, - "additionalProperties": false, - "description": "ParsedSingularTestNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, 
compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" - }, - "ParsedHookNode": { - "type": "object", - "required": [ - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", - "alias", - "checksum" - ], - "properties": { - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { - "type": "string" - }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "operation" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/NodeConfig", - "default": { - "enabled": true, - "alias": null, - "schema": null, - "database": null, - "tags": [], - "meta": {}, - "materialized": "view", - "incremental_strategy": null, - "persist_docs": {}, - "quoting": {}, - "column_types": {}, - "full_refresh": null, - "unique_key": null, - "on_schema_change": "ignore", - "grants": {}, - "packages": [], - "docs": { - "show": true, - "node_color": null - }, - "post-hook": [], - "pre-hook": [] - } - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - "default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.451676 - }, - "config_call_dict": { - "type": "object", - "default": {} - }, - "index": { - "oneOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false, - "description": "ParsedHookNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , 
_event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , index: Optional[int] = None)" - }, - "ParsedModelNode": { - "type": "object", - "required": [ - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", - "alias", - "checksum" - ], - "properties": { - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { - "type": "string" - }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "model" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/NodeConfig", - "default": { - "enabled": true, - "alias": null, - "schema": null, - "database": null, - "tags": [], - "meta": {}, - "materialized": "view", - "incremental_strategy": null, - "persist_docs": {}, - "quoting": {}, - "column_types": {}, - "full_refresh": null, - "unique_key": null, - "on_schema_change": "ignore", - "grants": {}, - "packages": [], - "docs": { - "show": true, - "node_color": null - }, - "post-hook": [], - "pre-hook": [] - } - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - "default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.452158 - }, - "config_call_dict": { - "type": "object", - "default": {} - } - }, - "additionalProperties": false, - "description": 
"ParsedModelNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" - }, - "ParsedRPCNode": { - "type": "object", - "required": [ - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", - "alias", - "checksum" - ], - "properties": { - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { - "type": "string" - }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "rpc" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/NodeConfig", - "default": { - "enabled": true, - "alias": null, - "schema": null, - "database": null, - "tags": [], - "meta": {}, - "materialized": "view", - "incremental_strategy": null, - "persist_docs": {}, - "quoting": {}, - "column_types": {}, - "full_refresh": null, - "unique_key": null, - "on_schema_change": "ignore", - "grants": {}, - "packages": [], - "docs": { - "show": true, - "node_color": null - }, - "post-hook": [], - "pre-hook": [] - } - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - 
"default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.452618 - }, - "config_call_dict": { - "type": "object", - "default": {} - } - }, - "additionalProperties": false, - "description": "ParsedRPCNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" - }, - "ParsedSqlNode": { - "type": "object", - "required": [ - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", - "alias", - "checksum" - ], - "properties": { - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { - "type": "string" - }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "sql operation" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/NodeConfig", - "default": { - "enabled": true, - "alias": null, - "schema": null, - "database": null, - "tags": [], - "meta": {}, - "materialized": "view", - "incremental_strategy": null, - "persist_docs": {}, - "quoting": {}, - "column_types": {}, - "full_refresh": null, - "unique_key": null, - "on_schema_change": "ignore", - "grants": {}, - "packages": [], - "docs": { - "show": true, - "node_color": null - }, - "post-hook": [], - "pre-hook": [] - } - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - 
"type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - "default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.453086 - }, - "config_call_dict": { - "type": "object", - "default": {} - } - }, - "additionalProperties": false, - "description": "ParsedSqlNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.NodeConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" - }, - "ParsedGenericTestNode": { - "type": "object", - "required": [ - "test_metadata", - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", - "alias", - "checksum" - ], - "properties": { - "test_metadata": { - "$ref": "#/definitions/TestMetadata" - }, - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { - "type": "string" - }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "test" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/TestConfig", - "default": { - "enabled": true, - "alias": null, - "schema": "dbt_test__audit", - "database": null, - "tags": [], - "meta": {}, - "materialized": "test", - "severity": "ERROR", - "store_failures": null, - "where": null, - "limit": null, - "fail_calc": "count(*)", - "warn_if": "!= 0", - "error_if": "!= 0" - } - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": 
"#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - "default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.4535701 - }, - "config_call_dict": { - "type": "object", - "default": {} - }, - "column_name": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "file_key_name": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false, - "description": "ParsedGenericTestNode(test_metadata: dbt.contracts.graph.parsed.TestMetadata, database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.TestConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , column_name: Optional[str] = None, file_key_name: Optional[str] = None)" - }, - "ParsedSeedNode": { - "type": "object", - "required": [ - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", - "alias", - "checksum" - ], - "properties": { - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { - "type": "string" - }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "seed" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/SeedConfig", - "default": { - "enabled": true, - "alias": null, - "schema": null, - "database": null, - "tags": [], - "meta": {}, - "materialized": "seed", - "incremental_strategy": null, - "persist_docs": {}, - "quoting": {}, - "column_types": {}, - "full_refresh": null, - "unique_key": null, - "on_schema_change": "ignore", - "grants": {}, - "packages": [], - "docs": { - "show": true, - "node_color": null - }, - "quote_columns": null, - "post-hook": [], - "pre-hook": [] - } - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] 
- }, - "refs": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } - }, - "description": { - "type": "string", - "default": "" - }, - "columns": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ColumnInfo" - }, - "default": {} - }, - "meta": { - "type": "object", - "default": {} - }, - "docs": { - "$ref": "#/definitions/Docs", - "default": { - "show": true, - "node_color": null - } - }, - "patch_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "build_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "deferred": { - "type": "boolean", - "default": false - }, - "unrendered_config": { - "type": "object", - "default": {} - }, - "created_at": { - "type": "number", - "default": 1669786576.454068 - }, - "config_call_dict": { - "type": "object", - "default": {} - }, - "root_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false, - "description": "ParsedSeedNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SeedConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , root_path: Optional[str] = None)" - }, - "ParsedSnapshotNode": { - "type": "object", - "required": [ - "schema", - "fqn", - "unique_id", - "raw_code", - "language", - "package_name", - "path", - "original_file_path", - "name", - "resource_type", - "alias", - "checksum", - "config" - ], - "properties": { - "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "schema": { - "type": "string" - }, - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { - "type": "string" - }, - "raw_code": { - "type": "string" - }, - "language": { - "type": "string" - }, - "package_name": { - "type": "string" - }, - "path": { - "type": "string" - }, - "original_file_path": { - "type": "string" - }, - "name": { - "type": "string" - }, - "resource_type": { - "type": "string", - "enum": [ - "snapshot" - ] - }, - "alias": { - "type": "string" - }, - "checksum": { - "$ref": "#/definitions/FileHash" - }, - "config": { - "$ref": "#/definitions/SnapshotConfig" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, - "refs": { 
- "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "sources": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "metrics": { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "string" - } - }, - "default": [] - }, - "depends_on": { - "$ref": "#/definitions/DependsOn", - "default": { - "macros": [], - "nodes": [] - } + "type": "string" + }, + "default": [] }, "description": { "type": "string", @@ -5051,16 +2956,6 @@ } ] }, - "compiled_path": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, "build_path": { "oneOf": [ { @@ -5081,17 +2976,41 @@ }, "created_at": { "type": "number", - "default": 1669786576.454986 + "default": 1670902215.987447 }, "config_call_dict": { "type": "object", "default": {} + }, + "relation_name": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "raw_code": { + "type": "string", + "default": "" + }, + "root_path": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] } }, "additionalProperties": false, - "description": "ParsedSnapshotNode(database: Optional[str], schema: str, fqn: List[str], unique_id: str, raw_code: str, language: str, package_name: str, path: str, original_file_path: str, name: str, resource_type: dbt.node_types.NodeType, alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SnapshotConfig, _event_status: Dict[str, Any] = , tags: List[str] = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, compiled_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = )" + "description": "SeedNode(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], alias: str, checksum: dbt.contracts.files.FileHash, config: dbt.contracts.graph.model_config.SeedConfig = , _event_status: Dict[str, Any] = , tags: List[str] = , description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, build_path: Optional[str] = None, deferred: bool = False, unrendered_config: Dict[str, Any] = , created_at: float = , config_call_dict: Dict[str, Any] = , relation_name: Optional[str] = None, raw_code: str = '', root_path: Optional[str] = None)" }, - "SnapshotConfig": { + "SeedConfig": { "type": "object", "required": [], "properties": { @@ -5149,7 +3068,7 @@ }, "materialized": { "type": "string", - "default": "snapshot" + "default": "seed" }, "incremental_strategy": { "oneOf": [ @@ -5202,6 +3121,12 @@ { "type": "string" }, + { + "type": "array", + "items": { + "type": "string" + } + }, { "type": "null" } @@ -5236,56 +3161,10 @@ "node_color": null } }, - "strategy": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "target_schema": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "target_database": { - "oneOf": [ - { - "type": "string" - }, - { - 
"type": "null" - } - ] - }, - "updated_at": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] - }, - "check_cols": { + "quote_columns": { "oneOf": [ { - "type": "string" - }, - { - "type": "array", - "items": { - "type": "string" - } + "type": "boolean" }, { "type": "null" @@ -5294,47 +3173,41 @@ } }, "additionalProperties": true, - "description": "SnapshotConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'snapshot', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Optional[str] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , strategy: Optional[str] = None, target_schema: Optional[str] = None, target_database: Optional[str] = None, updated_at: Optional[str] = None, check_cols: Union[str, List[str], NoneType] = None)" + "description": "SeedConfig(_extra: Dict[str, Any] = , enabled: bool = True, alias: Optional[str] = None, schema: Optional[str] = None, database: Optional[str] = None, tags: Union[List[str], str] = , meta: Dict[str, Any] = , materialized: str = 'seed', incremental_strategy: Optional[str] = None, persist_docs: Dict[str, Any] = , post_hook: List[dbt.contracts.graph.model_config.Hook] = , pre_hook: List[dbt.contracts.graph.model_config.Hook] = , quoting: Dict[str, Any] = , column_types: Dict[str, Any] = , full_refresh: Optional[bool] = None, unique_key: Union[str, List[str], NoneType] = None, on_schema_change: Optional[str] = 'ignore', grants: Dict[str, Any] = , packages: List[str] = , docs: dbt.contracts.graph.unparsed.Docs = , quote_columns: Optional[bool] = None)" }, - "ParsedSourceDefinition": { + "SourceDefinition": { "type": "object", "required": [ - "fqn", + "database", "schema", - "unique_id", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", + "unique_id", + "fqn", "source_name", "source_description", "loader", - "identifier", - "resource_type" + "identifier" ], "properties": { - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, "database": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ] + "type": "string" }, "schema": { "type": "string" }, - "unique_id": { + "name": { "type": "string" }, + "resource_type": { + "type": "string", + "enum": [ + "source" + ] + }, "package_name": { "type": "string" }, @@ -5344,9 +3217,15 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, "source_name": { "type": "string" }, @@ -5359,12 +3238,6 @@ "identifier": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "source" - ] - }, "quoting": { "$ref": "#/definitions/Quoting", "default": { @@ -5462,11 +3335,11 @@ }, "created_at": { "type": "number", - "default": 1669786576.455929 + "default": 1670902215.989922 } }, "additionalProperties": false, - "description": "ParsedSourceDefinition(fqn: List[str], database: Optional[str], schema: str, unique_id: str, package_name: str, path: str, original_file_path: str, name: str, source_name: str, 
source_description: str, loader: str, identifier: str, resource_type: dbt.node_types.NodeType, _event_status: Dict[str, Any] = , quoting: dbt.contracts.graph.unparsed.Quoting = , loaded_at_field: Optional[str] = None, freshness: Optional[dbt.contracts.graph.unparsed.FreshnessThreshold] = None, external: Optional[dbt.contracts.graph.unparsed.ExternalTable] = None, description: str = '', columns: Dict[str, dbt.contracts.graph.parsed.ColumnInfo] = , meta: Dict[str, Any] = , source_meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.SourceConfig = , patch_path: Optional[pathlib.Path] = None, unrendered_config: Dict[str, Any] = , relation_name: Optional[str] = None, created_at: float = )" + "description": "SourceDefinition(database: str, schema: str, name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], source_name: str, source_description: str, loader: str, identifier: str, _event_status: Dict[str, Any] = , quoting: dbt.contracts.graph.unparsed.Quoting = , loaded_at_field: Optional[str] = None, freshness: Optional[dbt.contracts.graph.unparsed.FreshnessThreshold] = None, external: Optional[dbt.contracts.graph.unparsed.ExternalTable] = None, description: str = '', columns: Dict[str, dbt.contracts.graph.nodes.ColumnInfo] = , meta: Dict[str, Any] = , source_meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.SourceConfig = , patch_path: Optional[str] = None, unrendered_config: Dict[str, Any] = , relation_name: Optional[str] = None, created_at: float = )" }, "Quoting": { "type": "object", @@ -5577,7 +3450,7 @@ "generated_at": { "type": "string", "format": "date-time", - "default": "2022-11-30T05:36:16.440838Z" + "default": "2022-12-13T03:30:15.961825Z" }, "invocation_id": { "oneOf": [ @@ -5588,7 +3461,7 @@ "type": "null" } ], - "default": "ff51bdcd-689d-45b3-8dbb-5a8016382eef" + "default": "4f2b967b-7e02-46de-a7ea-268a05e3fab1" }, "env": { "type": "object", @@ -5865,21 +3738,27 @@ "additionalProperties": true, "description": "SourceConfig(_extra: Dict[str, Any] = , enabled: bool = True)" }, - "ParsedMacro": { + "Macro": { "type": "object", "required": [ - "unique_id", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", - "macro_sql", - "resource_type" + "unique_id", + "macro_sql" ], "properties": { - "unique_id": { + "name": { "type": "string" }, + "resource_type": { + "type": "string", + "enum": [ + "macro" + ] + }, "package_name": { "type": "string" }, @@ -5889,25 +3768,12 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, "macro_sql": { "type": "string" }, - "resource_type": { - "type": "string", - "enum": [ - "macro" - ] - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "default": [] - }, "depends_on": { "$ref": "#/definitions/MacroDependsOn", "default": { @@ -5948,7 +3814,7 @@ }, "created_at": { "type": "number", - "default": 1669786576.45632 + "default": 1670902215.990816 }, "supported_languages": { "oneOf": [ @@ -5969,7 +3835,7 @@ } }, "additionalProperties": false, - "description": "ParsedMacro(unique_id: str, package_name: str, path: str, original_file_path: str, name: str, macro_sql: str, resource_type: dbt.node_types.NodeType, tags: List[str] = , depends_on: dbt.contracts.graph.parsed.MacroDependsOn = , description: str = '', meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, arguments: 
List[dbt.contracts.graph.unparsed.MacroArgument] = , created_at: float = , supported_languages: Optional[List[dbt.node_types.ModelLanguage]] = None)" + "description": "Macro(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, macro_sql: str, depends_on: dbt.contracts.graph.nodes.MacroDependsOn = , description: str = '', meta: Dict[str, Any] = , docs: dbt.contracts.graph.unparsed.Docs = , patch_path: Optional[str] = None, arguments: List[dbt.contracts.graph.unparsed.MacroArgument] = , created_at: float = , supported_languages: Optional[List[dbt.node_types.ModelLanguage]] = None)" }, "MacroDependsOn": { "type": "object", @@ -5984,7 +3850,7 @@ } }, "additionalProperties": false, - "description": "MacroDependsOn(macros: List[str] = )" + "description": "Used only in the Macro class" }, "MacroArgument": { "type": "object", @@ -6013,20 +3879,27 @@ "additionalProperties": false, "description": "MacroArgument(name: str, type: Optional[str] = None, description: str = '')" }, - "ParsedDocumentation": { + "Documentation": { "type": "object", "required": [ - "unique_id", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", + "unique_id", "block_contents" ], "properties": { - "unique_id": { + "name": { "type": "string" }, + "resource_type": { + "type": "string", + "enum": [ + "doc" + ] + }, "package_name": { "type": "string" }, @@ -6036,7 +3909,7 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, "block_contents": { @@ -6044,30 +3917,31 @@ } }, "additionalProperties": false, - "description": "ParsedDocumentation(unique_id: str, package_name: str, path: str, original_file_path: str, name: str, block_contents: str)" + "description": "Documentation(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, block_contents: str)" }, - "ParsedExposure": { + "Exposure": { "type": "object", "required": [ - "fqn", - "unique_id", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", + "unique_id", + "fqn", "type", "owner" ], "properties": { - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "name": { "type": "string" }, + "resource_type": { + "type": "string", + "enum": [ + "exposure" + ] + }, "package_name": { "type": "string" }, @@ -6077,9 +3951,15 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, "type": { "type": "string", "enum": [ @@ -6093,25 +3973,6 @@ "owner": { "$ref": "#/definitions/ExposureOwner" }, - "resource_type": { - "type": "string", - "enum": [ - "model", - "analysis", - "test", - "snapshot", - "operation", - "seed", - "rpc", - "sql operation", - "docs block", - "source", - "macro", - "exposure", - "metric" - ], - "default": "exposure" - }, "description": { "type": "string", "default": "" @@ -6211,11 +4072,11 @@ }, "created_at": { "type": "number", - "default": 1669786576.456964 + "default": 1670902215.993354 } }, "additionalProperties": false, - "description": "ParsedExposure(fqn: List[str], unique_id: str, package_name: str, path: str, original_file_path: str, name: str, type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, resource_type: dbt.node_types.NodeType = , description: str = '', label: Optional[str] = None, maturity: 
Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.ExposureConfig = , unrendered_config: Dict[str, Any] = , url: Optional[str] = None, depends_on: dbt.contracts.graph.parsed.DependsOn = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" + "description": "Exposure(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], type: dbt.contracts.graph.unparsed.ExposureType, owner: dbt.contracts.graph.unparsed.ExposureOwner, description: str = '', label: Optional[str] = None, maturity: Optional[dbt.contracts.graph.unparsed.MaturityType] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.ExposureConfig = , unrendered_config: Dict[str, Any] = , url: Optional[str] = None, depends_on: dbt.contracts.graph.nodes.DependsOn = , refs: List[List[str]] = , sources: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" }, "ExposureOwner": { "type": "object", @@ -6252,15 +4113,16 @@ "additionalProperties": true, "description": "ExposureConfig(_extra: Dict[str, Any] = , enabled: bool = True)" }, - "ParsedMetric": { + "Metric": { "type": "object", "required": [ - "fqn", - "unique_id", + "name", + "resource_type", "package_name", "path", "original_file_path", - "name", + "unique_id", + "fqn", "description", "label", "calculation_method", @@ -6271,15 +4133,15 @@ "dimensions" ], "properties": { - "fqn": { - "type": "array", - "items": { - "type": "string" - } - }, - "unique_id": { + "name": { "type": "string" }, + "resource_type": { + "type": "string", + "enum": [ + "metric" + ] + }, "package_name": { "type": "string" }, @@ -6289,9 +4151,15 @@ "original_file_path": { "type": "string" }, - "name": { + "unique_id": { "type": "string" }, + "fqn": { + "type": "array", + "items": { + "type": "string" + } + }, "description": { "type": "string" }, @@ -6355,25 +4223,6 @@ } ] }, - "resource_type": { - "type": "string", - "enum": [ - "model", - "analysis", - "test", - "snapshot", - "operation", - "seed", - "rpc", - "sql operation", - "docs block", - "source", - "macro", - "exposure", - "metric" - ], - "default": "metric" - }, "meta": { "type": "object", "default": {} @@ -6434,11 +4283,11 @@ }, "created_at": { "type": "number", - "default": 1669786576.4576042 + "default": 1670902215.995033 } }, "additionalProperties": false, - "description": "ParsedMetric(fqn: List[str], unique_id: str, package_name: str, path: str, original_file_path: str, name: str, description: str, label: str, calculation_method: str, timestamp: str, expression: str, filters: List[dbt.contracts.graph.unparsed.MetricFilter], time_grains: List[str], dimensions: List[str], window: Optional[dbt.contracts.graph.unparsed.MetricTime] = None, model: Optional[str] = None, model_unique_id: Optional[str] = None, resource_type: dbt.node_types.NodeType = , meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.MetricConfig = , unrendered_config: Dict[str, Any] = , sources: List[List[str]] = , depends_on: dbt.contracts.graph.parsed.DependsOn = , refs: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" + "description": "Metric(name: str, resource_type: dbt.node_types.NodeType, package_name: str, path: str, original_file_path: str, unique_id: str, fqn: List[str], description: str, label: str, calculation_method: str, timestamp: 
str, expression: str, filters: List[dbt.contracts.graph.unparsed.MetricFilter], time_grains: List[str], dimensions: List[str], window: Optional[dbt.contracts.graph.unparsed.MetricTime] = None, model: Optional[str] = None, model_unique_id: Optional[str] = None, meta: Dict[str, Any] = , tags: List[str] = , config: dbt.contracts.graph.model_config.MetricConfig = , unrendered_config: Dict[str, Any] = , sources: List[List[str]] = , depends_on: dbt.contracts.graph.nodes.DependsOn = , refs: List[List[str]] = , metrics: List[List[str]] = , created_at: float = )" }, "MetricFilter": { "type": "object", diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py index 453d544ca1c..ae792cdb718 100644 --- a/test/unit/test_contracts_graph_parsed.py +++ b/test/unit/test_contracts_graph_parsed.py @@ -428,14 +428,9 @@ def basic_parsed_seed_dict(): 'path': '/root/seeds/seed.csv', 'original_file_path': 'seeds/seed.csv', 'package_name': 'test', - 'language': 'sql', 'raw_code': '', 'unique_id': 'seed.test.foo', 'fqn': ['test', 'seeds', 'foo'], - 'refs': [], - 'sources': [], - 'metrics': [], - 'depends_on': {'macros': [], 'nodes': []}, 'database': 'test_db', 'description': '', 'schema': 'test_schema', @@ -474,14 +469,9 @@ def basic_parsed_seed_object(): path='/root/seeds/seed.csv', original_file_path='seeds/seed.csv', package_name='test', - language='sql', raw_code='', unique_id='seed.test.foo', fqn=['test', 'seeds', 'foo'], - refs=[], - sources=[], - metrics=[], - depends_on=DependsOn(), database='test_db', description='', schema='test_schema', @@ -507,7 +497,6 @@ def minimal_parsed_seed_dict(): 'path': '/root/seeds/seed.csv', 'original_file_path': 'seeds/seed.csv', 'package_name': 'test', - 'language': 'sql', 'raw_code': '', 'unique_id': 'seed.test.foo', 'fqn': ['test', 'seeds', 'foo'], @@ -527,14 +516,9 @@ def complex_parsed_seed_dict(): 'path': '/root/seeds/seed.csv', 'original_file_path': 'seeds/seed.csv', 'package_name': 'test', - 'language': 'sql', 'raw_code': '', 'unique_id': 'seed.test.foo', 'fqn': ['test', 'seeds', 'foo'], - 'refs': [], - 'sources': [], - 'metrics': [], - 'depends_on': {'macros': [], 'nodes': []}, 'database': 'test_db', 'description': 'a description', 'schema': 'test_schema', @@ -576,14 +560,9 @@ def complex_parsed_seed_object(): path='/root/seeds/seed.csv', original_file_path='seeds/seed.csv', package_name='test', - language='sql', raw_code='', unique_id='seed.test.foo', fqn=['test', 'seeds', 'foo'], - refs=[], - sources=[], - metrics=[], - depends_on=DependsOn(), database='test_db', description='a description', schema='test_schema', @@ -605,7 +584,10 @@ def complex_parsed_seed_object(): def test_seed_basic(basic_parsed_seed_dict, basic_parsed_seed_object, minimal_parsed_seed_dict): + dct = basic_parsed_seed_object.to_dict() + assert_symmetric(basic_parsed_seed_object, basic_parsed_seed_dict) + assert basic_parsed_seed_object.get_materialization() == 'seed' assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, SeedNode) @@ -1804,7 +1786,6 @@ def _ok_dict(self): 'macro_sql': '{% macro foo() %}select 1 as id{% endmacro %}', 'resource_type': 'macro', 'unique_id': 'macro.test.foo', - 'tags': [], 'depends_on': {'macros': []}, 'meta': {}, 'description': 'my macro description', @@ -1822,7 +1803,6 @@ def test_ok(self): macro_sql='{% macro foo() %}select 1 as id{% endmacro %}', resource_type=NodeType.Macro, unique_id='macro.test.foo', - tags=[], depends_on=MacroDependsOn(), meta={}, description='my macro description', @@ -1849,6 +1829,7 @@ 
def _ok_dict(self): return { 'block_contents': 'some doc contents', 'name': 'foo', + 'resource_type': 'doc', 'original_file_path': '/root/docs/doc.md', 'package_name': 'test', 'path': '/root/docs', @@ -1863,7 +1844,8 @@ def test_ok(self): original_file_path='/root/docs/doc.md', name='foo', unique_id='test.foo', - block_contents='some doc contents' + block_contents='some doc contents', + resource_type=NodeType.Documentation, ) self.assert_symmetric(doc, doc_dict) pickle.loads(pickle.dumps(doc)) @@ -2113,6 +2095,7 @@ def minimal_parsed_exposure_dict(): 'original_file_path': 'models/something.yml', 'description': '', 'created_at': 1.0, + 'resource_type': 'exposure', } @@ -2152,6 +2135,7 @@ def basic_parsed_exposure_dict(): def basic_parsed_exposure_object(): return Exposure( name='my_exposure', + resource_type=NodeType.Exposure, type=ExposureType.Notebook, fqn=['test', 'exposures', 'my_exposure'], unique_id='exposure.test.my_exposure', @@ -2209,6 +2193,7 @@ def complex_parsed_exposure_dict(): def complex_parsed_exposure_object(): return Exposure( name='my_exposure', + resource_type=NodeType.Exposure, type=ExposureType.Analysis, owner=ExposureOwner(email='test@example.com', name='A Name'), maturity=MaturityType.Low, @@ -2327,6 +2312,7 @@ def basic_parsed_metric_dict(): def basic_parsed_metric_object(): return Metric( name='my_metric', + resource_type=NodeType.Metric, calculation_method='count', fqn=['test', 'metrics', 'my_metric'], unique_id='metric.test.my_metric', diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py index 5d99182d62a..0497d5da02a 100644 --- a/test/unit/test_graph_selector_methods.py +++ b/test/unit/test_graph_selector_methods.py @@ -118,8 +118,6 @@ def make_seed(pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extr fqn = [pkg] + fqn_extras + [name] return SeedNode( - language='sql', - raw_code='', database='dbt', schema='dbt_schema', alias=alias, @@ -182,7 +180,6 @@ def make_macro(pkg, name, macro_sql, path=None, depends_on_macros=None): path=path, original_file_path=path, resource_type=NodeType.Macro, - tags=[], depends_on=MacroDependsOn(macros=depends_on_macros), ) @@ -338,6 +335,7 @@ def make_exposure(pkg, name, path=None, fqn_extras=None, owner=None): fqn = [pkg, 'exposures'] + fqn_extras + [name] return Exposure( name=name, + resource_type=NodeType.Exposure, type=ExposureType.Notebook, fqn=fqn, unique_id=f'exposure.{pkg}.{name}', @@ -354,6 +352,7 @@ def make_metric(pkg, name, path=None): return Metric( name=name, + resource_type=NodeType.Metric, path='schema.yml', package_name=pkg, original_file_path=path, diff --git a/test/unit/test_manifest.py b/test/unit/test_manifest.py index 8bec74787cd..576a525823b 100644 --- a/test/unit/test_manifest.py +++ b/test/unit/test_manifest.py @@ -46,7 +46,7 @@ 'depends_on', 'database', 'schema', 'name', 'resource_type', 'package_name', 'path', 'original_file_path', 'raw_code', 'language', 'description', 'columns', 'fqn', 'build_path', 'compiled_path', 'patch_path', 'docs', - 'deferred', 'checksum', 'unrendered_config', 'created_at', 'config_call_dict', + 'deferred', 'checksum', 'unrendered_config', 'created_at', 'config_call_dict', 'relation_name', }) REQUIRED_COMPILED_NODE_KEYS = frozenset(REQUIRED_PARSED_NODE_KEYS | { @@ -501,15 +501,10 @@ def test_get_resource_fqns(self): unique_id='seed.root.seed', fqn=['root', 'seed'], package_name='root', - refs=[['events']], - sources=[], - depends_on=DependsOn(), config=self.model_config, tags=[], path='seed.csv', 
original_file_path='seed.csv', - language='sql', - raw_code='-- csv --', checksum=FileHash.empty(), ) manifest = Manifest(nodes=nodes, sources=self.sources, macros={}, docs={}, diff --git a/test/unit/test_node_types.py b/test/unit/test_node_types.py index fcfb115b9b9..06c27dba7fe 100644 --- a/test/unit/test_node_types.py +++ b/test/unit/test_node_types.py @@ -10,7 +10,7 @@ NodeType.Seed: "seeds", NodeType.RPCCall: "rpcs", NodeType.SqlOperation: "sql operations", - NodeType.Documentation: "docs blocks", + NodeType.Documentation: "docs", NodeType.Source: "sources", NodeType.Macro: "macros", NodeType.Exposure: "exposures", diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 9a91b59eb26..316caffa870 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -173,9 +173,13 @@ def assertEqualNodes(node_one, node_two): node_one_dict = node_one.to_dict() if 'created_at' in node_one_dict: del node_one_dict['created_at'] + if "relation_name" in node_one_dict: + del node_one_dict["relation_name"] node_two_dict = node_two.to_dict() if 'created_at' in node_two_dict: del node_two_dict['created_at'] + if "relation_name" in node_two_dict: + del node_two_dict["relation_name"] # we don't really care about the order of packages, doing this because it is hard to # make config.packages a set instead of a list if 'config' in node_one_dict and 'packages' in node_one_dict['config']: @@ -1333,6 +1337,7 @@ def test_basic(self): raw_code=raw_code, checksum=block.file.checksum, unrendered_config={}, + relation_name=None, ) assertEqualNodes(node, expected) file_id = 'snowplow://' + normalize('analyses/nested/analysis_1.sql') diff --git a/tests/functional/artifacts/data/state/v8/manifest.json b/tests/functional/artifacts/data/state/v8/manifest.json index 9cfefdaaf85..58e3f04da3c 100644 --- a/tests/functional/artifacts/data/state/v8/manifest.json +++ b/tests/functional/artifacts/data/state/v8/manifest.json @@ -1 +1 @@ -{"metadata": {"dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v8.json", "dbt_version": "1.4.0a1", "generated_at": "2022-11-04T14:47:38.242390Z", "invocation_id": "c6157471-2b64-428a-ada9-044ddfcc03ac", "env": {}, "project_id": "098f6bcd4621d373cade4e832627b4f6", "user_id": null, "send_anonymous_usage_stats": false, "adapter_type": "postgres"}, "nodes": {"model.test.my_model": {"resource_type": "model", "depends_on": {"macros": [], "nodes": []}, "config": {"enabled": true, "alias": null, "schema": null, "database": null, "tags": [], "meta": {}, "materialized": "view", "incremental_strategy": null, "persist_docs": {}, "quoting": {}, "column_types": {}, "full_refresh": null, "unique_key": null, "on_schema_change": "ignore", "grants": {}, "packages": [], "docs": {"show": true, "node_color": null}, "post-hook": [], "pre-hook": []}, "database": "dbt", "schema": "test16675732582545487557_test_previous_version_state", "fqn": ["test", "my_model"], "unique_id": "model.test.my_model", "raw_code": "select 1 as id", "language": "sql", "package_name": "test", "path": "my_model.sql", "original_file_path": "models/my_model.sql", "name": "my_model", "alias": "my_model", "checksum": {"name": "sha256", "checksum": "2b9123e04ab8bb798f7c565afdc3ee0e56fcd66b4bfbdb435b4891c878d947c5"}, "tags": [], "refs": [], "sources": [], "metrics": [], "description": "", "columns": {}, "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "compiled_path": null, "build_path": null, "deferred": false, "unrendered_config": {}, "created_at": 1667573258.9454}}, "sources": {}, "macros":
{"macro.dbt_postgres.postgres__current_timestamp": {"unique_id": "macro.dbt_postgres.postgres__current_timestamp", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__current_timestamp", "macro_sql": "{% macro postgres__current_timestamp() -%}\n now()\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.265455, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_string_as_time": {"unique_id": "macro.dbt_postgres.postgres__snapshot_string_as_time", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__snapshot_string_as_time", "macro_sql": "{% macro postgres__snapshot_string_as_time(timestamp) -%}\n {%- set result = \"'\" ~ timestamp ~ \"'::timestamp without time zone\" -%}\n {{ return(result) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.266111, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_get_time": {"unique_id": "macro.dbt_postgres.postgres__snapshot_get_time", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__snapshot_get_time", "macro_sql": "{% macro postgres__snapshot_get_time() -%}\n {{ current_timestamp() }}::timestamp without time zone\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.266408, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_backcompat": {"unique_id": "macro.dbt_postgres.postgres__current_timestamp_backcompat", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__current_timestamp_backcompat", "macro_sql": "{% macro postgres__current_timestamp_backcompat() %}\n current_timestamp::{{ type_timestamp() }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.266695, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat": {"unique_id": "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "name": "postgres__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro postgres__current_timestamp_in_utc_backcompat() %}\n (current_timestamp at time zone 'utc')::{{ type_timestamp() }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.266977, "supported_languages": null}, "macro.dbt_postgres.postgres__get_catalog": {"unique_id": "macro.dbt_postgres.postgres__get_catalog", "package_name": 
"dbt_postgres", "path": "macros/catalog.sql", "original_file_path": "macros/catalog.sql", "name": "postgres__get_catalog", "macro_sql": "{% macro postgres__get_catalog(information_schema, schemas) -%}\n\n {%- call statement('catalog', fetch_result=True) -%}\n {#\n If the user has multiple databases set and the first one is wrong, this will fail.\n But we won't fail in the case where there are multiple quoting-difference-only dbs, which is better.\n #}\n {% set database = information_schema.database %}\n {{ adapter.verify_database(database) }}\n\n select\n '{{ database }}' as table_database,\n sch.nspname as table_schema,\n tbl.relname as table_name,\n case tbl.relkind\n when 'v' then 'VIEW'\n else 'BASE TABLE'\n end as table_type,\n tbl_desc.description as table_comment,\n col.attname as column_name,\n col.attnum as column_index,\n pg_catalog.format_type(col.atttypid, col.atttypmod) as column_type,\n col_desc.description as column_comment,\n pg_get_userbyid(tbl.relowner) as table_owner\n\n from pg_catalog.pg_namespace sch\n join pg_catalog.pg_class tbl on tbl.relnamespace = sch.oid\n join pg_catalog.pg_attribute col on col.attrelid = tbl.oid\n left outer join pg_catalog.pg_description tbl_desc on (tbl_desc.objoid = tbl.oid and tbl_desc.objsubid = 0)\n left outer join pg_catalog.pg_description col_desc on (col_desc.objoid = tbl.oid and col_desc.objsubid = col.attnum)\n\n where (\n {%- for schema in schemas -%}\n upper(sch.nspname) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n )\n and not pg_is_other_temp_schema(sch.oid) -- not a temporary schema belonging to another session\n and tbl.relpersistence in ('p', 'u') -- [p]ermanent table or [u]nlogged table. Exclude [t]emporary tables\n and tbl.relkind in ('r', 'v', 'f', 'p') -- o[r]dinary table, [v]iew, [f]oreign table, [p]artitioned table. 
Other values are [i]ndex, [S]equence, [c]omposite type, [t]OAST table, [m]aterialized view\n and col.attnum > 0 -- negative numbers are used for system columns such as oid\n and not col.attisdropped -- column as not been dropped\n\n order by\n sch.nspname,\n tbl.relname,\n col.attnum\n\n {%- endcall -%}\n\n {{ return(load_result('catalog').table) }}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2698722, "supported_languages": null}, "macro.dbt_postgres.postgres_get_relations": {"unique_id": "macro.dbt_postgres.postgres_get_relations", "package_name": "dbt_postgres", "path": "macros/relations.sql", "original_file_path": "macros/relations.sql", "name": "postgres_get_relations", "macro_sql": "{% macro postgres_get_relations () -%}\n\n {#\n -- in pg_depend, objid is the dependent, refobjid is the referenced object\n -- > a pg_depend entry indicates that the referenced object cannot be\n -- > dropped without also dropping the dependent object.\n #}\n\n {%- call statement('relations', fetch_result=True) -%}\n with relation as (\n select\n pg_rewrite.ev_class as class,\n pg_rewrite.oid as id\n from pg_rewrite\n ),\n class as (\n select\n oid as id,\n relname as name,\n relnamespace as schema,\n relkind as kind\n from pg_class\n ),\n dependency as (\n select distinct\n pg_depend.objid as id,\n pg_depend.refobjid as ref\n from pg_depend\n ),\n schema as (\n select\n pg_namespace.oid as id,\n pg_namespace.nspname as name\n from pg_namespace\n where nspname != 'information_schema' and nspname not like 'pg\\_%'\n ),\n referenced as (\n select\n relation.id AS id,\n referenced_class.name ,\n referenced_class.schema ,\n referenced_class.kind\n from relation\n join class as referenced_class on relation.class=referenced_class.id\n where referenced_class.kind in ('r', 'v')\n ),\n relationships as (\n select\n referenced.name as referenced_name,\n referenced.schema as referenced_schema_id,\n dependent_class.name as dependent_name,\n dependent_class.schema as dependent_schema_id,\n referenced.kind as kind\n from referenced\n join dependency on referenced.id=dependency.id\n join class as dependent_class on dependency.ref=dependent_class.id\n where\n (referenced.name != dependent_class.name or\n referenced.schema != dependent_class.schema)\n )\n\n select\n referenced_schema.name as referenced_schema,\n relationships.referenced_name as referenced_name,\n dependent_schema.name as dependent_schema,\n relationships.dependent_name as dependent_name\n from relationships\n join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id\n join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id\n group by referenced_schema, referenced_name, dependent_schema, dependent_name\n order by referenced_schema, referenced_name, dependent_schema, dependent_name;\n\n {%- endcall -%}\n\n {{ return(load_result('relations').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.271539, "supported_languages": null}, "macro.dbt_postgres.postgres__create_table_as": {"unique_id": "macro.dbt_postgres.postgres__create_table_as", "package_name": "dbt_postgres", "path": 
"macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__create_table_as", "macro_sql": "{% macro postgres__create_table_as(temporary, relation, sql) -%}\n {%- set unlogged = config.get('unlogged', default=false) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary -%}\n temporary\n {%- elif unlogged -%}\n unlogged\n {%- endif %} table {{ relation }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2825222, "supported_languages": null}, "macro.dbt_postgres.postgres__get_create_index_sql": {"unique_id": "macro.dbt_postgres.postgres__get_create_index_sql", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__get_create_index_sql", "macro_sql": "{% macro postgres__get_create_index_sql(relation, index_dict) -%}\n {%- set index_config = adapter.parse_index(index_dict) -%}\n {%- set comma_separated_columns = \", \".join(index_config.columns) -%}\n {%- set index_name = index_config.render(relation) -%}\n\n create {% if index_config.unique -%}\n unique\n {%- endif %} index if not exists\n \"{{ index_name }}\"\n on {{ relation }} {% if index_config.type -%}\n using {{ index_config.type }}\n {%- endif %}\n ({{ comma_separated_columns }});\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.283871, "supported_languages": null}, "macro.dbt_postgres.postgres__create_schema": {"unique_id": "macro.dbt_postgres.postgres__create_schema", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__create_schema", "macro_sql": "{% macro postgres__create_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier().include(database=False) }}\n {%- endcall -%}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.284721, "supported_languages": null}, "macro.dbt_postgres.postgres__drop_schema": {"unique_id": "macro.dbt_postgres.postgres__drop_schema", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__drop_schema", "macro_sql": "{% macro postgres__drop_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier().include(database=False) }} cascade\n {%- endcall -%}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.285629, "supported_languages": null}, "macro.dbt_postgres.postgres__get_columns_in_relation": {"unique_id": 
"macro.dbt_postgres.postgres__get_columns_in_relation", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__get_columns_in_relation", "macro_sql": "{% macro postgres__get_columns_in_relation(relation) -%}\n {% call statement('get_columns_in_relation', fetch_result=True) %}\n select\n column_name,\n data_type,\n character_maximum_length,\n numeric_precision,\n numeric_scale\n\n from {{ relation.information_schema('columns') }}\n where table_name = '{{ relation.identifier }}'\n {% if relation.schema %}\n and table_schema = '{{ relation.schema }}'\n {% endif %}\n order by ordinal_position\n\n {% endcall %}\n {% set table = load_result('get_columns_in_relation').table %}\n {{ return(sql_convert_columns_in_relation(table)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.sql_convert_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.286848, "supported_languages": null}, "macro.dbt_postgres.postgres__list_relations_without_caching": {"unique_id": "macro.dbt_postgres.postgres__list_relations_without_caching", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__list_relations_without_caching", "macro_sql": "{% macro postgres__list_relations_without_caching(schema_relation) %}\n {% call statement('list_relations_without_caching', fetch_result=True) -%}\n select\n '{{ schema_relation.database }}' as database,\n tablename as name,\n schemaname as schema,\n 'table' as type\n from pg_tables\n where schemaname ilike '{{ schema_relation.schema }}'\n union all\n select\n '{{ schema_relation.database }}' as database,\n viewname as name,\n schemaname as schema,\n 'view' as type\n from pg_views\n where schemaname ilike '{{ schema_relation.schema }}'\n {% endcall %}\n {{ return(load_result('list_relations_without_caching').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2877922, "supported_languages": null}, "macro.dbt_postgres.postgres__information_schema_name": {"unique_id": "macro.dbt_postgres.postgres__information_schema_name", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__information_schema_name", "macro_sql": "{% macro postgres__information_schema_name(database) -%}\n {% if database_name -%}\n {{ adapter.verify_database(database_name) }}\n {%- endif -%}\n information_schema\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.288241, "supported_languages": null}, "macro.dbt_postgres.postgres__list_schemas": {"unique_id": "macro.dbt_postgres.postgres__list_schemas", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__list_schemas", "macro_sql": "{% macro postgres__list_schemas(database) %}\n {% if database -%}\n {{ adapter.verify_database(database) }}\n {%- endif -%}\n {% call statement('list_schemas', fetch_result=True, auto_begin=False) %}\n select 
distinct nspname from pg_namespace\n {% endcall %}\n {{ return(load_result('list_schemas').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.289124, "supported_languages": null}, "macro.dbt_postgres.postgres__check_schema_exists": {"unique_id": "macro.dbt_postgres.postgres__check_schema_exists", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__check_schema_exists", "macro_sql": "{% macro postgres__check_schema_exists(information_schema, schema) -%}\n {% if information_schema.database -%}\n {{ adapter.verify_database(information_schema.database) }}\n {%- endif -%}\n {% call statement('check_schema_exists', fetch_result=True, auto_begin=False) %}\n select count(*) from pg_namespace where nspname = '{{ schema }}'\n {% endcall %}\n {{ return(load_result('check_schema_exists').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.290139, "supported_languages": null}, "macro.dbt_postgres.postgres__make_relation_with_suffix": {"unique_id": "macro.dbt_postgres.postgres__make_relation_with_suffix", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__make_relation_with_suffix", "macro_sql": "{% macro postgres__make_relation_with_suffix(base_relation, suffix, dstring) %}\n {% if dstring %}\n {% set dt = modules.datetime.datetime.now() %}\n {% set dtstring = dt.strftime(\"%H%M%S%f\") %}\n {% set suffix = suffix ~ dtstring %}\n {% endif %}\n {% set suffix_length = suffix|length %}\n {% set relation_max_name_length = base_relation.relation_max_name_length() %}\n {% if suffix_length > relation_max_name_length %}\n {% do exceptions.raise_compiler_error('Relation suffix is too long (' ~ suffix_length ~ ' characters). 
Maximum length is ' ~ relation_max_name_length ~ ' characters.') %}\n {% endif %}\n {% set identifier = base_relation.identifier[:relation_max_name_length - suffix_length] ~ suffix %}\n\n {{ return(base_relation.incorporate(path={\"identifier\": identifier })) }}\n\n {% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2923021, "supported_languages": null}, "macro.dbt_postgres.postgres__make_intermediate_relation": {"unique_id": "macro.dbt_postgres.postgres__make_intermediate_relation", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__make_intermediate_relation", "macro_sql": "{% macro postgres__make_intermediate_relation(base_relation, suffix) %}\n {{ return(postgres__make_relation_with_suffix(base_relation, suffix, dstring=False)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.292798, "supported_languages": null}, "macro.dbt_postgres.postgres__make_temp_relation": {"unique_id": "macro.dbt_postgres.postgres__make_temp_relation", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__make_temp_relation", "macro_sql": "{% macro postgres__make_temp_relation(base_relation, suffix) %}\n {% set temp_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=True) %}\n {{ return(temp_relation.incorporate(path={\"schema\": none,\n \"database\": none})) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.293597, "supported_languages": null}, "macro.dbt_postgres.postgres__make_backup_relation": {"unique_id": "macro.dbt_postgres.postgres__make_backup_relation", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__make_backup_relation", "macro_sql": "{% macro postgres__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {% set backup_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=False) %}\n {{ return(backup_relation.incorporate(type=backup_relation_type)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.294295, "supported_languages": null}, "macro.dbt_postgres.postgres_escape_comment": {"unique_id": "macro.dbt_postgres.postgres_escape_comment", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres_escape_comment", "macro_sql": "{% macro postgres_escape_comment(comment) -%}\n {% if comment is not string %}\n {% do exceptions.raise_compiler_error('cannot escape a non-string: ' ~ comment) %}\n {% endif %}\n {%- set magic = '$dbt_comment_literal_block$' -%}\n {%- if magic in comment -%}\n {%- do 
exceptions.raise_compiler_error('The string ' ~ magic ~ ' is not allowed in comments.') -%}\n {%- endif -%}\n {{ magic }}{{ comment }}{{ magic }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2953768, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_relation_comment": {"unique_id": "macro.dbt_postgres.postgres__alter_relation_comment", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__alter_relation_comment", "macro_sql": "{% macro postgres__alter_relation_comment(relation, comment) %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on {{ relation.type }} {{ relation }} is {{ escaped_comment }};\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.296059, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_column_comment": {"unique_id": "macro.dbt_postgres.postgres__alter_column_comment", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__alter_column_comment", "macro_sql": "{% macro postgres__alter_column_comment(relation, column_dict) %}\n {% set existing_columns = adapter.get_columns_in_relation(relation) | map(attribute=\"name\") | list %}\n {% for column_name in column_dict if (column_name in existing_columns) %}\n {% set comment = column_dict[column_name]['description'] %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on column {{ relation }}.{{ adapter.quote(column_name) if column_dict[column_name]['quote'] else column_name }} is {{ escaped_comment }};\n {% endfor %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2975638, "supported_languages": null}, "macro.dbt_postgres.postgres__get_show_grant_sql": {"unique_id": "macro.dbt_postgres.postgres__get_show_grant_sql", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__get_show_grant_sql", "macro_sql": "\n\n{%- macro postgres__get_show_grant_sql(relation) -%}\n select grantee, privilege_type\n from {{ relation.information_schema('role_table_grants') }}\n where grantor = current_role\n and grantee != current_role\n and table_schema = '{{ relation.schema }}'\n and table_name = '{{ relation.identifier }}'\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.2980769, "supported_languages": null}, "macro.dbt_postgres.postgres__copy_grants": {"unique_id": "macro.dbt_postgres.postgres__copy_grants", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "name": "postgres__copy_grants", "macro_sql": "{% macro postgres__copy_grants() %}\n {{ return(False) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], 
"depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.298378, "supported_languages": null}, "macro.dbt_postgres.postgres__get_incremental_default_sql": {"unique_id": "macro.dbt_postgres.postgres__get_incremental_default_sql", "package_name": "dbt_postgres", "path": "macros/materializations/incremental_strategies.sql", "original_file_path": "macros/materializations/incremental_strategies.sql", "name": "postgres__get_incremental_default_sql", "macro_sql": "{% macro postgres__get_incremental_default_sql(arg_dict) %}\n\n {% if arg_dict[\"unique_key\"] %}\n {% do return(get_incremental_delete_insert_sql(arg_dict)) %}\n {% else %}\n {% do return(get_incremental_append_sql(arg_dict)) %}\n {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_incremental_delete_insert_sql", "macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.299883, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_merge_sql": {"unique_id": "macro.dbt_postgres.postgres__snapshot_merge_sql", "package_name": "dbt_postgres", "path": "macros/materializations/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshot_merge.sql", "name": "postgres__snapshot_merge_sql", "macro_sql": "{% macro postgres__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n update {{ target }}\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_scd_id::text = {{ target }}.dbt_scd_id::text\n and DBT_INTERNAL_SOURCE.dbt_change_type::text in ('update'::text, 'delete'::text)\n and {{ target }}.dbt_valid_to is null;\n\n insert into {{ target }} ({{ insert_cols_csv }})\n select {% for column in insert_cols -%}\n DBT_INTERNAL_SOURCE.{{ column }} {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_change_type::text = 'insert'::text;\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3019001, "supported_languages": null}, "macro.dbt_postgres.postgres__dateadd": {"unique_id": "macro.dbt_postgres.postgres__dateadd", "package_name": "dbt_postgres", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "name": "postgres__dateadd", "macro_sql": "{% macro postgres__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n {{ from_date_or_timestamp }} + ((interval '1 {{ datepart }}') * ({{ interval }}))\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.302811, "supported_languages": null}, "macro.dbt_postgres.postgres__listagg": {"unique_id": "macro.dbt_postgres.postgres__listagg", "package_name": "dbt_postgres", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "name": "postgres__listagg", "macro_sql": "{% macro postgres__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n 
array_to_string(\n (array_agg(\n {{ measure }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n ))[1:{{ limit_num }}],\n {{ delimiter_text }}\n )\n {%- else %}\n string_agg(\n {{ measure }},\n {{ delimiter_text }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n )\n {%- endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.304598, "supported_languages": null}, "macro.dbt_postgres.postgres__datediff": {"unique_id": "macro.dbt_postgres.postgres__datediff", "package_name": "dbt_postgres", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "name": "postgres__datediff", "macro_sql": "{% macro postgres__datediff(first_date, second_date, datepart) -%}\n\n {% if datepart == 'year' %}\n (date_part('year', ({{second_date}})::date) - date_part('year', ({{first_date}})::date))\n {% elif datepart == 'quarter' %}\n ({{ datediff(first_date, second_date, 'year') }} * 4 + date_part('quarter', ({{second_date}})::date) - date_part('quarter', ({{first_date}})::date))\n {% elif datepart == 'month' %}\n ({{ datediff(first_date, second_date, 'year') }} * 12 + date_part('month', ({{second_date}})::date) - date_part('month', ({{first_date}})::date))\n {% elif datepart == 'day' %}\n (({{second_date}})::date - ({{first_date}})::date)\n {% elif datepart == 'week' %}\n ({{ datediff(first_date, second_date, 'day') }} / 7 + case\n when date_part('dow', ({{first_date}})::timestamp) <= date_part('dow', ({{second_date}})::timestamp) then\n case when {{first_date}} <= {{second_date}} then 0 else -1 end\n else\n case when {{first_date}} <= {{second_date}} then 1 else 0 end\n end)\n {% elif datepart == 'hour' %}\n ({{ datediff(first_date, second_date, 'day') }} * 24 + date_part('hour', ({{second_date}})::timestamp) - date_part('hour', ({{first_date}})::timestamp))\n {% elif datepart == 'minute' %}\n ({{ datediff(first_date, second_date, 'hour') }} * 60 + date_part('minute', ({{second_date}})::timestamp) - date_part('minute', ({{first_date}})::timestamp))\n {% elif datepart == 'second' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60 + floor(date_part('second', ({{second_date}})::timestamp)) - floor(date_part('second', ({{first_date}})::timestamp)))\n {% elif datepart == 'millisecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000 + floor(date_part('millisecond', ({{second_date}})::timestamp)) - floor(date_part('millisecond', ({{first_date}})::timestamp)))\n {% elif datepart == 'microsecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000000 + floor(date_part('microsecond', ({{second_date}})::timestamp)) - floor(date_part('microsecond', ({{first_date}})::timestamp)))\n {% else %}\n {{ exceptions.raise_compiler_error(\"Unsupported datepart for macro datediff in postgres: {!r}\".format(datepart)) }}\n {% endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.31184, "supported_languages": null}, "macro.dbt_postgres.postgres__any_value": {"unique_id": "macro.dbt_postgres.postgres__any_value", "package_name": "dbt_postgres", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "name": 
"postgres__any_value", "macro_sql": "{% macro postgres__any_value(expression) -%}\n\n min({{ expression }})\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.312644, "supported_languages": null}, "macro.dbt_postgres.postgres__last_day": {"unique_id": "macro.dbt_postgres.postgres__last_day", "package_name": "dbt_postgres", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "name": "postgres__last_day", "macro_sql": "{% macro postgres__last_day(date, datepart) -%}\n\n {%- if datepart == 'quarter' -%}\n -- postgres dateadd does not support quarter interval.\n cast(\n {{dbt.dateadd('day', '-1',\n dbt.dateadd('month', '3', dbt.date_trunc(datepart, date))\n )}}\n as date)\n {%- else -%}\n {{dbt.default_last_day(date, datepart)}}\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc", "macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.314328, "supported_languages": null}, "macro.dbt_postgres.postgres__split_part": {"unique_id": "macro.dbt_postgres.postgres__split_part", "package_name": "dbt_postgres", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "name": "postgres__split_part", "macro_sql": "{% macro postgres__split_part(string_text, delimiter_text, part_number) %}\n\n {% if part_number >= 0 %}\n {{ dbt.default__split_part(string_text, delimiter_text, part_number) }}\n {% else %}\n {{ dbt._split_part_negative(string_text, delimiter_text, part_number) }}\n {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__split_part", "macro.dbt._split_part_negative"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3157928, "supported_languages": null}, "macro.dbt.run_hooks": {"unique_id": "macro.dbt.run_hooks", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "run_hooks", "macro_sql": "{% macro run_hooks(hooks, inside_transaction=True) %}\n {% for hook in hooks | selectattr('transaction', 'equalto', inside_transaction) %}\n {% if not inside_transaction and loop.first %}\n {% call statement(auto_begin=inside_transaction) %}\n commit;\n {% endcall %}\n {% endif %}\n {% set rendered = render(hook.get('sql')) | trim %}\n {% if (rendered | length) > 0 %}\n {% call statement(auto_begin=inside_transaction) %}\n {{ rendered }}\n {% endcall %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.318797, "supported_languages": null}, "macro.dbt.make_hook_config": {"unique_id": "macro.dbt.make_hook_config", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "make_hook_config", "macro_sql": "{% macro make_hook_config(sql, inside_transaction) %}\n {{ tojson({\"sql\": sql, \"transaction\": inside_transaction}) }}\n{% endmacro %}", 
"resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.319341, "supported_languages": null}, "macro.dbt.before_begin": {"unique_id": "macro.dbt.before_begin", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "before_begin", "macro_sql": "{% macro before_begin(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.319724, "supported_languages": null}, "macro.dbt.in_transaction": {"unique_id": "macro.dbt.in_transaction", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "in_transaction", "macro_sql": "{% macro in_transaction(sql) %}\n {{ make_hook_config(sql, inside_transaction=True) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.320093, "supported_languages": null}, "macro.dbt.after_commit": {"unique_id": "macro.dbt.after_commit", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "name": "after_commit", "macro_sql": "{% macro after_commit(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.320464, "supported_languages": null}, "macro.dbt.set_sql_header": {"unique_id": "macro.dbt.set_sql_header", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "name": "set_sql_header", "macro_sql": "{% macro set_sql_header(config) -%}\n {{ config.set('sql_header', caller()) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.321773, "supported_languages": null}, "macro.dbt.should_full_refresh": {"unique_id": "macro.dbt.should_full_refresh", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "name": "should_full_refresh", "macro_sql": "{% macro should_full_refresh() %}\n {% set config_full_refresh = config.get('full_refresh') %}\n {% if config_full_refresh is none %}\n {% set config_full_refresh = flags.FULL_REFRESH %}\n {% endif %}\n {% do return(config_full_refresh) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.322558, "supported_languages": null}, "macro.dbt.should_store_failures": {"unique_id": "macro.dbt.should_store_failures", "package_name": "dbt", "path": 
"macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "name": "should_store_failures", "macro_sql": "{% macro should_store_failures() %}\n {% set config_store_failures = config.get('store_failures') %}\n {% if config_store_failures is none %}\n {% set config_store_failures = flags.STORE_FAILURES %}\n {% endif %}\n {% do return(config_store_failures) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.323475, "supported_languages": null}, "macro.dbt.snapshot_merge_sql": {"unique_id": "macro.dbt.snapshot_merge_sql", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "name": "snapshot_merge_sql", "macro_sql": "{% macro snapshot_merge_sql(target, source, insert_cols) -%}\n {{ adapter.dispatch('snapshot_merge_sql', 'dbt')(target, source, insert_cols) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.324824, "supported_languages": null}, "macro.dbt.default__snapshot_merge_sql": {"unique_id": "macro.dbt.default__snapshot_merge_sql", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "name": "default__snapshot_merge_sql", "macro_sql": "{% macro default__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on DBT_INTERNAL_SOURCE.dbt_scd_id = DBT_INTERNAL_DEST.dbt_scd_id\n\n when matched\n and DBT_INTERNAL_DEST.dbt_valid_to is null\n and DBT_INTERNAL_SOURCE.dbt_change_type in ('update', 'delete')\n then update\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n\n when not matched\n and DBT_INTERNAL_SOURCE.dbt_change_type = 'insert'\n then insert ({{ insert_cols_csv }})\n values ({{ insert_cols_csv }})\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3254972, "supported_languages": null}, "macro.dbt.strategy_dispatch": {"unique_id": "macro.dbt.strategy_dispatch", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "strategy_dispatch", "macro_sql": "{% macro strategy_dispatch(name) -%}\n{% set original_name = name %}\n {% if '.' 
in name %}\n {% set package_name, name = name.split(\".\", 1) %}\n {% else %}\n {% set package_name = none %}\n {% endif %}\n\n {% if package_name is none %}\n {% set package_context = context %}\n {% elif package_name in context %}\n {% set package_context = context[package_name] %}\n {% else %}\n {% set error_msg %}\n Could not find package '{{package_name}}', called with '{{original_name}}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n\n {%- set search_name = 'snapshot_' ~ name ~ '_strategy' -%}\n\n {% if search_name not in package_context %}\n {% set error_msg %}\n The specified strategy macro '{{name}}' was not found in package '{{ package_name }}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n {{ return(package_context[search_name]) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3332338, "supported_languages": null}, "macro.dbt.snapshot_hash_arguments": {"unique_id": "macro.dbt.snapshot_hash_arguments", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_hash_arguments", "macro_sql": "{% macro snapshot_hash_arguments(args) -%}\n {{ adapter.dispatch('snapshot_hash_arguments', 'dbt')(args) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.333798, "supported_languages": null}, "macro.dbt.default__snapshot_hash_arguments": {"unique_id": "macro.dbt.default__snapshot_hash_arguments", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "default__snapshot_hash_arguments", "macro_sql": "{% macro default__snapshot_hash_arguments(args) -%}\n md5({%- for arg in args -%}\n coalesce(cast({{ arg }} as varchar ), '')\n {% if not loop.last %} || '|' || {% endif %}\n {%- endfor -%})\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3343842, "supported_languages": null}, "macro.dbt.snapshot_timestamp_strategy": {"unique_id": "macro.dbt.snapshot_timestamp_strategy", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_timestamp_strategy", "macro_sql": "{% macro snapshot_timestamp_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set primary_key = config['unique_key'] %}\n {% set updated_at = config['updated_at'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n\n {#/*\n The snapshot relation might not have an {{ updated_at }} value if the\n snapshot strategy is changed from `check` to `timestamp`. 
We\n should use a dbt-created column for the comparison in the snapshot\n table instead of assuming that the user-supplied {{ updated_at }}\n will be present in the historical data.\n\n See https://github.com/dbt-labs/dbt-core/issues/2350\n */ #}\n {% set row_changed_expr -%}\n ({{ snapshotted_rel }}.dbt_valid_from < {{ current_rel }}.{{ updated_at }})\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.336249, "supported_languages": null}, "macro.dbt.snapshot_string_as_time": {"unique_id": "macro.dbt.snapshot_string_as_time", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_string_as_time", "macro_sql": "{% macro snapshot_string_as_time(timestamp) -%}\n {{ adapter.dispatch('snapshot_string_as_time', 'dbt')(timestamp) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_string_as_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.336672, "supported_languages": null}, "macro.dbt.default__snapshot_string_as_time": {"unique_id": "macro.dbt.default__snapshot_string_as_time", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "default__snapshot_string_as_time", "macro_sql": "{% macro default__snapshot_string_as_time(timestamp) %}\n {% do exceptions.raise_not_implemented(\n 'snapshot_string_as_time macro not implemented for adapter '+adapter.type()\n ) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.337124, "supported_languages": null}, "macro.dbt.snapshot_check_all_get_existing_columns": {"unique_id": "macro.dbt.snapshot_check_all_get_existing_columns", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_check_all_get_existing_columns", "macro_sql": "{% macro snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) -%}\n {%- if not target_exists -%}\n {#-- no table yet -> return whatever the query does --#}\n {{ return((false, query_columns)) }}\n {%- endif -%}\n\n {#-- handle any schema changes --#}\n {%- set target_relation = adapter.get_relation(database=node.database, schema=node.schema, identifier=node.alias) -%}\n\n {% if check_cols_config == 'all' %}\n {%- set query_columns = get_columns_in_query(node['compiled_code']) -%}\n\n {% elif check_cols_config is iterable and (check_cols_config | length) > 0 %}\n {#-- query for proper casing/quoting, to support comparison below --#}\n {%- set select_check_cols_from_target -%}\n select {{ check_cols_config | join(', ') }} from ({{ 
node['compiled_code'] }}) subq\n {%- endset -%}\n {% set query_columns = get_columns_in_query(select_check_cols_from_target) %}\n\n {% else %}\n {% do exceptions.raise_compiler_error(\"Invalid value for 'check_cols': \" ~ check_cols_config) %}\n {% endif %}\n\n {%- set existing_cols = adapter.get_columns_in_relation(target_relation) | map(attribute = 'name') | list -%}\n {%- set ns = namespace() -%} {#-- handle for-loop scoping with a namespace --#}\n {%- set ns.column_added = false -%}\n\n {%- set intersection = [] -%}\n {%- for col in query_columns -%}\n {%- if col in existing_cols -%}\n {%- do intersection.append(adapter.quote(col)) -%}\n {%- else -%}\n {% set ns.column_added = true %}\n {%- endif -%}\n {%- endfor -%}\n {{ return((ns.column_added, intersection)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.340634, "supported_languages": null}, "macro.dbt.snapshot_check_strategy": {"unique_id": "macro.dbt.snapshot_check_strategy", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "name": "snapshot_check_strategy", "macro_sql": "{% macro snapshot_check_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set check_cols_config = config['check_cols'] %}\n {% set primary_key = config['unique_key'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n {% set updated_at = config.get('updated_at', snapshot_get_time()) %}\n\n {% set column_added = false %}\n\n {% set column_added, check_cols = snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) %}\n\n {%- set row_changed_expr -%}\n (\n {%- if column_added -%}\n {{ get_true_sql() }}\n {%- else -%}\n {%- for col in check_cols -%}\n {{ snapshotted_rel }}.{{ col }} != {{ current_rel }}.{{ col }}\n or\n (\n (({{ snapshotted_rel }}.{{ col }} is null) and not ({{ current_rel }}.{{ col }} is null))\n or\n ((not {{ snapshotted_rel }}.{{ col }} is null) and ({{ current_rel }}.{{ col }} is null))\n )\n {%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n {%- endif -%}\n )\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.snapshot_get_time", "macro.dbt.snapshot_check_all_get_existing_columns", "macro.dbt.get_true_sql", "macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.34407, "supported_languages": null}, "macro.dbt.create_columns": {"unique_id": "macro.dbt.create_columns", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "create_columns", "macro_sql": "{% macro create_columns(relation, columns) %}\n {{ adapter.dispatch('create_columns', 'dbt')(relation, columns) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__create_columns"]}, 
"description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.351994, "supported_languages": null}, "macro.dbt.default__create_columns": {"unique_id": "macro.dbt.default__create_columns", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__create_columns", "macro_sql": "{% macro default__create_columns(relation, columns) %}\n {% for column in columns %}\n {% call statement() %}\n alter table {{ relation }} add column \"{{ column.name }}\" {{ column.data_type }};\n {% endcall %}\n {% endfor %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.35276, "supported_languages": null}, "macro.dbt.post_snapshot": {"unique_id": "macro.dbt.post_snapshot", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "post_snapshot", "macro_sql": "{% macro post_snapshot(staging_relation) %}\n {{ adapter.dispatch('post_snapshot', 'dbt')(staging_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.353195, "supported_languages": null}, "macro.dbt.default__post_snapshot": {"unique_id": "macro.dbt.default__post_snapshot", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__post_snapshot", "macro_sql": "{% macro default__post_snapshot(staging_relation) %}\n {# no-op #}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.353436, "supported_languages": null}, "macro.dbt.get_true_sql": {"unique_id": "macro.dbt.get_true_sql", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "get_true_sql", "macro_sql": "{% macro get_true_sql() %}\n {{ adapter.dispatch('get_true_sql', 'dbt')() }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_true_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3538182, "supported_languages": null}, "macro.dbt.default__get_true_sql": {"unique_id": "macro.dbt.default__get_true_sql", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__get_true_sql", "macro_sql": "{% macro default__get_true_sql() %}\n {{ return('TRUE') }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3541272, "supported_languages": null}, "macro.dbt.snapshot_staging_table": {"unique_id": 
"macro.dbt.snapshot_staging_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "snapshot_staging_table", "macro_sql": "{% macro snapshot_staging_table(strategy, source_sql, target_relation) -%}\n {{ adapter.dispatch('snapshot_staging_table', 'dbt')(strategy, source_sql, target_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__snapshot_staging_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3546538, "supported_languages": null}, "macro.dbt.default__snapshot_staging_table": {"unique_id": "macro.dbt.default__snapshot_staging_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__snapshot_staging_table", "macro_sql": "{% macro default__snapshot_staging_table(strategy, source_sql, target_relation) -%}\n\n with snapshot_query as (\n\n {{ source_sql }}\n\n ),\n\n snapshotted_data as (\n\n select *,\n {{ strategy.unique_key }} as dbt_unique_key\n\n from {{ target_relation }}\n where dbt_valid_to is null\n\n ),\n\n insertions_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to,\n {{ strategy.scd_id }} as dbt_scd_id\n\n from snapshot_query\n ),\n\n updates_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n {{ strategy.updated_at }} as dbt_valid_to\n\n from snapshot_query\n ),\n\n {%- if strategy.invalidate_hard_deletes %}\n\n deletes_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key\n from snapshot_query\n ),\n {% endif %}\n\n insertions as (\n\n select\n 'insert' as dbt_change_type,\n source_data.*\n\n from insertions_source_data as source_data\n left outer join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where snapshotted_data.dbt_unique_key is null\n or (\n snapshotted_data.dbt_unique_key is not null\n and (\n {{ strategy.row_changed }}\n )\n )\n\n ),\n\n updates as (\n\n select\n 'update' as dbt_change_type,\n source_data.*,\n snapshotted_data.dbt_scd_id\n\n from updates_source_data as source_data\n join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where (\n {{ strategy.row_changed }}\n )\n )\n\n {%- if strategy.invalidate_hard_deletes -%}\n ,\n\n deletes as (\n\n select\n 'delete' as dbt_change_type,\n source_data.*,\n {{ snapshot_get_time() }} as dbt_valid_from,\n {{ snapshot_get_time() }} as dbt_updated_at,\n {{ snapshot_get_time() }} as dbt_valid_to,\n snapshotted_data.dbt_scd_id\n\n from snapshotted_data\n left join deletes_source_data as source_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where source_data.dbt_unique_key is null\n )\n {%- endif %}\n\n select * from insertions\n union all\n select * from updates\n {%- if strategy.invalidate_hard_deletes %}\n union all\n select * from deletes\n {%- endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.snapshot_get_time"]}, "description": "", 
"meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.356801, "supported_languages": null}, "macro.dbt.build_snapshot_table": {"unique_id": "macro.dbt.build_snapshot_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "build_snapshot_table", "macro_sql": "{% macro build_snapshot_table(strategy, sql) -%}\n {{ adapter.dispatch('build_snapshot_table', 'dbt')(strategy, sql) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__build_snapshot_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.357286, "supported_languages": null}, "macro.dbt.default__build_snapshot_table": {"unique_id": "macro.dbt.default__build_snapshot_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "default__build_snapshot_table", "macro_sql": "{% macro default__build_snapshot_table(strategy, sql) %}\n\n select *,\n {{ strategy.scd_id }} as dbt_scd_id,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to\n from (\n {{ sql }}\n ) sbq\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.357929, "supported_languages": null}, "macro.dbt.build_snapshot_staging_table": {"unique_id": "macro.dbt.build_snapshot_staging_table", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "name": "build_snapshot_staging_table", "macro_sql": "{% macro build_snapshot_staging_table(strategy, sql, target_relation) %}\n {% set temp_relation = make_temp_relation(target_relation) %}\n\n {% set select = snapshot_staging_table(strategy, sql, target_relation) %}\n\n {% call statement('build_snapshot_staging_relation') %}\n {{ create_table_as(True, temp_relation, select) }}\n {% endcall %}\n\n {% do return(temp_relation) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.make_temp_relation", "macro.dbt.snapshot_staging_table", "macro.dbt.statement", "macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.359015, "supported_languages": null}, "macro.dbt.materialization_snapshot_default": {"unique_id": "macro.dbt.materialization_snapshot_default", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot.sql", "original_file_path": "macros/materializations/snapshots/snapshot.sql", "name": "materialization_snapshot_default", "macro_sql": "{% materialization snapshot, default %}\n {%- set config = model['config'] -%}\n\n {%- set target_table = model.get('alias', model.get('name')) -%}\n\n {%- set strategy_name = config.get('strategy') -%}\n {%- set unique_key = config.get('unique_key') %}\n -- grab current tables grants config for comparision later on\n {%- set grant_config = config.get('grants') -%}\n\n {% set target_relation_exists, target_relation = 
get_or_create_relation(\n database=model.database,\n schema=model.schema,\n identifier=target_table,\n type='table') -%}\n\n {%- if not target_relation.is_table -%}\n {% do exceptions.relation_wrong_type(target_relation, 'table') %}\n {%- endif -%}\n\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set strategy_macro = strategy_dispatch(strategy_name) %}\n {% set strategy = strategy_macro(model, \"snapshotted_data\", \"source_data\", config, target_relation_exists) %}\n\n {% if not target_relation_exists %}\n\n {% set build_sql = build_snapshot_table(strategy, model['compiled_code']) %}\n {% set final_sql = create_table_as(False, target_relation, build_sql) %}\n\n {% else %}\n\n {{ adapter.valid_snapshot_target(target_relation) }}\n\n {% set staging_table = build_snapshot_staging_table(strategy, sql, target_relation) %}\n\n -- this may no-op if the database does not require column expansion\n {% do adapter.expand_target_column_types(from_relation=staging_table,\n to_relation=target_relation) %}\n\n {% set missing_columns = adapter.get_missing_columns(staging_table, target_relation)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% do create_columns(target_relation, missing_columns) %}\n\n {% set source_columns = adapter.get_columns_in_relation(staging_table)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% set quoted_source_columns = [] %}\n {% for column in source_columns %}\n {% do quoted_source_columns.append(adapter.quote(column.name)) %}\n {% endfor %}\n\n {% set final_sql = snapshot_merge_sql(\n target = target_relation,\n source = staging_table,\n insert_cols = quoted_source_columns\n )\n %}\n\n {% endif %}\n\n {% call statement('main') %}\n {{ final_sql }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(target_relation_exists, full_refresh_mode=False) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if not target_relation_exists %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {% if staging_table is defined %}\n {% do post_snapshot(staging_table) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_or_create_relation", "macro.dbt.run_hooks", "macro.dbt.strategy_dispatch", "macro.dbt.build_snapshot_table", "macro.dbt.create_table_as", "macro.dbt.build_snapshot_staging_table", "macro.dbt.create_columns", "macro.dbt.snapshot_merge_sql", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes", "macro.dbt.post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.37277, "supported_languages": ["sql"]}, "macro.dbt.materialization_test_default": {"unique_id": "macro.dbt.materialization_test_default", "package_name": "dbt", 
"path": "macros/materializations/tests/test.sql", "original_file_path": "macros/materializations/tests/test.sql", "name": "materialization_test_default", "macro_sql": "{%- materialization test, default -%}\n\n {% set relations = [] %}\n\n {% if should_store_failures() %}\n\n {% set identifier = model['alias'] %}\n {% set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n {% set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database, type='table') -%} %}\n\n {% if old_relation %}\n {% do adapter.drop_relation(old_relation) %}\n {% endif %}\n\n {% call statement(auto_begin=True) %}\n {{ create_table_as(False, target_relation, sql) }}\n {% endcall %}\n\n {% do relations.append(target_relation) %}\n\n {% set main_sql %}\n select *\n from {{ target_relation }}\n {% endset %}\n\n {{ adapter.commit() }}\n\n {% else %}\n\n {% set main_sql = sql %}\n\n {% endif %}\n\n {% set limit = config.get('limit') %}\n {% set fail_calc = config.get('fail_calc') %}\n {% set warn_if = config.get('warn_if') %}\n {% set error_if = config.get('error_if') %}\n\n {% call statement('main', fetch_result=True) -%}\n\n {{ get_test_sql(main_sql, fail_calc, warn_if, error_if, limit)}}\n\n {%- endcall %}\n\n {{ return({'relations': relations}) }}\n\n{%- endmaterialization -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.should_store_failures", "macro.dbt.statement", "macro.dbt.create_table_as", "macro.dbt.get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3780842, "supported_languages": ["sql"]}, "macro.dbt.get_test_sql": {"unique_id": "macro.dbt.get_test_sql", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "name": "get_test_sql", "macro_sql": "{% macro get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n {{ adapter.dispatch('get_test_sql', 'dbt')(main_sql, fail_calc, warn_if, error_if, limit) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.379586, "supported_languages": null}, "macro.dbt.default__get_test_sql": {"unique_id": "macro.dbt.default__get_test_sql", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "name": "default__get_test_sql", "macro_sql": "{% macro default__get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n select\n {{ fail_calc }} as failures,\n {{ fail_calc }} {{ warn_if }} as should_warn,\n {{ fail_calc }} {{ error_if }} as should_error\n from (\n {{ main_sql }}\n {{ \"limit \" ~ limit if limit != none }}\n ) dbt_internal_test\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3803918, "supported_languages": null}, "macro.dbt.get_where_subquery": {"unique_id": "macro.dbt.get_where_subquery", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "name": "get_where_subquery", "macro_sql": "{% 
macro get_where_subquery(relation) -%}\n {% do return(adapter.dispatch('get_where_subquery', 'dbt')(relation)) %}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_where_subquery"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.381695, "supported_languages": null}, "macro.dbt.default__get_where_subquery": {"unique_id": "macro.dbt.default__get_where_subquery", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "name": "default__get_where_subquery", "macro_sql": "{% macro default__get_where_subquery(relation) -%}\n {% set where = config.get('where', '') %}\n {% if where %}\n {%- set filtered -%}\n (select * from {{ relation }} where {{ where }}) dbt_subquery\n {%- endset -%}\n {% do return(filtered) %}\n {%- else -%}\n {% do return(relation) %}\n {%- endif -%}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.3826728, "supported_languages": null}, "macro.dbt.get_quoted_csv": {"unique_id": "macro.dbt.get_quoted_csv", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "get_quoted_csv", "macro_sql": "{% macro get_quoted_csv(column_names) %}\n\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote(col)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.386467, "supported_languages": null}, "macro.dbt.diff_columns": {"unique_id": "macro.dbt.diff_columns", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "diff_columns", "macro_sql": "{% macro diff_columns(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% set source_names = source_columns | map(attribute = 'column') | list %}\n {% set target_names = target_columns | map(attribute = 'column') | list %}\n\n {# --check whether the name attribute exists in the target - this does not perform a data type check #}\n {% for sc in source_columns %}\n {% if sc.name not in target_names %}\n {{ result.append(sc) }}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.387875, "supported_languages": null}, "macro.dbt.diff_column_data_types": {"unique_id": "macro.dbt.diff_column_data_types", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "diff_column_data_types", "macro_sql": "{% macro diff_column_data_types(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% for 
sc in source_columns %}\n {% set tc = target_columns | selectattr(\"name\", \"equalto\", sc.name) | list | first %}\n {% if tc %}\n {% if sc.data_type != tc.data_type and not sc.can_expand_to(other_column=tc) %}\n {{ result.append( { 'column_name': tc.name, 'new_type': sc.data_type } ) }}\n {% endif %}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.389664, "supported_languages": null}, "macro.dbt.get_merge_update_columns": {"unique_id": "macro.dbt.get_merge_update_columns", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "get_merge_update_columns", "macro_sql": "{% macro get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {{ return(adapter.dispatch('get_merge_update_columns', 'dbt')(merge_update_columns, merge_exclude_columns, dest_columns)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.390273, "supported_languages": null}, "macro.dbt.default__get_merge_update_columns": {"unique_id": "macro.dbt.default__get_merge_update_columns", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "name": "default__get_merge_update_columns", "macro_sql": "{% macro default__get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {%- set default_cols = dest_columns | map(attribute=\"quoted\") | list -%}\n\n {%- if merge_update_columns and merge_exclude_columns -%}\n {{ exceptions.raise_compiler_error(\n 'Model cannot specify merge_update_columns and merge_exclude_columns. 
Please update model to use only one config'\n )}}\n {%- elif merge_update_columns -%}\n {%- set update_columns = merge_update_columns -%}\n {%- elif merge_exclude_columns -%}\n {%- set update_columns = [] -%}\n {%- for column in dest_columns -%}\n {% if column.column | lower not in merge_exclude_columns | map(\"lower\") | list %}\n {%- do update_columns.append(column.quoted) -%}\n {% endif %}\n {%- endfor -%}\n {%- else -%}\n {%- set update_columns = default_cols -%}\n {%- endif -%}\n\n {{ return(update_columns) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.392107, "supported_languages": null}, "macro.dbt.get_merge_sql": {"unique_id": "macro.dbt.get_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "get_merge_sql", "macro_sql": "{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%}\n {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.400875, "supported_languages": null}, "macro.dbt.default__get_merge_sql": {"unique_id": "macro.dbt.default__get_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "default__get_merge_sql", "macro_sql": "{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set merge_update_columns = config.get('merge_update_columns') -%}\n {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}\n {%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not mapping and unique_key is not string %}\n {% for key in unique_key %}\n {% set this_key_match %}\n DBT_INTERNAL_SOURCE.{{ key }} = DBT_INTERNAL_DEST.{{ key }}\n {% endset %}\n {% do predicates.append(this_key_match) %}\n {% endfor %}\n {% else %}\n {% set unique_key_match %}\n DBT_INTERNAL_SOURCE.{{ unique_key }} = DBT_INTERNAL_DEST.{{ unique_key }}\n {% endset %}\n {% do predicates.append(unique_key_match) %}\n {% endif %}\n {% else %}\n {% do predicates.append('FALSE') %}\n {% endif %}\n\n {{ sql_header if sql_header is not none }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on {{ predicates | join(' and ') }}\n\n {% if unique_key %}\n when matched then update set\n {% for column_name in update_columns -%}\n {{ column_name }} = DBT_INTERNAL_SOURCE.{{ column_name }}\n {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n {% endif %}\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": 
{"macros": ["macro.dbt.get_quoted_csv", "macro.dbt.get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4049711, "supported_languages": null}, "macro.dbt.get_delete_insert_merge_sql": {"unique_id": "macro.dbt.get_delete_insert_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "get_delete_insert_merge_sql", "macro_sql": "{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4055731, "supported_languages": null}, "macro.dbt.default__get_delete_insert_merge_sql": {"unique_id": "macro.dbt.default__get_delete_insert_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "default__get_delete_insert_merge_sql", "macro_sql": "{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not string %}\n delete from {{target }}\n using {{ source }}\n where (\n {% for key in unique_key %}\n {{ source }}.{{ key }} = {{ target }}.{{ key }}\n {{ \"and \" if not loop.last }}\n {% endfor %}\n );\n {% else %}\n delete from {{ target }}\n where (\n {{ unique_key }}) in (\n select ({{ unique_key }})\n from {{ source }}\n );\n\n {% endif %}\n {% endif %}\n\n insert into {{ target }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ source }}\n )\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.407451, "supported_languages": null}, "macro.dbt.get_insert_overwrite_merge_sql": {"unique_id": "macro.dbt.get_insert_overwrite_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "name": "get_insert_overwrite_merge_sql", "macro_sql": "{% macro get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header=false) -%}\n {{ adapter.dispatch('get_insert_overwrite_merge_sql', 'dbt')(target, source, dest_columns, predicates, include_sql_header) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.408113, "supported_languages": null}, "macro.dbt.default__get_insert_overwrite_merge_sql": {"unique_id": "macro.dbt.default__get_insert_overwrite_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": 
"macros/materializations/models/incremental/merge.sql", "name": "default__get_insert_overwrite_merge_sql", "macro_sql": "{% macro default__get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header) -%}\n {#-- The only time include_sql_header is True: --#}\n {#-- BigQuery + insert_overwrite strategy + \"static\" partitions config --#}\n {#-- We should consider including the sql header at the materialization level instead --#}\n\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none and include_sql_header }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on FALSE\n\n when not matched by source\n {% if predicates %} and {{ predicates | join(' and ') }} {% endif %}\n then delete\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4097211, "supported_languages": null}, "macro.dbt.is_incremental": {"unique_id": "macro.dbt.is_incremental", "package_name": "dbt", "path": "macros/materializations/models/incremental/is_incremental.sql", "original_file_path": "macros/materializations/models/incremental/is_incremental.sql", "name": "is_incremental", "macro_sql": "{% macro is_incremental() %}\n {#-- do not run introspective queries in parsing #}\n {% if not execute %}\n {{ return(False) }}\n {% else %}\n {% set relation = adapter.get_relation(this.database, this.schema, this.table) %}\n {{ return(relation is not none\n and relation.type == 'table'\n and model.config.materialized == 'incremental'\n and not should_full_refresh()) }}\n {% endif %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.should_full_refresh"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.411711, "supported_languages": null}, "macro.dbt.get_incremental_append_sql": {"unique_id": "macro.dbt.get_incremental_append_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_append_sql", "macro_sql": "{% macro get_incremental_append_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_append_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.413886, "supported_languages": null}, "macro.dbt.default__get_incremental_append_sql": {"unique_id": "macro.dbt.default__get_incremental_append_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_append_sql", "macro_sql": "{% macro default__get_incremental_append_sql(arg_dict) %}\n\n {% do 
return(get_insert_into_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_insert_into_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.414494, "supported_languages": null}, "macro.dbt.get_incremental_delete_insert_sql": {"unique_id": "macro.dbt.get_incremental_delete_insert_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_delete_insert_sql", "macro_sql": "{% macro get_incremental_delete_insert_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_delete_insert_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_incremental_delete_insert_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.414966, "supported_languages": null}, "macro.dbt.default__get_incremental_delete_insert_sql": {"unique_id": "macro.dbt.default__get_incremental_delete_insert_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_delete_insert_sql", "macro_sql": "{% macro default__get_incremental_delete_insert_sql(arg_dict) %}\n\n {% do return(get_delete_insert_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4156349, "supported_languages": null}, "macro.dbt.get_incremental_merge_sql": {"unique_id": "macro.dbt.get_incremental_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_merge_sql", "macro_sql": "{% macro get_incremental_merge_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_merge_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_incremental_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.416111, "supported_languages": null}, "macro.dbt.default__get_incremental_merge_sql": {"unique_id": "macro.dbt.default__get_incremental_merge_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_merge_sql", "macro_sql": "{% macro default__get_incremental_merge_sql(arg_dict) %}\n\n {% do return(get_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_merge_sql"]}, "description": 
"", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.416785, "supported_languages": null}, "macro.dbt.get_incremental_insert_overwrite_sql": {"unique_id": "macro.dbt.get_incremental_insert_overwrite_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_insert_overwrite_sql", "macro_sql": "{% macro get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_insert_overwrite_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_incremental_insert_overwrite_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4172869, "supported_languages": null}, "macro.dbt.default__get_incremental_insert_overwrite_sql": {"unique_id": "macro.dbt.default__get_incremental_insert_overwrite_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_insert_overwrite_sql", "macro_sql": "{% macro default__get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {% do return(get_insert_overwrite_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"], arg_dict[\"predicates\"])) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.418015, "supported_languages": null}, "macro.dbt.get_incremental_default_sql": {"unique_id": "macro.dbt.get_incremental_default_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_incremental_default_sql", "macro_sql": "{% macro get_incremental_default_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_default_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_incremental_default_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.418626, "supported_languages": null}, "macro.dbt.default__get_incremental_default_sql": {"unique_id": "macro.dbt.default__get_incremental_default_sql", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "default__get_incremental_default_sql", "macro_sql": "{% macro default__get_incremental_default_sql(arg_dict) %}\n\n {% do return(get_incremental_append_sql(arg_dict)) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.419035, "supported_languages": null}, "macro.dbt.get_insert_into_sql": {"unique_id": "macro.dbt.get_insert_into_sql", "package_name": "dbt", 
"path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "name": "get_insert_into_sql", "macro_sql": "{% macro get_insert_into_sql(target_relation, temp_relation, dest_columns) %}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n insert into {{ target_relation }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ temp_relation }}\n )\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.419761, "supported_languages": null}, "macro.dbt.materialization_incremental_default": {"unique_id": "macro.dbt.materialization_incremental_default", "package_name": "dbt", "path": "macros/materializations/models/incremental/incremental.sql", "original_file_path": "macros/materializations/models/incremental/incremental.sql", "name": "materialization_incremental_default", "macro_sql": "{% materialization incremental, default -%}\n\n -- relations\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') -%}\n {%- set temp_relation = make_temp_relation(target_relation)-%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation)-%}\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n\n -- configs\n {%- set unique_key = config.get('unique_key') -%}\n {%- set full_refresh_mode = (should_full_refresh() or existing_relation.is_view) -%}\n {%- set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') -%}\n\n -- the temp_ and backup_ relations should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation. This has to happen before\n -- BEGIN, in a separate transaction\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation)-%}\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set to_drop = [] %}\n\n {% if existing_relation is none %}\n {% set build_sql = get_create_table_as_sql(False, target_relation, sql) %}\n {% elif full_refresh_mode %}\n {% set build_sql = get_create_table_as_sql(False, intermediate_relation, sql) %}\n {% set need_swap = true %}\n {% else %}\n {% do run_query(get_create_table_as_sql(True, temp_relation, sql)) %}\n {% do adapter.expand_target_column_types(\n from_relation=temp_relation,\n to_relation=target_relation) %}\n {#-- Process schema changes. Returns dict of changes if successful. 
Use source columns for upserting/merging --#}\n {% set dest_columns = process_schema_changes(on_schema_change, temp_relation, existing_relation) %}\n {% if not dest_columns %}\n {% set dest_columns = adapter.get_columns_in_relation(existing_relation) %}\n {% endif %}\n\n {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#}\n {% set incremental_strategy = config.get('incremental_strategy') or 'default' %}\n {% set incremental_predicates = config.get('incremental_predicates', none) %}\n {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %}\n {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %}\n {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %}\n\n {% endif %}\n\n {% call statement(\"main\") %}\n {{ build_sql }}\n {% endcall %}\n\n {% if need_swap %}\n {% do adapter.rename_relation(target_relation, backup_relation) %}\n {% do adapter.rename_relation(intermediate_relation, target_relation) %}\n {% do to_drop.append(backup_relation) %}\n {% endif %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if existing_relation is none or existing_relation.is_view or should_full_refresh() %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {% do adapter.commit() %}\n\n {% for rel in to_drop %}\n {% do adapter.drop_relation(rel) %}\n {% endfor %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_temp_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.should_full_refresh", "macro.dbt.incremental_validate_on_schema_change", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.get_create_table_as_sql", "macro.dbt.run_query", "macro.dbt.process_schema_changes", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.431493, "supported_languages": ["sql"]}, "macro.dbt.incremental_validate_on_schema_change": {"unique_id": "macro.dbt.incremental_validate_on_schema_change", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "name": "incremental_validate_on_schema_change", "macro_sql": "{% macro incremental_validate_on_schema_change(on_schema_change, default='ignore') %}\n\n {% if on_schema_change not in ['sync_all_columns', 'append_new_columns', 'fail', 'ignore'] %}\n\n {% set log_message = 'Invalid value for on_schema_change (%s) specified. Setting default value of %s.' 
% (on_schema_change, default) %}\n {% do log(log_message) %}\n\n {{ return(default) }}\n\n {% else %}\n\n {{ return(on_schema_change) }}\n\n {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.441895, "supported_languages": null}, "macro.dbt.check_for_schema_changes": {"unique_id": "macro.dbt.check_for_schema_changes", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "name": "check_for_schema_changes", "macro_sql": "{% macro check_for_schema_changes(source_relation, target_relation) %}\n\n {% set schema_changed = False %}\n\n {%- set source_columns = adapter.get_columns_in_relation(source_relation) -%}\n {%- set target_columns = adapter.get_columns_in_relation(target_relation) -%}\n {%- set source_not_in_target = diff_columns(source_columns, target_columns) -%}\n {%- set target_not_in_source = diff_columns(target_columns, source_columns) -%}\n\n {% set new_target_types = diff_column_data_types(source_columns, target_columns) %}\n\n {% if source_not_in_target != [] %}\n {% set schema_changed = True %}\n {% elif target_not_in_source != [] or new_target_types != [] %}\n {% set schema_changed = True %}\n {% elif new_target_types != [] %}\n {% set schema_changed = True %}\n {% endif %}\n\n {% set changes_dict = {\n 'schema_changed': schema_changed,\n 'source_not_in_target': source_not_in_target,\n 'target_not_in_source': target_not_in_source,\n 'source_columns': source_columns,\n 'target_columns': target_columns,\n 'new_target_types': new_target_types\n } %}\n\n {% set msg %}\n In {{ target_relation }}:\n Schema changed: {{ schema_changed }}\n Source columns not in target: {{ source_not_in_target }}\n Target columns not in source: {{ target_not_in_source }}\n New column types: {{ new_target_types }}\n {% endset %}\n\n {% do log(msg) %}\n\n {{ return(changes_dict) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.diff_columns", "macro.dbt.diff_column_data_types"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.444941, "supported_languages": null}, "macro.dbt.sync_column_schemas": {"unique_id": "macro.dbt.sync_column_schemas", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "name": "sync_column_schemas", "macro_sql": "{% macro sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {%- set add_to_target_arr = schema_changes_dict['source_not_in_target'] -%}\n\n {%- if on_schema_change == 'append_new_columns'-%}\n {%- if add_to_target_arr | length > 0 -%}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, none) -%}\n {%- endif -%}\n\n {% elif on_schema_change == 'sync_all_columns' %}\n {%- set remove_from_target_arr = schema_changes_dict['target_not_in_source'] -%}\n {%- set new_target_types = schema_changes_dict['new_target_types'] -%}\n\n {% if add_to_target_arr | length > 0 or remove_from_target_arr | length > 0 %}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, remove_from_target_arr) -%}\n {% endif %}\n\n {% if 
new_target_types != [] %}\n {% for ntt in new_target_types %}\n {% set column_name = ntt['column_name'] %}\n {% set new_type = ntt['new_type'] %}\n {% do alter_column_type(target_relation, column_name, new_type) %}\n {% endfor %}\n {% endif %}\n\n {% endif %}\n\n {% set schema_change_message %}\n In {{ target_relation }}:\n Schema change approach: {{ on_schema_change }}\n Columns added: {{ add_to_target_arr }}\n Columns removed: {{ remove_from_target_arr }}\n Data types changed: {{ new_target_types }}\n {% endset %}\n\n {% do log(schema_change_message) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.alter_relation_add_remove_columns", "macro.dbt.alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.447967, "supported_languages": null}, "macro.dbt.process_schema_changes": {"unique_id": "macro.dbt.process_schema_changes", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "name": "process_schema_changes", "macro_sql": "{% macro process_schema_changes(on_schema_change, source_relation, target_relation) %}\n\n {% if on_schema_change == 'ignore' %}\n\n {{ return({}) }}\n\n {% else %}\n\n {% set schema_changes_dict = check_for_schema_changes(source_relation, target_relation) %}\n\n {% if schema_changes_dict['schema_changed'] %}\n\n {% if on_schema_change == 'fail' %}\n\n {% set fail_msg %}\n The source and target schemas on this incremental model are out of sync!\n They can be reconciled in several ways:\n - set the `on_schema_change` config to either append_new_columns or sync_all_columns, depending on your situation.\n - Re-run the incremental model with `full_refresh: True` to update the target schema.\n - update the schema manually and re-run the process.\n\n Additional troubleshooting context:\n Source columns not in target: {{ schema_changes_dict['source_not_in_target'] }}\n Target columns not in source: {{ schema_changes_dict['target_not_in_source'] }}\n New column types: {{ schema_changes_dict['new_target_types'] }}\n {% endset %}\n\n {% do exceptions.raise_compiler_error(fail_msg) %}\n\n {# -- unless we ignore, run the sync operation per the config #}\n {% else %}\n\n {% do sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {% endif %}\n\n {% endif %}\n\n {{ return(schema_changes_dict['source_columns']) }}\n\n {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.check_for_schema_changes", "macro.dbt.sync_column_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4501061, "supported_languages": null}, "macro.dbt.materialization_table_default": {"unique_id": "macro.dbt.materialization_table_default", "package_name": "dbt", "path": "macros/materializations/models/table/table.sql", "original_file_path": "macros/materializations/models/table/table.sql", "name": "materialization_table_default", "macro_sql": "{% materialization table, default %}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') %}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n -- the intermediate_relation should not already exist in the database; 
get_relation\n  -- will return None in that case. Otherwise, we get a relation that we can drop\n  -- later, before we try to use this name for the current operation\n  {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n  /*\n      See ../view/view.sql for more information about this relation.\n  */\n  {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n  {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n  -- as above, the backup_relation should not already exist\n  {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n  -- grab current tables grants config for comparison later on\n  {% set grant_config = config.get('grants') %}\n\n  -- drop the temp relations if they exist already in the database\n  {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n  {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n  {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n  -- `BEGIN` happens here:\n  {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n  -- build model\n  {% call statement('main') -%}\n    {{ get_create_table_as_sql(False, intermediate_relation, sql) }}\n  {%- endcall %}\n\n  -- cleanup\n  {% if existing_relation is not none %}\n     {{ adapter.rename_relation(existing_relation, backup_relation) }}\n  {% endif %}\n\n  {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n  {% do create_indexes(target_relation) %}\n\n  {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n  {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n  {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n  {% do persist_docs(target_relation, model) %}\n\n  -- `COMMIT` happens here\n  {{ adapter.commit() }}\n\n  -- finally, drop the existing/backup relation after the commit\n  {{ drop_relation_if_exists(backup_relation) }}\n\n  {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n  {{ return({'relations': [target_relation]}) }}\n{% endmaterialization %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.statement", "macro.dbt.get_create_table_as_sql", "macro.dbt.create_indexes", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.456702, "supported_languages": ["sql"]}, "macro.dbt.get_create_table_as_sql": {"unique_id": "macro.dbt.get_create_table_as_sql", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "name": "get_create_table_as_sql", "macro_sql": "{% macro get_create_table_as_sql(temporary, relation, sql) -%}\n  {{ adapter.dispatch('get_create_table_as_sql', 'dbt')(temporary, relation, sql) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_create_table_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.458294, "supported_languages": null}, "macro.dbt.default__get_create_table_as_sql": {"unique_id": "macro.dbt.default__get_create_table_as_sql", 
"package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "name": "default__get_create_table_as_sql", "macro_sql": "{% macro default__get_create_table_as_sql(temporary, relation, sql) -%}\n {{ return(create_table_as(temporary, relation, sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.458766, "supported_languages": null}, "macro.dbt.create_table_as": {"unique_id": "macro.dbt.create_table_as", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "name": "create_table_as", "macro_sql": "{% macro create_table_as(temporary, relation, compiled_code, language='sql') -%}\n {# backward compatibility for create_table_as that does not support language #}\n {% if language == \"sql\" %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code)}}\n {% else %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code, language) }}\n {% endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.459863, "supported_languages": null}, "macro.dbt.default__create_table_as": {"unique_id": "macro.dbt.default__create_table_as", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "name": "default__create_table_as", "macro_sql": "{% macro default__create_table_as(temporary, relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary: -%}temporary{%- endif %} table\n {{ relation.include(database=(not temporary), schema=(not temporary)) }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4608908, "supported_languages": null}, "macro.dbt.materialization_view_default": {"unique_id": "macro.dbt.materialization_view_default", "package_name": "dbt", "path": "macros/materializations/models/view/view.sql", "original_file_path": "macros/materializations/models/view/view.sql", "name": "materialization_view_default", "macro_sql": "{%- materialization view, default -%}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='view') -%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n This relation (probably) doesn't exist yet. 
If it does exist, it's a leftover from\n        a previous run, and we're going to try to drop it immediately. At the end of this\n        materialization, we're going to rename the \"existing_relation\" to this identifier,\n        and then we're going to drop it. In order to make sure we run the correct one of:\n          - drop view ...\n          - drop table ...\n\n        We need to set the type of this relation to be the type of the existing_relation, if it exists,\n        or else \"view\" as a sane default if it does not. Note that if the existing_relation does not\n        exist, then there is nothing to move out of the way and subsequently drop. In that case,\n        this relation will be effectively unused.\n  */\n  {%- set backup_relation_type = 'view' if existing_relation is none else existing_relation.type -%}\n  {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n  -- as above, the backup_relation should not already exist\n  {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n  -- grab current tables grants config for comparison later on\n  {% set grant_config = config.get('grants') %}\n\n  {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n  -- drop the temp relations if they exist already in the database\n  {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n  {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n  -- `BEGIN` happens here:\n  {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n  -- build model\n  {% call statement('main') -%}\n    {{ get_create_view_as_sql(intermediate_relation, sql) }}\n  {%- endcall %}\n\n  -- cleanup\n  -- move the existing view out of the way\n  {% if existing_relation is not none %}\n     {{ adapter.rename_relation(existing_relation, backup_relation) }}\n  {% endif %}\n  {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n  {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n  {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n  {% do persist_docs(target_relation, model) %}\n\n  {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n  {{ adapter.commit() }}\n\n  {{ drop_relation_if_exists(backup_relation) }}\n\n  {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n  {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.run_hooks", "macro.dbt.drop_relation_if_exists", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4670231, "supported_languages": ["sql"]}, "macro.dbt.handle_existing_table": {"unique_id": "macro.dbt.handle_existing_table", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "name": "handle_existing_table", "macro_sql": "{% macro handle_existing_table(full_refresh, old_relation) %}\n    {{ adapter.dispatch('handle_existing_table', 'dbt')(full_refresh, old_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__handle_existing_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], 
"created_at": 1667573258.468151, "supported_languages": null}, "macro.dbt.default__handle_existing_table": {"unique_id": "macro.dbt.default__handle_existing_table", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "name": "default__handle_existing_table", "macro_sql": "{% macro default__handle_existing_table(full_refresh, old_relation) %}\n {{ log(\"Dropping relation \" ~ old_relation ~ \" because it is of type \" ~ old_relation.type) }}\n {{ adapter.drop_relation(old_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.468713, "supported_languages": null}, "macro.dbt.create_or_replace_view": {"unique_id": "macro.dbt.create_or_replace_view", "package_name": "dbt", "path": "macros/materializations/models/view/create_or_replace_view.sql", "original_file_path": "macros/materializations/models/view/create_or_replace_view.sql", "name": "create_or_replace_view", "macro_sql": "{% macro create_or_replace_view() %}\n {%- set identifier = model['alias'] -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database,\n type='view') -%}\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks) }}\n\n -- If there's a table with the same name and we weren't told to full refresh,\n -- that's an error. If we were told to full refresh, drop it. 
This behavior differs\n  -- for Snowflake and BigQuery, so multiple dispatch is used.\n  {%- if old_relation is not none and old_relation.is_table -%}\n    {{ handle_existing_table(should_full_refresh(), old_relation) }}\n  {%- endif -%}\n\n  -- build model\n  {% call statement('main') -%}\n    {{ get_create_view_as_sql(target_relation, sql) }}\n  {%- endcall %}\n\n  {% set should_revoke = should_revoke(exists_as_view, full_refresh_mode=True) %}\n  {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n  {{ run_hooks(post_hooks) }}\n\n  {{ return({'relations': [target_relation]}) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.run_hooks", "macro.dbt.handle_existing_table", "macro.dbt.should_full_refresh", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.472539, "supported_languages": null}, "macro.dbt.get_create_view_as_sql": {"unique_id": "macro.dbt.get_create_view_as_sql", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "name": "get_create_view_as_sql", "macro_sql": "{% macro get_create_view_as_sql(relation, sql) -%}\n  {{ adapter.dispatch('get_create_view_as_sql', 'dbt')(relation, sql) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_create_view_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.473769, "supported_languages": null}, "macro.dbt.default__get_create_view_as_sql": {"unique_id": "macro.dbt.default__get_create_view_as_sql", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "name": "default__get_create_view_as_sql", "macro_sql": "{% macro default__get_create_view_as_sql(relation, sql) -%}\n  {{ return(create_view_as(relation, sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.474185, "supported_languages": null}, "macro.dbt.create_view_as": {"unique_id": "macro.dbt.create_view_as", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "name": "create_view_as", "macro_sql": "{% macro create_view_as(relation, sql) -%}\n  {{ adapter.dispatch('create_view_as', 'dbt')(relation, sql) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.474647, "supported_languages": null}, "macro.dbt.default__create_view_as": {"unique_id": "macro.dbt.default__create_view_as", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "name": "default__create_view_as", "macro_sql": "{% macro default__create_view_as(relation, sql) 
-%}\n  {%- set sql_header = config.get('sql_header', none) -%}\n\n  {{ sql_header if sql_header is not none }}\n  create view {{ relation }} as (\n    {{ sql }}\n  );\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.475304, "supported_languages": null}, "macro.dbt.materialization_seed_default": {"unique_id": "macro.dbt.materialization_seed_default", "package_name": "dbt", "path": "macros/materializations/seeds/seed.sql", "original_file_path": "macros/materializations/seeds/seed.sql", "name": "materialization_seed_default", "macro_sql": "{% materialization seed, default %}\n\n  {%- set identifier = model['alias'] -%}\n  {%- set full_refresh_mode = (should_full_refresh()) -%}\n\n  {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n\n  {%- set exists_as_table = (old_relation is not none and old_relation.is_table) -%}\n  {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n  {%- set grant_config = config.get('grants') -%}\n  {%- set agate_table = load_agate_table() -%}\n  -- grab current tables grants config for comparison later on\n\n  {%- do store_result('agate_table', response='OK', agate_table=agate_table) -%}\n\n  {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n  -- `BEGIN` happens here:\n  {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n  -- build model\n  {% set create_table_sql = \"\" %}\n  {% if exists_as_view %}\n    {{ exceptions.raise_compiler_error(\"Cannot seed to '{}', it is a view\".format(old_relation)) }}\n  {% elif exists_as_table %}\n    {% set create_table_sql = reset_csv_table(model, full_refresh_mode, old_relation, agate_table) %}\n  {% else %}\n    {% set create_table_sql = create_csv_table(model, agate_table) %}\n  {% endif %}\n\n  {% set code = 'CREATE' if full_refresh_mode else 'INSERT' %}\n  {% set rows_affected = (agate_table.rows | length) %}\n  {% set sql = load_csv_rows(model, agate_table) %}\n\n  {% call noop_statement('main', code ~ ' ' ~ rows_affected, code, rows_affected) %}\n    {{ get_csv_sql(create_table_sql, sql) }};\n  {% endcall %}\n\n  {% set target_relation = this.incorporate(type='table') %}\n\n  {% set should_revoke = should_revoke(old_relation, full_refresh_mode) %}\n  {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n  {% do persist_docs(target_relation, model) %}\n\n  {% if full_refresh_mode or not exists_as_table %}\n    {% do create_indexes(target_relation) %}\n  {% endif %}\n\n  {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n  -- `COMMIT` happens here\n  {{ adapter.commit() }}\n\n  {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n  {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.should_full_refresh", "macro.dbt.run_hooks", "macro.dbt.reset_csv_table", "macro.dbt.create_csv_table", "macro.dbt.load_csv_rows", "macro.dbt.noop_statement", "macro.dbt.get_csv_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.48314, "supported_languages": ["sql"]}, "macro.dbt.create_csv_table": {"unique_id": "macro.dbt.create_csv_table", "package_name": "dbt", "path": 
"macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "create_csv_table", "macro_sql": "{% macro create_csv_table(model, agate_table) -%}\n {{ adapter.dispatch('create_csv_table', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.492415, "supported_languages": null}, "macro.dbt.default__create_csv_table": {"unique_id": "macro.dbt.default__create_csv_table", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__create_csv_table", "macro_sql": "{% macro default__create_csv_table(model, agate_table) %}\n {%- set column_override = model['config'].get('column_types', {}) -%}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n\n {% set sql %}\n create table {{ this.render() }} (\n {%- for col_name in agate_table.column_names -%}\n {%- set inferred_type = adapter.convert_type(agate_table, loop.index0) -%}\n {%- set type = column_override.get(col_name, inferred_type) -%}\n {%- set column_name = (col_name | string) -%}\n {{ adapter.quote_seed_column(column_name, quote_seed_column) }} {{ type }} {%- if not loop.last -%}, {%- endif -%}\n {%- endfor -%}\n )\n {% endset %}\n\n {% call statement('_') -%}\n {{ sql }}\n {%- endcall %}\n\n {{ return(sql) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.494709, "supported_languages": null}, "macro.dbt.reset_csv_table": {"unique_id": "macro.dbt.reset_csv_table", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "reset_csv_table", "macro_sql": "{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}\n {{ adapter.dispatch('reset_csv_table', 'dbt')(model, full_refresh, old_relation, agate_table) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__reset_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.495312, "supported_languages": null}, "macro.dbt.default__reset_csv_table": {"unique_id": "macro.dbt.default__reset_csv_table", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__reset_csv_table", "macro_sql": "{% macro default__reset_csv_table(model, full_refresh, old_relation, agate_table) %}\n {% set sql = \"\" %}\n {% if full_refresh %}\n {{ adapter.drop_relation(old_relation) }}\n {% set sql = create_csv_table(model, agate_table) %}\n {% else %}\n {{ adapter.truncate_relation(old_relation) }}\n {% set sql = \"truncate table \" ~ old_relation %}\n {% endif %}\n\n {{ return(sql) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.496661, 
"supported_languages": null}, "macro.dbt.get_csv_sql": {"unique_id": "macro.dbt.get_csv_sql", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "get_csv_sql", "macro_sql": "{% macro get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ adapter.dispatch('get_csv_sql', 'dbt')(create_or_truncate_sql, insert_sql) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_csv_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4971678, "supported_languages": null}, "macro.dbt.default__get_csv_sql": {"unique_id": "macro.dbt.default__get_csv_sql", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__get_csv_sql", "macro_sql": "{% macro default__get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ create_or_truncate_sql }};\n -- dbt seed --\n {{ insert_sql }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.497521, "supported_languages": null}, "macro.dbt.get_binding_char": {"unique_id": "macro.dbt.get_binding_char", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "get_binding_char", "macro_sql": "{% macro get_binding_char() -%}\n {{ adapter.dispatch('get_binding_char', 'dbt')() }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4978828, "supported_languages": null}, "macro.dbt.default__get_binding_char": {"unique_id": "macro.dbt.default__get_binding_char", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__get_binding_char", "macro_sql": "{% macro default__get_binding_char() %}\n {{ return('%s') }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.498185, "supported_languages": null}, "macro.dbt.get_batch_size": {"unique_id": "macro.dbt.get_batch_size", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "get_batch_size", "macro_sql": "{% macro get_batch_size() -%}\n {{ return(adapter.dispatch('get_batch_size', 'dbt')()) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_batch_size"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.4986072, "supported_languages": null}, "macro.dbt.default__get_batch_size": {"unique_id": "macro.dbt.default__get_batch_size", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__get_batch_size", "macro_sql": 
"{% macro default__get_batch_size() %}\n {{ return(10000) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.498921, "supported_languages": null}, "macro.dbt.get_seed_column_quoted_csv": {"unique_id": "macro.dbt.get_seed_column_quoted_csv", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "get_seed_column_quoted_csv", "macro_sql": "{% macro get_seed_column_quoted_csv(model, column_names) %}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote_seed_column(col, quote_seed_column)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.500187, "supported_languages": null}, "macro.dbt.load_csv_rows": {"unique_id": "macro.dbt.load_csv_rows", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "load_csv_rows", "macro_sql": "{% macro load_csv_rows(model, agate_table) -%}\n {{ adapter.dispatch('load_csv_rows', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__load_csv_rows"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.500671, "supported_languages": null}, "macro.dbt.default__load_csv_rows": {"unique_id": "macro.dbt.default__load_csv_rows", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "name": "default__load_csv_rows", "macro_sql": "{% macro default__load_csv_rows(model, agate_table) %}\n\n {% set batch_size = get_batch_size() %}\n\n {% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}\n {% set bindings = [] %}\n\n {% set statements = [] %}\n\n {% for chunk in agate_table.rows | batch(batch_size) %}\n {% set bindings = [] %}\n\n {% for row in chunk %}\n {% do bindings.extend(row) %}\n {% endfor %}\n\n {% set sql %}\n insert into {{ this.render() }} ({{ cols_sql }}) values\n {% for row in chunk -%}\n ({%- for column in agate_table.column_names -%}\n {{ get_binding_char() }}\n {%- if not loop.last%},{%- endif %}\n {%- endfor -%})\n {%- if not loop.last%},{%- endif %}\n {%- endfor %}\n {% endset %}\n\n {% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}\n\n {% if loop.index0 == 0 %}\n {% do statements.append(sql) %}\n {% endif %}\n {% endfor %}\n\n {# Return SQL so we can render it out into the compiled files #}\n {{ return(statements[0]) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_batch_size", "macro.dbt.get_seed_column_quoted_csv", "macro.dbt.get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.504315, "supported_languages": null}, "macro.dbt.generate_alias_name": 
{"unique_id": "macro.dbt.generate_alias_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "name": "generate_alias_name", "macro_sql": "{% macro generate_alias_name(custom_alias_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_alias_name', 'dbt')(custom_alias_name, node)) %}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__generate_alias_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.505684, "supported_languages": null}, "macro.dbt.default__generate_alias_name": {"unique_id": "macro.dbt.default__generate_alias_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "name": "default__generate_alias_name", "macro_sql": "{% macro default__generate_alias_name(custom_alias_name=none, node=none) -%}\n\n {%- if custom_alias_name is none -%}\n\n {{ node.name }}\n\n {%- else -%}\n\n {{ custom_alias_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.506274, "supported_languages": null}, "macro.dbt.generate_schema_name": {"unique_id": "macro.dbt.generate_schema_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "name": "generate_schema_name", "macro_sql": "{% macro generate_schema_name(custom_schema_name=none, node=none) -%}\n {{ return(adapter.dispatch('generate_schema_name', 'dbt')(custom_schema_name, node)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__generate_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5077639, "supported_languages": null}, "macro.dbt.default__generate_schema_name": {"unique_id": "macro.dbt.default__generate_schema_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "name": "default__generate_schema_name", "macro_sql": "{% macro default__generate_schema_name(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if custom_schema_name is none -%}\n\n {{ default_schema }}\n\n {%- else -%}\n\n {{ default_schema }}_{{ custom_schema_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5084338, "supported_languages": null}, "macro.dbt.generate_schema_name_for_env": {"unique_id": "macro.dbt.generate_schema_name_for_env", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "name": "generate_schema_name_for_env", "macro_sql": "{% macro generate_schema_name_for_env(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if target.name == 'prod' and custom_schema_name is not none -%}\n\n {{ 
custom_schema_name | trim }}\n\n {%- else -%}\n\n {{ default_schema }}\n\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5091588, "supported_languages": null}, "macro.dbt.generate_database_name": {"unique_id": "macro.dbt.generate_database_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "name": "generate_database_name", "macro_sql": "{% macro generate_database_name(custom_database_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_database_name', 'dbt')(custom_database_name, node)) %}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__generate_database_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.510411, "supported_languages": null}, "macro.dbt.default__generate_database_name": {"unique_id": "macro.dbt.default__generate_database_name", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "name": "default__generate_database_name", "macro_sql": "{% macro default__generate_database_name(custom_database_name=none, node=none) -%}\n {%- set default_database = target.database -%}\n {%- if custom_database_name is none -%}\n\n {{ default_database }}\n\n {%- else -%}\n\n {{ custom_database_name }}\n\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.51106, "supported_languages": null}, "macro.dbt.default__test_relationships": {"unique_id": "macro.dbt.default__test_relationships", "package_name": "dbt", "path": "macros/generic_test_sql/relationships.sql", "original_file_path": "macros/generic_test_sql/relationships.sql", "name": "default__test_relationships", "macro_sql": "{% macro default__test_relationships(model, column_name, to, field) %}\n\nwith child as (\n select {{ column_name }} as from_field\n from {{ model }}\n where {{ column_name }} is not null\n),\n\nparent as (\n select {{ field }} as to_field\n from {{ to }}\n)\n\nselect\n from_field\n\nfrom child\nleft join parent\n on child.from_field = parent.to_field\n\nwhere parent.to_field is null\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.512234, "supported_languages": null}, "macro.dbt.default__test_not_null": {"unique_id": "macro.dbt.default__test_not_null", "package_name": "dbt", "path": "macros/generic_test_sql/not_null.sql", "original_file_path": "macros/generic_test_sql/not_null.sql", "name": "default__test_not_null", "macro_sql": "{% macro default__test_not_null(model, column_name) %}\n\n{% set column_list = '*' if should_store_failures() else column_name %}\n\nselect {{ column_list }}\nfrom {{ model }}\nwhere {{ column_name }} is null\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.should_store_failures"]}, "description": "", "meta": {}, "docs": {"show": true, 
"node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5134442, "supported_languages": null}, "macro.dbt.default__test_unique": {"unique_id": "macro.dbt.default__test_unique", "package_name": "dbt", "path": "macros/generic_test_sql/unique.sql", "original_file_path": "macros/generic_test_sql/unique.sql", "name": "default__test_unique", "macro_sql": "{% macro default__test_unique(model, column_name) %}\n\nselect\n {{ column_name }} as unique_field,\n count(*) as n_records\n\nfrom {{ model }}\nwhere {{ column_name }} is not null\ngroup by {{ column_name }}\nhaving count(*) > 1\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.514476, "supported_languages": null}, "macro.dbt.default__test_accepted_values": {"unique_id": "macro.dbt.default__test_accepted_values", "package_name": "dbt", "path": "macros/generic_test_sql/accepted_values.sql", "original_file_path": "macros/generic_test_sql/accepted_values.sql", "name": "default__test_accepted_values", "macro_sql": "{% macro default__test_accepted_values(model, column_name, values, quote=True) %}\n\nwith all_values as (\n\n select\n {{ column_name }} as value_field,\n count(*) as n_records\n\n from {{ model }}\n group by {{ column_name }}\n\n)\n\nselect *\nfrom all_values\nwhere value_field not in (\n {% for value in values -%}\n {% if quote -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif -%}\n {%- if not loop.last -%},{%- endif %}\n {%- endfor %}\n)\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5161521, "supported_languages": null}, "macro.dbt.statement": {"unique_id": "macro.dbt.statement", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "name": "statement", "macro_sql": "\n{%- macro statement(name=None, fetch_result=False, auto_begin=True, language='sql') -%}\n {%- if execute: -%}\n {%- set compiled_code = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime {} for node \"{}\"'.format(language, model['unique_id'])) }}\n {{ write(compiled_code) }}\n {%- endif -%}\n {%- if language == 'sql'-%}\n {%- set res, table = adapter.execute(compiled_code, auto_begin=auto_begin, fetch=fetch_result) -%}\n {%- elif language == 'python' -%}\n {%- set res = submit_python_job(model, compiled_code) -%}\n {#-- TODO: What should table be for python models? 
--#}\n {%- set table = None -%}\n {%- else -%}\n {% do exceptions.raise_compiler_error(\"statement macro didn't get supported language\") %}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_result(name, response=res, agate_table=table) }}\n {%- endif -%}\n\n {%- endif -%}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.520122, "supported_languages": null}, "macro.dbt.noop_statement": {"unique_id": "macro.dbt.noop_statement", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "name": "noop_statement", "macro_sql": "{% macro noop_statement(name=None, message=None, code=None, rows_affected=None, res=None) -%}\n {%- set sql = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime SQL for node \"{}\"'.format(model['unique_id'])) }}\n {{ write(sql) }}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_raw_result(name, message=message, code=code, rows_affected=rows_affected, agate_table=res) }}\n {%- endif -%}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.521693, "supported_languages": null}, "macro.dbt.run_query": {"unique_id": "macro.dbt.run_query", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "name": "run_query", "macro_sql": "{% macro run_query(sql) %}\n {% call statement(\"run_query_statement\", fetch_result=true, auto_begin=false) %}\n {{ sql }}\n {% endcall %}\n\n {% do return(load_result(\"run_query_statement\").table) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5224488, "supported_languages": null}, "macro.dbt.convert_datetime": {"unique_id": "macro.dbt.convert_datetime", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "name": "convert_datetime", "macro_sql": "{% macro convert_datetime(date_str, date_fmt) %}\n\n {% set error_msg -%}\n The provided partition date '{{ date_str }}' does not match the expected format '{{ date_fmt }}'\n {%- endset %}\n\n {% set res = try_or_compiler_error(error_msg, modules.datetime.datetime.strptime, date_str.strip(), date_fmt) %}\n {{ return(res) }}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.526434, "supported_languages": null}, "macro.dbt.dates_in_range": {"unique_id": "macro.dbt.dates_in_range", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "name": "dates_in_range", "macro_sql": "{% macro dates_in_range(start_date_str, end_date_str=none, in_fmt=\"%Y%m%d\", out_fmt=\"%Y%m%d\") %}\n {% set end_date_str = start_date_str if end_date_str is none else end_date_str %}\n\n {% set start_date = convert_datetime(start_date_str, in_fmt) %}\n {% set end_date = convert_datetime(end_date_str, in_fmt) %}\n\n {% set day_count = (end_date - start_date).days %}\n {% if day_count 
< 0 %}\n {% set msg -%}\n Partition start date is after the end date ({{ start_date }}, {{ end_date }})\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg, model) }}\n {% endif %}\n\n {% set date_list = [] %}\n {% for i in range(0, day_count + 1) %}\n {% set the_date = (modules.datetime.timedelta(days=i) + start_date) %}\n {% if not out_fmt %}\n {% set _ = date_list.append(the_date) %}\n {% else %}\n {% set _ = date_list.append(the_date.strftime(out_fmt)) %}\n {% endif %}\n {% endfor %}\n\n {{ return(date_list) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.convert_datetime"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.529401, "supported_languages": null}, "macro.dbt.partition_range": {"unique_id": "macro.dbt.partition_range", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "name": "partition_range", "macro_sql": "{% macro partition_range(raw_partition_date, date_fmt='%Y%m%d') %}\n {% set partition_range = (raw_partition_date | string).split(\",\") %}\n\n {% if (partition_range | length) == 1 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = none %}\n {% elif (partition_range | length) == 2 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = partition_range[1] %}\n {% else %}\n {{ exceptions.raise_compiler_error(\"Invalid partition time. Expected format: {Start Date}[,{End Date}]. Got: \" ~ raw_partition_date) }}\n {% endif %}\n\n {{ return(dates_in_range(start_date, end_date, in_fmt=date_fmt)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.dates_in_range"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.531286, "supported_languages": null}, "macro.dbt.py_current_timestring": {"unique_id": "macro.dbt.py_current_timestring", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "name": "py_current_timestring", "macro_sql": "{% macro py_current_timestring() %}\n {% set dt = modules.datetime.datetime.now() %}\n {% do return(dt.strftime(\"%Y%m%d%H%M%S%f\")) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.531877, "supported_languages": null}, "macro.dbt.except": {"unique_id": "macro.dbt.except", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "name": "except", "macro_sql": "{% macro except() %}\n {{ return(adapter.dispatch('except', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__except"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5329752, "supported_languages": null}, "macro.dbt.default__except": {"unique_id": "macro.dbt.default__except", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "name": "default__except", "macro_sql": "{% macro default__except() %}\n\n except\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true,
"node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.533185, "supported_languages": null}, "macro.dbt.replace": {"unique_id": "macro.dbt.replace", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "name": "replace", "macro_sql": "{% macro replace(field, old_chars, new_chars) -%}\n {{ return(adapter.dispatch('replace', 'dbt') (field, old_chars, new_chars)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__replace"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.534307, "supported_languages": null}, "macro.dbt.default__replace": {"unique_id": "macro.dbt.default__replace", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "name": "default__replace", "macro_sql": "{% macro default__replace(field, old_chars, new_chars) %}\n\n replace(\n {{ field }},\n {{ old_chars }},\n {{ new_chars }}\n )\n\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.534719, "supported_languages": null}, "macro.dbt.concat": {"unique_id": "macro.dbt.concat", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "name": "concat", "macro_sql": "{% macro concat(fields) -%}\n {{ return(adapter.dispatch('concat', 'dbt')(fields)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.535649, "supported_languages": null}, "macro.dbt.default__concat": {"unique_id": "macro.dbt.default__concat", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "name": "default__concat", "macro_sql": "{% macro default__concat(fields) -%}\n {{ fields|join(' || ') }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5359669, "supported_languages": null}, "macro.dbt.length": {"unique_id": "macro.dbt.length", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "name": "length", "macro_sql": "{% macro length(expression) -%}\n {{ return(adapter.dispatch('length', 'dbt') (expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__length"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5369081, "supported_languages": null}, "macro.dbt.default__length": {"unique_id": "macro.dbt.default__length", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "name": "default__length", "macro_sql": "{% macro default__length(expression) %}\n\n length(\n {{ expression }}\n )\n\n{%- endmacro -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], 
"created_at": 1667573258.53718, "supported_languages": null}, "macro.dbt.dateadd": {"unique_id": "macro.dbt.dateadd", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "name": "dateadd", "macro_sql": "{% macro dateadd(datepart, interval, from_date_or_timestamp) %}\n {{ return(adapter.dispatch('dateadd', 'dbt')(datepart, interval, from_date_or_timestamp)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__dateadd"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.538305, "supported_languages": null}, "macro.dbt.default__dateadd": {"unique_id": "macro.dbt.default__dateadd", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "name": "default__dateadd", "macro_sql": "{% macro default__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n dateadd(\n {{ datepart }},\n {{ interval }},\n {{ from_date_or_timestamp }}\n )\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5387268, "supported_languages": null}, "macro.dbt.intersect": {"unique_id": "macro.dbt.intersect", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "name": "intersect", "macro_sql": "{% macro intersect() %}\n {{ return(adapter.dispatch('intersect', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__intersect"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.539616, "supported_languages": null}, "macro.dbt.default__intersect": {"unique_id": "macro.dbt.default__intersect", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "name": "default__intersect", "macro_sql": "{% macro default__intersect() %}\n\n intersect\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.539814, "supported_languages": null}, "macro.dbt.escape_single_quotes": {"unique_id": "macro.dbt.escape_single_quotes", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "name": "escape_single_quotes", "macro_sql": "{% macro escape_single_quotes(expression) %}\n {{ return(adapter.dispatch('escape_single_quotes', 'dbt') (expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__escape_single_quotes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.540791, "supported_languages": null}, "macro.dbt.default__escape_single_quotes": {"unique_id": "macro.dbt.default__escape_single_quotes", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "name": "default__escape_single_quotes", "macro_sql": "{% macro default__escape_single_quotes(expression) -%}\n{{ expression | 
replace(\"'\",\"''\") }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.541302, "supported_languages": null}, "macro.dbt.right": {"unique_id": "macro.dbt.right", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "name": "right", "macro_sql": "{% macro right(string_text, length_expression) -%}\n {{ return(adapter.dispatch('right', 'dbt') (string_text, length_expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__right"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.542329, "supported_languages": null}, "macro.dbt.default__right": {"unique_id": "macro.dbt.default__right", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "name": "default__right", "macro_sql": "{% macro default__right(string_text, length_expression) %}\n\n right(\n {{ string_text }},\n {{ length_expression }}\n )\n\n{%- endmacro -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.54267, "supported_languages": null}, "macro.dbt.listagg": {"unique_id": "macro.dbt.listagg", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "name": "listagg", "macro_sql": "{% macro listagg(measure, delimiter_text=\"','\", order_by_clause=none, limit_num=none) -%}\n {{ return(adapter.dispatch('listagg', 'dbt') (measure, delimiter_text, order_by_clause, limit_num)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__listagg"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.544319, "supported_languages": null}, "macro.dbt.default__listagg": {"unique_id": "macro.dbt.default__listagg", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "name": "default__listagg", "macro_sql": "{% macro default__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n array_slice(\n array_agg(\n {{ measure }}\n ){% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n ,0\n ,{{ limit_num }}\n ),\n {{ delimiter_text }}\n )\n {%- else %}\n listagg(\n {{ measure }},\n {{ delimiter_text }}\n )\n {% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n {%- endif %}\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.545324, "supported_languages": null}, "macro.dbt.datediff": {"unique_id": "macro.dbt.datediff", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "name": "datediff", "macro_sql": "{% macro datediff(first_date, second_date, datepart) %}\n {{ return(adapter.dispatch('datediff', 'dbt')(first_date, second_date, datepart)) }}\n{% endmacro %}", "resource_type": "macro", "tags": 
[], "depends_on": {"macros": ["macro.dbt_postgres.postgres__datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.546518, "supported_languages": null}, "macro.dbt.default__datediff": {"unique_id": "macro.dbt.default__datediff", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "name": "default__datediff", "macro_sql": "{% macro default__datediff(first_date, second_date, datepart) -%}\n\n datediff(\n {{ datepart }},\n {{ first_date }},\n {{ second_date }}\n )\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5469708, "supported_languages": null}, "macro.dbt.safe_cast": {"unique_id": "macro.dbt.safe_cast", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "name": "safe_cast", "macro_sql": "{% macro safe_cast(field, type) %}\n {{ return(adapter.dispatch('safe_cast', 'dbt') (field, type)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__safe_cast"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.548071, "supported_languages": null}, "macro.dbt.default__safe_cast": {"unique_id": "macro.dbt.default__safe_cast", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "name": "default__safe_cast", "macro_sql": "{% macro default__safe_cast(field, type) %}\n {# most databases don't support this function yet\n so we just need to use cast #}\n cast({{field}} as {{type}})\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.548503, "supported_languages": null}, "macro.dbt.hash": {"unique_id": "macro.dbt.hash", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "name": "hash", "macro_sql": "{% macro hash(field) -%}\n {{ return(adapter.dispatch('hash', 'dbt') (field)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__hash"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5495412, "supported_languages": null}, "macro.dbt.default__hash": {"unique_id": "macro.dbt.default__hash", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "name": "default__hash", "macro_sql": "{% macro default__hash(field) -%}\n md5(cast({{ field }} as {{ api.Column.translate_type('string') }}))\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.549956, "supported_languages": null}, "macro.dbt.cast_bool_to_text": {"unique_id": "macro.dbt.cast_bool_to_text", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "name": "cast_bool_to_text", "macro_sql": "{% macro cast_bool_to_text(field) 
%}\n {{ adapter.dispatch('cast_bool_to_text', 'dbt') (field) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__cast_bool_to_text"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.550951, "supported_languages": null}, "macro.dbt.default__cast_bool_to_text": {"unique_id": "macro.dbt.default__cast_bool_to_text", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "name": "default__cast_bool_to_text", "macro_sql": "{% macro default__cast_bool_to_text(field) %}\n cast({{ field }} as {{ api.Column.translate_type('string') }})\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.551501, "supported_languages": null}, "macro.dbt.any_value": {"unique_id": "macro.dbt.any_value", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "name": "any_value", "macro_sql": "{% macro any_value(expression) -%}\n {{ return(adapter.dispatch('any_value', 'dbt') (expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__any_value"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5527098, "supported_languages": null}, "macro.dbt.default__any_value": {"unique_id": "macro.dbt.default__any_value", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "name": "default__any_value", "macro_sql": "{% macro default__any_value(expression) -%}\n\n any_value({{ expression }})\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.553046, "supported_languages": null}, "macro.dbt.position": {"unique_id": "macro.dbt.position", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "name": "position", "macro_sql": "{% macro position(substring_text, string_text) -%}\n {{ return(adapter.dispatch('position', 'dbt') (substring_text, string_text)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__position"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.554094, "supported_languages": null}, "macro.dbt.default__position": {"unique_id": "macro.dbt.default__position", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "name": "default__position", "macro_sql": "{% macro default__position(substring_text, string_text) %}\n\n position(\n {{ substring_text }} in {{ string_text }}\n )\n\n{%- endmacro -%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.55445, "supported_languages": null}, "macro.dbt.string_literal": {"unique_id": "macro.dbt.string_literal", "package_name": "dbt", "path": 
"macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "name": "string_literal", "macro_sql": "{%- macro string_literal(value) -%}\n {{ return(adapter.dispatch('string_literal', 'dbt') (value)) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__string_literal"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.555383, "supported_languages": null}, "macro.dbt.default__string_literal": {"unique_id": "macro.dbt.default__string_literal", "package_name": "dbt", "path": "macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "name": "default__string_literal", "macro_sql": "{% macro default__string_literal(value) -%}\n '{{ value }}'\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.555779, "supported_languages": null}, "macro.dbt.type_string": {"unique_id": "macro.dbt.type_string", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_string", "macro_sql": "\n\n{%- macro type_string() -%}\n {{ return(adapter.dispatch('type_string', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_string"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5583549, "supported_languages": null}, "macro.dbt.default__type_string": {"unique_id": "macro.dbt.default__type_string", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_string", "macro_sql": "{% macro default__type_string() %}\n {{ return(api.Column.translate_type(\"string\")) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.558912, "supported_languages": null}, "macro.dbt.type_timestamp": {"unique_id": "macro.dbt.type_timestamp", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_timestamp", "macro_sql": "\n\n{%- macro type_timestamp() -%}\n {{ return(adapter.dispatch('type_timestamp', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.559475, "supported_languages": null}, "macro.dbt.default__type_timestamp": {"unique_id": "macro.dbt.default__type_timestamp", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_timestamp", "macro_sql": "{% macro default__type_timestamp() %}\n {{ return(api.Column.translate_type(\"timestamp\")) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.559979, "supported_languages": null}, "macro.dbt.type_float": 
{"unique_id": "macro.dbt.type_float", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_float", "macro_sql": "\n\n{%- macro type_float() -%}\n {{ return(adapter.dispatch('type_float', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_float"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.56046, "supported_languages": null}, "macro.dbt.default__type_float": {"unique_id": "macro.dbt.default__type_float", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_float", "macro_sql": "{% macro default__type_float() %}\n {{ return(api.Column.translate_type(\"float\")) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.56098, "supported_languages": null}, "macro.dbt.type_numeric": {"unique_id": "macro.dbt.type_numeric", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_numeric", "macro_sql": "\n\n{%- macro type_numeric() -%}\n {{ return(adapter.dispatch('type_numeric', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_numeric"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.561585, "supported_languages": null}, "macro.dbt.default__type_numeric": {"unique_id": "macro.dbt.default__type_numeric", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_numeric", "macro_sql": "{% macro default__type_numeric() %}\n {{ return(api.Column.numeric_type(\"numeric\", 28, 6)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5620632, "supported_languages": null}, "macro.dbt.type_bigint": {"unique_id": "macro.dbt.type_bigint", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_bigint", "macro_sql": "\n\n{%- macro type_bigint() -%}\n {{ return(adapter.dispatch('type_bigint', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_bigint"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.562495, "supported_languages": null}, "macro.dbt.default__type_bigint": {"unique_id": "macro.dbt.default__type_bigint", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_bigint", "macro_sql": "{% macro default__type_bigint() %}\n {{ return(api.Column.translate_type(\"bigint\")) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.562887, 
"supported_languages": null}, "macro.dbt.type_int": {"unique_id": "macro.dbt.type_int", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_int", "macro_sql": "\n\n{%- macro type_int() -%}\n {{ return(adapter.dispatch('type_int', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_int"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.563301, "supported_languages": null}, "macro.dbt.default__type_int": {"unique_id": "macro.dbt.default__type_int", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_int", "macro_sql": "{%- macro default__type_int() -%}\n {{ return(api.Column.translate_type(\"integer\")) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.563683, "supported_languages": null}, "macro.dbt.type_boolean": {"unique_id": "macro.dbt.type_boolean", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "type_boolean", "macro_sql": "\n\n{%- macro type_boolean() -%}\n {{ return(adapter.dispatch('type_boolean', 'dbt')()) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__type_boolean"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.564095, "supported_languages": null}, "macro.dbt.default__type_boolean": {"unique_id": "macro.dbt.default__type_boolean", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "name": "default__type_boolean", "macro_sql": "{%- macro default__type_boolean() -%}\n {{ return(api.Column.translate_type(\"boolean\")) }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.564472, "supported_languages": null}, "macro.dbt.array_concat": {"unique_id": "macro.dbt.array_concat", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "name": "array_concat", "macro_sql": "{% macro array_concat(array_1, array_2) -%}\n {{ return(adapter.dispatch('array_concat', 'dbt')(array_1, array_2)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__array_concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5655942, "supported_languages": null}, "macro.dbt.default__array_concat": {"unique_id": "macro.dbt.default__array_concat", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "name": "default__array_concat", "macro_sql": "{% macro default__array_concat(array_1, array_2) -%}\n array_cat({{ array_1 }}, {{ array_2 }})\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, 
"node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5660028, "supported_languages": null}, "macro.dbt.bool_or": {"unique_id": "macro.dbt.bool_or", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "name": "bool_or", "macro_sql": "{% macro bool_or(expression) -%}\n {{ return(adapter.dispatch('bool_or', 'dbt') (expression)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__bool_or"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.567043, "supported_languages": null}, "macro.dbt.default__bool_or": {"unique_id": "macro.dbt.default__bool_or", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "name": "default__bool_or", "macro_sql": "{% macro default__bool_or(expression) -%}\n\n bool_or({{ expression }})\n\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.567328, "supported_languages": null}, "macro.dbt.last_day": {"unique_id": "macro.dbt.last_day", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "name": "last_day", "macro_sql": "{% macro last_day(date, datepart) %}\n {{ return(adapter.dispatch('last_day', 'dbt') (date, datepart)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.568495, "supported_languages": null}, "macro.dbt.default_last_day": {"unique_id": "macro.dbt.default_last_day", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "name": "default_last_day", "macro_sql": "\n\n{%- macro default_last_day(date, datepart) -%}\n cast(\n {{dbt.dateadd('day', '-1',\n dbt.dateadd(datepart, '1', dbt.date_trunc(datepart, date))\n )}}\n as date)\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.56929, "supported_languages": null}, "macro.dbt.default__last_day": {"unique_id": "macro.dbt.default__last_day", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "name": "default__last_day", "macro_sql": "{% macro default__last_day(date, datepart) -%}\n {{dbt.default_last_day(date, datepart)}}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.569667, "supported_languages": null}, "macro.dbt.split_part": {"unique_id": "macro.dbt.split_part", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "name": "split_part", "macro_sql": "{% macro split_part(string_text, delimiter_text, part_number) %}\n {{ return(adapter.dispatch('split_part', 'dbt') (string_text, delimiter_text, 
part_number)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__split_part"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.571143, "supported_languages": null}, "macro.dbt.default__split_part": {"unique_id": "macro.dbt.default__split_part", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "name": "default__split_part", "macro_sql": "{% macro default__split_part(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n {{ part_number }}\n )\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.571575, "supported_languages": null}, "macro.dbt._split_part_negative": {"unique_id": "macro.dbt._split_part_negative", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "name": "_split_part_negative", "macro_sql": "{% macro _split_part_negative(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n length({{ string_text }})\n - length(\n replace({{ string_text }}, {{ delimiter_text }}, '')\n ) + 2 {{ part_number }}\n )\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.57214, "supported_languages": null}, "macro.dbt.date_trunc": {"unique_id": "macro.dbt.date_trunc", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "name": "date_trunc", "macro_sql": "{% macro date_trunc(datepart, date) -%}\n {{ return(adapter.dispatch('date_trunc', 'dbt') (datepart, date)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5731661, "supported_languages": null}, "macro.dbt.default__date_trunc": {"unique_id": "macro.dbt.default__date_trunc", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "name": "default__date_trunc", "macro_sql": "{% macro default__date_trunc(datepart, date) -%}\n date_trunc('{{datepart}}', {{date}})\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.573505, "supported_languages": null}, "macro.dbt.array_construct": {"unique_id": "macro.dbt.array_construct", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "name": "array_construct", "macro_sql": "{% macro array_construct(inputs=[], data_type=api.Column.translate_type('integer')) -%}\n {{ return(adapter.dispatch('array_construct', 'dbt')(inputs, data_type)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__array_construct"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": 
null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5747578, "supported_languages": null}, "macro.dbt.default__array_construct": {"unique_id": "macro.dbt.default__array_construct", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "name": "default__array_construct", "macro_sql": "{% macro default__array_construct(inputs, data_type) -%}\n {% if inputs|length > 0 %}\n array[ {{ inputs|join(' , ') }} ]\n {% else %}\n array[]::{{data_type}}[]\n {% endif %}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.57538, "supported_languages": null}, "macro.dbt.array_append": {"unique_id": "macro.dbt.array_append", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "name": "array_append", "macro_sql": "{% macro array_append(array, new_element) -%}\n {{ return(adapter.dispatch('array_append', 'dbt')(array, new_element)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__array_append"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.576419, "supported_languages": null}, "macro.dbt.default__array_append": {"unique_id": "macro.dbt.default__array_append", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "name": "default__array_append", "macro_sql": "{% macro default__array_append(array, new_element) -%}\n array_append({{ array }}, {{ new_element }})\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.576768, "supported_languages": null}, "macro.dbt.create_schema": {"unique_id": "macro.dbt.create_schema", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "name": "create_schema", "macro_sql": "{% macro create_schema(relation) -%}\n {{ adapter.dispatch('create_schema', 'dbt')(relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.577954, "supported_languages": null}, "macro.dbt.default__create_schema": {"unique_id": "macro.dbt.default__create_schema", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "name": "default__create_schema", "macro_sql": "{% macro default__create_schema(relation) -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier() }}\n {% endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.578434, "supported_languages": null}, "macro.dbt.drop_schema": {"unique_id": "macro.dbt.drop_schema", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": 
"macros/adapters/schema.sql", "name": "drop_schema", "macro_sql": "{% macro drop_schema(relation) -%}\n {{ adapter.dispatch('drop_schema', 'dbt')(relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__drop_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.578851, "supported_languages": null}, "macro.dbt.default__drop_schema": {"unique_id": "macro.dbt.default__drop_schema", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "name": "default__drop_schema", "macro_sql": "{% macro default__drop_schema(relation) -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier() }} cascade\n {% endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5793228, "supported_languages": null}, "macro.dbt.current_timestamp": {"unique_id": "macro.dbt.current_timestamp", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "current_timestamp", "macro_sql": "{%- macro current_timestamp() -%}\n {{ adapter.dispatch('current_timestamp', 'dbt')() }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.580731, "supported_languages": null}, "macro.dbt.default__current_timestamp": {"unique_id": "macro.dbt.default__current_timestamp", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "default__current_timestamp", "macro_sql": "{% macro default__current_timestamp() -%}\n {{ exceptions.raise_not_implemented(\n 'current_timestamp macro not implemented for adapter ' + adapter.type()) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.581128, "supported_languages": null}, "macro.dbt.snapshot_get_time": {"unique_id": "macro.dbt.snapshot_get_time", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "snapshot_get_time", "macro_sql": "\n\n{%- macro snapshot_get_time() -%}\n {{ adapter.dispatch('snapshot_get_time', 'dbt')() }}\n{%- endmacro -%}\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_get_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.581523, "supported_languages": null}, "macro.dbt.default__snapshot_get_time": {"unique_id": "macro.dbt.default__snapshot_get_time", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "default__snapshot_get_time", "macro_sql": "{% macro default__snapshot_get_time() %}\n {{ current_timestamp() }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": 
["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.581808, "supported_languages": null}, "macro.dbt.current_timestamp_backcompat": {"unique_id": "macro.dbt.current_timestamp_backcompat", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "current_timestamp_backcompat", "macro_sql": "{% macro current_timestamp_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5822232, "supported_languages": null}, "macro.dbt.default__current_timestamp_backcompat": {"unique_id": "macro.dbt.default__current_timestamp_backcompat", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "default__current_timestamp_backcompat", "macro_sql": "{% macro default__current_timestamp_backcompat() %}\n current_timestamp::timestamp\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5825438, "supported_languages": null}, "macro.dbt.current_timestamp_in_utc_backcompat": {"unique_id": "macro.dbt.current_timestamp_in_utc_backcompat", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "current_timestamp_in_utc_backcompat", "macro_sql": "{% macro current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_in_utc_backcompat', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.582957, "supported_languages": null}, "macro.dbt.default__current_timestamp_in_utc_backcompat": {"unique_id": "macro.dbt.default__current_timestamp_in_utc_backcompat", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "name": "default__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro default__current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.current_timestamp_backcompat", "macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.583432, "supported_languages": null}, "macro.dbt.get_create_index_sql": {"unique_id": "macro.dbt.get_create_index_sql", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "name": "get_create_index_sql", "macro_sql": "{% macro get_create_index_sql(relation, index_dict) -%}\n {{ return(adapter.dispatch('get_create_index_sql', 'dbt')(relation, index_dict)) }}\n{% endmacro %}", "resource_type": 
"macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_create_index_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.585173, "supported_languages": null}, "macro.dbt.default__get_create_index_sql": {"unique_id": "macro.dbt.default__get_create_index_sql", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "name": "default__get_create_index_sql", "macro_sql": "{% macro default__get_create_index_sql(relation, index_dict) -%}\n {% do return(None) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5855622, "supported_languages": null}, "macro.dbt.create_indexes": {"unique_id": "macro.dbt.create_indexes", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "name": "create_indexes", "macro_sql": "{% macro create_indexes(relation) -%}\n {{ adapter.dispatch('create_indexes', 'dbt')(relation) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5859761, "supported_languages": null}, "macro.dbt.default__create_indexes": {"unique_id": "macro.dbt.default__create_indexes", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "name": "default__create_indexes", "macro_sql": "{% macro default__create_indexes(relation) -%}\n {%- set _indexes = config.get('indexes', default=[]) -%}\n\n {% for _index_dict in _indexes %}\n {% set create_index_sql = get_create_index_sql(relation, _index_dict) %}\n {% if create_index_sql %}\n {% do run_query(create_index_sql) %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.get_create_index_sql", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.587023, "supported_languages": null}, "macro.dbt.make_intermediate_relation": {"unique_id": "macro.dbt.make_intermediate_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "make_intermediate_relation", "macro_sql": "{% macro make_intermediate_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_intermediate_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_intermediate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5934248, "supported_languages": null}, "macro.dbt.default__make_intermediate_relation": {"unique_id": "macro.dbt.default__make_intermediate_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__make_intermediate_relation", "macro_sql": "{% macro default__make_intermediate_relation(base_relation, suffix) %}\n {{ 
return(default__make_temp_relation(base_relation, suffix)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5938601, "supported_languages": null}, "macro.dbt.make_temp_relation": {"unique_id": "macro.dbt.make_temp_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "make_temp_relation", "macro_sql": "{% macro make_temp_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_temp_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5944152, "supported_languages": null}, "macro.dbt.default__make_temp_relation": {"unique_id": "macro.dbt.default__make_temp_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__make_temp_relation", "macro_sql": "{% macro default__make_temp_relation(base_relation, suffix) %}\n {%- set temp_identifier = base_relation.identifier ~ suffix -%}\n {%- set temp_relation = base_relation.incorporate(\n path={\"identifier\": temp_identifier}) -%}\n\n {{ return(temp_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.595192, "supported_languages": null}, "macro.dbt.make_backup_relation": {"unique_id": "macro.dbt.make_backup_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "make_backup_relation", "macro_sql": "{% macro make_backup_relation(base_relation, backup_relation_type, suffix='__dbt_backup') %}\n {{ return(adapter.dispatch('make_backup_relation', 'dbt')(base_relation, backup_relation_type, suffix)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_backup_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.595815, "supported_languages": null}, "macro.dbt.default__make_backup_relation": {"unique_id": "macro.dbt.default__make_backup_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__make_backup_relation", "macro_sql": "{% macro default__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {%- set backup_identifier = base_relation.identifier ~ suffix -%}\n {%- set backup_relation = base_relation.incorporate(\n path={\"identifier\": backup_identifier},\n type=backup_relation_type\n ) -%}\n {{ return(backup_relation) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5966232, "supported_languages": null}, "macro.dbt.drop_relation": {"unique_id": "macro.dbt.drop_relation", "package_name": 
"dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "drop_relation", "macro_sql": "{% macro drop_relation(relation) -%}\n {{ return(adapter.dispatch('drop_relation', 'dbt')(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__drop_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.597093, "supported_languages": null}, "macro.dbt.default__drop_relation": {"unique_id": "macro.dbt.default__drop_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__drop_relation", "macro_sql": "{% macro default__drop_relation(relation) -%}\n {% call statement('drop_relation', auto_begin=False) -%}\n drop {{ relation.type }} if exists {{ relation }} cascade\n {%- endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.597645, "supported_languages": null}, "macro.dbt.truncate_relation": {"unique_id": "macro.dbt.truncate_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "truncate_relation", "macro_sql": "{% macro truncate_relation(relation) -%}\n {{ return(adapter.dispatch('truncate_relation', 'dbt')(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__truncate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.598109, "supported_languages": null}, "macro.dbt.default__truncate_relation": {"unique_id": "macro.dbt.default__truncate_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__truncate_relation", "macro_sql": "{% macro default__truncate_relation(relation) -%}\n {% call statement('truncate_relation') -%}\n truncate table {{ relation }}\n {%- endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.598536, "supported_languages": null}, "macro.dbt.rename_relation": {"unique_id": "macro.dbt.rename_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "rename_relation", "macro_sql": "{% macro rename_relation(from_relation, to_relation) -%}\n {{ return(adapter.dispatch('rename_relation', 'dbt')(from_relation, to_relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__rename_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.5990558, "supported_languages": null}, "macro.dbt.default__rename_relation": {"unique_id": "macro.dbt.default__rename_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__rename_relation", "macro_sql": "{% macro 
default__rename_relation(from_relation, to_relation) -%}\n {% set target_name = adapter.quote_as_configured(to_relation.identifier, 'identifier') %}\n {% call statement('rename_relation') -%}\n alter table {{ from_relation }} rename to {{ target_name }}\n {%- endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.60004, "supported_languages": null}, "macro.dbt.get_or_create_relation": {"unique_id": "macro.dbt.get_or_create_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "get_or_create_relation", "macro_sql": "{% macro get_or_create_relation(database, schema, identifier, type) -%}\n {{ return(adapter.dispatch('get_or_create_relation', 'dbt')(database, schema, identifier, type)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_or_create_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.600681, "supported_languages": null}, "macro.dbt.default__get_or_create_relation": {"unique_id": "macro.dbt.default__get_or_create_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "default__get_or_create_relation", "macro_sql": "{% macro default__get_or_create_relation(database, schema, identifier, type) %}\n {%- set target_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n\n {% if target_relation %}\n {% do return([true, target_relation]) %}\n {% endif %}\n\n {%- set new_relation = api.Relation.create(\n database=database,\n schema=schema,\n identifier=identifier,\n type=type\n ) -%}\n {% do return([false, new_relation]) %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.602067, "supported_languages": null}, "macro.dbt.load_cached_relation": {"unique_id": "macro.dbt.load_cached_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "load_cached_relation", "macro_sql": "{% macro load_cached_relation(relation) %}\n {% do return(adapter.get_relation(\n database=relation.database,\n schema=relation.schema,\n identifier=relation.identifier\n )) -%}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.602644, "supported_languages": null}, "macro.dbt.load_relation": {"unique_id": "macro.dbt.load_relation", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "load_relation", "macro_sql": "{% macro load_relation(relation) %}\n {{ return(load_cached_relation(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.load_cached_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.603012, "supported_languages": null}, 
"macro.dbt.drop_relation_if_exists": {"unique_id": "macro.dbt.drop_relation_if_exists", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "name": "drop_relation_if_exists", "macro_sql": "{% macro drop_relation_if_exists(relation) %}\n {% if relation is not none %}\n {{ adapter.drop_relation(relation) }}\n {% endif %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.603508, "supported_languages": null}, "macro.dbt.collect_freshness": {"unique_id": "macro.dbt.collect_freshness", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "name": "collect_freshness", "macro_sql": "{% macro collect_freshness(source, loaded_at_field, filter) %}\n {{ return(adapter.dispatch('collect_freshness', 'dbt')(source, loaded_at_field, filter))}}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__collect_freshness"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6050408, "supported_languages": null}, "macro.dbt.default__collect_freshness": {"unique_id": "macro.dbt.default__collect_freshness", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "name": "default__collect_freshness", "macro_sql": "{% macro default__collect_freshness(source, loaded_at_field, filter) %}\n {% call statement('collect_freshness', fetch_result=True, auto_begin=False) -%}\n select\n max({{ loaded_at_field }}) as max_loaded_at,\n {{ current_timestamp() }} as snapshotted_at\n from {{ source }}\n {% if filter %}\n where {{ filter }}\n {% endif %}\n {% endcall %}\n {{ return(load_result('collect_freshness').table) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.606111, "supported_languages": null}, "macro.dbt.copy_grants": {"unique_id": "macro.dbt.copy_grants", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "copy_grants", "macro_sql": "{% macro copy_grants() %}\n {{ return(adapter.dispatch('copy_grants', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.609764, "supported_languages": null}, "macro.dbt.default__copy_grants": {"unique_id": "macro.dbt.default__copy_grants", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__copy_grants", "macro_sql": "{% macro default__copy_grants() %}\n {{ return(True) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.610076, "supported_languages": null}, 
"macro.dbt.support_multiple_grantees_per_dcl_statement": {"unique_id": "macro.dbt.support_multiple_grantees_per_dcl_statement", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "support_multiple_grantees_per_dcl_statement", "macro_sql": "{% macro support_multiple_grantees_per_dcl_statement() %}\n {{ return(adapter.dispatch('support_multiple_grantees_per_dcl_statement', 'dbt')()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.610507, "supported_languages": null}, "macro.dbt.default__support_multiple_grantees_per_dcl_statement": {"unique_id": "macro.dbt.default__support_multiple_grantees_per_dcl_statement", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__support_multiple_grantees_per_dcl_statement", "macro_sql": "\n\n{%- macro default__support_multiple_grantees_per_dcl_statement() -%}\n {{ return(True) }}\n{%- endmacro -%}\n\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6107981, "supported_languages": null}, "macro.dbt.should_revoke": {"unique_id": "macro.dbt.should_revoke", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "should_revoke", "macro_sql": "{% macro should_revoke(existing_relation, full_refresh_mode=True) %}\n\n {% if not existing_relation %}\n {#-- The table doesn't already exist, so no grants to copy over --#}\n {{ return(False) }}\n {% elif full_refresh_mode %}\n {#-- The object is being REPLACED -- whether grants are copied over depends on the value of user config --#}\n {{ return(copy_grants()) }}\n {% else %}\n {#-- The table is being merged/upserted/inserted -- grants will be carried over --#}\n {{ return(True) }}\n {% endif %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.611671, "supported_languages": null}, "macro.dbt.get_show_grant_sql": {"unique_id": "macro.dbt.get_show_grant_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "get_show_grant_sql", "macro_sql": "{% macro get_show_grant_sql(relation) %}\n {{ return(adapter.dispatch(\"get_show_grant_sql\", \"dbt\")(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_show_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.612134, "supported_languages": null}, "macro.dbt.default__get_show_grant_sql": {"unique_id": "macro.dbt.default__get_show_grant_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__get_show_grant_sql", "macro_sql": "{% macro default__get_show_grant_sql(relation) %}\n show grants on {{ 
relation }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.612397, "supported_languages": null}, "macro.dbt.get_grant_sql": {"unique_id": "macro.dbt.get_grant_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "get_grant_sql", "macro_sql": "{% macro get_grant_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_grant_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6130052, "supported_languages": null}, "macro.dbt.default__get_grant_sql": {"unique_id": "macro.dbt.default__get_grant_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__get_grant_sql", "macro_sql": "\n\n{%- macro default__get_grant_sql(relation, privilege, grantees) -%}\n grant {{ privilege }} on {{ relation }} to {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.61358, "supported_languages": null}, "macro.dbt.get_revoke_sql": {"unique_id": "macro.dbt.get_revoke_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "get_revoke_sql", "macro_sql": "{% macro get_revoke_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_revoke_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_revoke_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6141498, "supported_languages": null}, "macro.dbt.default__get_revoke_sql": {"unique_id": "macro.dbt.default__get_revoke_sql", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__get_revoke_sql", "macro_sql": "\n\n{%- macro default__get_revoke_sql(relation, privilege, grantees) -%}\n revoke {{ privilege }} on {{ relation }} from {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.61463, "supported_languages": null}, "macro.dbt.get_dcl_statement_list": {"unique_id": "macro.dbt.get_dcl_statement_list", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "get_dcl_statement_list", "macro_sql": "{% macro get_dcl_statement_list(relation, grant_config, get_dcl_macro) %}\n {{ return(adapter.dispatch('get_dcl_statement_list', 'dbt')(relation, grant_config, get_dcl_macro)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_dcl_statement_list"]}, 
"description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6152372, "supported_languages": null}, "macro.dbt.default__get_dcl_statement_list": {"unique_id": "macro.dbt.default__get_dcl_statement_list", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__get_dcl_statement_list", "macro_sql": "\n\n{%- macro default__get_dcl_statement_list(relation, grant_config, get_dcl_macro) -%}\n {#\n -- Unpack grant_config into specific privileges and the set of users who need them granted/revoked.\n -- Depending on whether this database supports multiple grantees per statement, pass in the list of\n -- all grantees per privilege, or (if not) template one statement per privilege-grantee pair.\n -- `get_dcl_macro` will be either `get_grant_sql` or `get_revoke_sql`\n #}\n {%- set dcl_statements = [] -%}\n {%- for privilege, grantees in grant_config.items() %}\n {%- if support_multiple_grantees_per_dcl_statement() and grantees -%}\n {%- set dcl = get_dcl_macro(relation, privilege, grantees) -%}\n {%- do dcl_statements.append(dcl) -%}\n {%- else -%}\n {%- for grantee in grantees -%}\n {% set dcl = get_dcl_macro(relation, privilege, [grantee]) %}\n {%- do dcl_statements.append(dcl) -%}\n {% endfor -%}\n {%- endif -%}\n {%- endfor -%}\n {{ return(dcl_statements) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.617053, "supported_languages": null}, "macro.dbt.call_dcl_statements": {"unique_id": "macro.dbt.call_dcl_statements", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "call_dcl_statements", "macro_sql": "{% macro call_dcl_statements(dcl_statement_list) %}\n {{ return(adapter.dispatch(\"call_dcl_statements\", \"dbt\")(dcl_statement_list)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6175182, "supported_languages": null}, "macro.dbt.default__call_dcl_statements": {"unique_id": "macro.dbt.default__call_dcl_statements", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__call_dcl_statements", "macro_sql": "{% macro default__call_dcl_statements(dcl_statement_list) %}\n {#\n -- By default, supply all grant + revoke statements in a single semicolon-separated block,\n -- so that they're all processed together.\n\n -- Some databases do not support this. 
Those adapters will need to override this macro\n -- to run each statement individually.\n #}\n {% call statement('grants') %}\n {% for dcl_statement in dcl_statement_list %}\n {{ dcl_statement }};\n {% endfor %}\n {% endcall %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.61847, "supported_languages": null}, "macro.dbt.apply_grants": {"unique_id": "macro.dbt.apply_grants", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "apply_grants", "macro_sql": "{% macro apply_grants(relation, grant_config, should_revoke) %}\n {{ return(adapter.dispatch(\"apply_grants\", \"dbt\")(relation, grant_config, should_revoke)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.619071, "supported_languages": null}, "macro.dbt.default__apply_grants": {"unique_id": "macro.dbt.default__apply_grants", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "name": "default__apply_grants", "macro_sql": "{% macro default__apply_grants(relation, grant_config, should_revoke=True) %}\n {#-- If grant_config is {} or None, this is a no-op --#}\n {% if grant_config %}\n {% if should_revoke %}\n {#-- We think previous grants may have carried over --#}\n {#-- Show current grants and calculate diffs --#}\n {% set current_grants_table = run_query(get_show_grant_sql(relation)) %}\n {% set current_grants_dict = adapter.standardize_grants_dict(current_grants_table) %}\n {% set needs_granting = diff_of_two_dicts(grant_config, current_grants_dict) %}\n {% set needs_revoking = diff_of_two_dicts(current_grants_dict, grant_config) %}\n {% if not (needs_granting or needs_revoking) %}\n {{ log('On ' ~ relation ~': All grants are in place, no revocation or granting needed.')}}\n {% endif %}\n {% else %}\n {#-- We don't think there's any chance of previous grants having carried over. --#}\n {#-- Jump straight to granting what the user has configured. 
--#}\n {% set needs_revoking = {} %}\n {% set needs_granting = grant_config %}\n {% endif %}\n {% if needs_granting or needs_revoking %}\n {% set revoke_statement_list = get_dcl_statement_list(relation, needs_revoking, get_revoke_sql) %}\n {% set grant_statement_list = get_dcl_statement_list(relation, needs_granting, get_grant_sql) %}\n {% set dcl_statement_list = revoke_statement_list + grant_statement_list %}\n {% if dcl_statement_list %}\n {{ call_dcl_statements(dcl_statement_list) }}\n {% endif %}\n {% endif %}\n {% endif %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.get_show_grant_sql", "macro.dbt.get_dcl_statement_list", "macro.dbt.call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6220548, "supported_languages": null}, "macro.dbt.alter_column_comment": {"unique_id": "macro.dbt.alter_column_comment", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "alter_column_comment", "macro_sql": "{% macro alter_column_comment(relation, column_dict) -%}\n {{ return(adapter.dispatch('alter_column_comment', 'dbt')(relation, column_dict)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.623905, "supported_languages": null}, "macro.dbt.default__alter_column_comment": {"unique_id": "macro.dbt.default__alter_column_comment", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "default__alter_column_comment", "macro_sql": "{% macro default__alter_column_comment(relation, column_dict) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_column_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.62433, "supported_languages": null}, "macro.dbt.alter_relation_comment": {"unique_id": "macro.dbt.alter_relation_comment", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "alter_relation_comment", "macro_sql": "{% macro alter_relation_comment(relation, relation_comment) -%}\n {{ return(adapter.dispatch('alter_relation_comment', 'dbt')(relation, relation_comment)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_relation_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6248372, "supported_languages": null}, "macro.dbt.default__alter_relation_comment": {"unique_id": "macro.dbt.default__alter_relation_comment", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "default__alter_relation_comment", "macro_sql": "{% macro default__alter_relation_comment(relation, relation_comment) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_relation_comment macro not implemented for adapter 
'+adapter.type()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.62526, "supported_languages": null}, "macro.dbt.persist_docs": {"unique_id": "macro.dbt.persist_docs", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "persist_docs", "macro_sql": "{% macro persist_docs(relation, model, for_relation=true, for_columns=true) -%}\n {{ return(adapter.dispatch('persist_docs', 'dbt')(relation, model, for_relation, for_columns)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.625922, "supported_languages": null}, "macro.dbt.default__persist_docs": {"unique_id": "macro.dbt.default__persist_docs", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "name": "default__persist_docs", "macro_sql": "{% macro default__persist_docs(relation, model, for_relation, for_columns) -%}\n {% if for_relation and config.persist_relation_docs() and model.description %}\n {% do run_query(alter_relation_comment(relation, model.description)) %}\n {% endif %}\n\n {% if for_columns and config.persist_column_docs() and model.columns %}\n {% do run_query(alter_column_comment(relation, model.columns)) %}\n {% endif %}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.alter_relation_comment", "macro.dbt.alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.627101, "supported_languages": null}, "macro.dbt.get_catalog": {"unique_id": "macro.dbt.get_catalog", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "get_catalog", "macro_sql": "{% macro get_catalog(information_schema, schemas) -%}\n {{ return(adapter.dispatch('get_catalog', 'dbt')(information_schema, schemas)) }}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_catalog"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.630411, "supported_languages": null}, "macro.dbt.default__get_catalog": {"unique_id": "macro.dbt.default__get_catalog", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "default__get_catalog", "macro_sql": "{% macro default__get_catalog(information_schema, schemas) -%}\n\n {% set typename = adapter.type() %}\n {% set msg -%}\n get_catalog not implemented for {{ typename }}\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.631063, "supported_languages": null}, "macro.dbt.information_schema_name": {"unique_id": "macro.dbt.information_schema_name", "package_name": "dbt", "path": "macros/adapters/metadata.sql", 
"original_file_path": "macros/adapters/metadata.sql", "name": "information_schema_name", "macro_sql": "{% macro information_schema_name(database) %}\n {{ return(adapter.dispatch('information_schema_name', 'dbt')(database)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__information_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.631524, "supported_languages": null}, "macro.dbt.default__information_schema_name": {"unique_id": "macro.dbt.default__information_schema_name", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "default__information_schema_name", "macro_sql": "{% macro default__information_schema_name(database) -%}\n {%- if database -%}\n {{ database }}.INFORMATION_SCHEMA\n {%- else -%}\n INFORMATION_SCHEMA\n {%- endif -%}\n{%- endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6319208, "supported_languages": null}, "macro.dbt.list_schemas": {"unique_id": "macro.dbt.list_schemas", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "list_schemas", "macro_sql": "{% macro list_schemas(database) -%}\n {{ return(adapter.dispatch('list_schemas', 'dbt')(database)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.632376, "supported_languages": null}, "macro.dbt.default__list_schemas": {"unique_id": "macro.dbt.default__list_schemas", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "default__list_schemas", "macro_sql": "{% macro default__list_schemas(database) -%}\n {% set sql %}\n select distinct schema_name\n from {{ information_schema_name(database) }}.SCHEMATA\n where catalog_name ilike '{{ database }}'\n {% endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.information_schema_name", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.633168, "supported_languages": null}, "macro.dbt.check_schema_exists": {"unique_id": "macro.dbt.check_schema_exists", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "check_schema_exists", "macro_sql": "{% macro check_schema_exists(information_schema, schema) -%}\n {{ return(adapter.dispatch('check_schema_exists', 'dbt')(information_schema, schema)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__check_schema_exists"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.633694, "supported_languages": null}, "macro.dbt.default__check_schema_exists": {"unique_id": "macro.dbt.default__check_schema_exists", "package_name": "dbt", "path": "macros/adapters/metadata.sql", 
"original_file_path": "macros/adapters/metadata.sql", "name": "default__check_schema_exists", "macro_sql": "{% macro default__check_schema_exists(information_schema, schema) -%}\n {% set sql -%}\n select count(*)\n from {{ information_schema.replace(information_schema_view='SCHEMATA') }}\n where catalog_name='{{ information_schema.database }}'\n and schema_name='{{ schema }}'\n {%- endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.replace", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.63451, "supported_languages": null}, "macro.dbt.list_relations_without_caching": {"unique_id": "macro.dbt.list_relations_without_caching", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "list_relations_without_caching", "macro_sql": "{% macro list_relations_without_caching(schema_relation) %}\n {{ return(adapter.dispatch('list_relations_without_caching', 'dbt')(schema_relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_relations_without_caching"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.634974, "supported_languages": null}, "macro.dbt.default__list_relations_without_caching": {"unique_id": "macro.dbt.default__list_relations_without_caching", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "name": "default__list_relations_without_caching", "macro_sql": "{% macro default__list_relations_without_caching(schema_relation) %}\n {{ exceptions.raise_not_implemented(\n 'list_relations_without_caching macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.635378, "supported_languages": null}, "macro.dbt.get_columns_in_relation": {"unique_id": "macro.dbt.get_columns_in_relation", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "get_columns_in_relation", "macro_sql": "{% macro get_columns_in_relation(relation) -%}\n {{ return(adapter.dispatch('get_columns_in_relation', 'dbt')(relation)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.63885, "supported_languages": null}, "macro.dbt.default__get_columns_in_relation": {"unique_id": "macro.dbt.default__get_columns_in_relation", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "default__get_columns_in_relation", "macro_sql": "{% macro default__get_columns_in_relation(relation) -%}\n {{ exceptions.raise_not_implemented(\n 'get_columns_in_relation macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, 
"patch_path": null, "arguments": [], "created_at": 1667573258.639395, "supported_languages": null}, "macro.dbt.sql_convert_columns_in_relation": {"unique_id": "macro.dbt.sql_convert_columns_in_relation", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "sql_convert_columns_in_relation", "macro_sql": "{% macro sql_convert_columns_in_relation(table) -%}\n {% set columns = [] %}\n {% for row in table %}\n {% do columns.append(api.Column(*row)) %}\n {% endfor %}\n {{ return(columns) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6401641, "supported_languages": null}, "macro.dbt.get_columns_in_query": {"unique_id": "macro.dbt.get_columns_in_query", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "get_columns_in_query", "macro_sql": "{% macro get_columns_in_query(select_sql) -%}\n {{ return(adapter.dispatch('get_columns_in_query', 'dbt')(select_sql)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.640618, "supported_languages": null}, "macro.dbt.default__get_columns_in_query": {"unique_id": "macro.dbt.default__get_columns_in_query", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "default__get_columns_in_query", "macro_sql": "{% macro default__get_columns_in_query(select_sql) %}\n {% call statement('get_columns_in_query', fetch_result=True, auto_begin=False) -%}\n select * from (\n {{ select_sql }}\n ) as __dbt_sbq\n where false\n limit 0\n {% endcall %}\n\n {{ return(load_result('get_columns_in_query').table.columns | map(attribute='name') | list) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.64155, "supported_languages": null}, "macro.dbt.alter_column_type": {"unique_id": "macro.dbt.alter_column_type", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "alter_column_type", "macro_sql": "{% macro alter_column_type(relation, column_name, new_column_type) -%}\n {{ return(adapter.dispatch('alter_column_type', 'dbt')(relation, column_name, new_column_type)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.642115, "supported_languages": null}, "macro.dbt.default__alter_column_type": {"unique_id": "macro.dbt.default__alter_column_type", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "default__alter_column_type", "macro_sql": "{% macro default__alter_column_type(relation, column_name, new_column_type) -%}\n {#\n 1. Create a new column (w/ temp name and correct type)\n 2. Copy data over to it\n 3. Drop the existing column (cascade!)\n 4. 
Rename the new column to existing column\n #}\n {%- set tmp_column = column_name + \"__dbt_alter\" -%}\n\n {% call statement('alter_column_type') %}\n alter table {{ relation }} add column {{ adapter.quote(tmp_column) }} {{ new_column_type }};\n update {{ relation }} set {{ adapter.quote(tmp_column) }} = {{ adapter.quote(column_name) }};\n alter table {{ relation }} drop column {{ adapter.quote(column_name) }} cascade;\n alter table {{ relation }} rename column {{ adapter.quote(tmp_column) }} to {{ adapter.quote(column_name) }}\n {% endcall %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.643635, "supported_languages": null}, "macro.dbt.alter_relation_add_remove_columns": {"unique_id": "macro.dbt.alter_relation_add_remove_columns", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "alter_relation_add_remove_columns", "macro_sql": "{% macro alter_relation_add_remove_columns(relation, add_columns = none, remove_columns = none) -%}\n {{ return(adapter.dispatch('alter_relation_add_remove_columns', 'dbt')(relation, add_columns, remove_columns)) }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__alter_relation_add_remove_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.644268, "supported_languages": null}, "macro.dbt.default__alter_relation_add_remove_columns": {"unique_id": "macro.dbt.default__alter_relation_add_remove_columns", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "name": "default__alter_relation_add_remove_columns", "macro_sql": "{% macro default__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}\n\n {% if add_columns is none %}\n {% set add_columns = [] %}\n {% endif %}\n {% if remove_columns is none %}\n {% set remove_columns = [] %}\n {% endif %}\n\n {% set sql -%}\n\n alter {{ relation.type }} {{ relation }}\n\n {% for column in add_columns %}\n add column {{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}\n {% endfor %}{{ ',' if add_columns and remove_columns }}\n\n {% for column in remove_columns %}\n drop column {{ column.name }}{{ ',' if not loop.last }}\n {% endfor %}\n\n {%- endset -%}\n\n {% do run_query(sql) %}\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.646218, "supported_languages": null}, "macro.dbt.build_ref_function": {"unique_id": "macro.dbt.build_ref_function", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "build_ref_function", "macro_sql": "{% macro build_ref_function(model) %}\n\n {%- set ref_dict = {} -%}\n {%- for _ref in model.refs -%}\n {%- set resolved = ref(*_ref) -%}\n {%- do ref_dict.update({_ref | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef ref(*args,dbt_load_df_function):\n refs = {{ ref_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(refs[key])\n\n{% 
endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.649544, "supported_languages": null}, "macro.dbt.build_source_function": {"unique_id": "macro.dbt.build_source_function", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "build_source_function", "macro_sql": "{% macro build_source_function(model) %}\n\n {%- set source_dict = {} -%}\n {%- for _source in model.sources -%}\n {%- set resolved = source(*_source) -%}\n {%- do source_dict.update({_source | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef source(*args, dbt_load_df_function):\n sources = {{ source_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(sources[key])\n\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6507342, "supported_languages": null}, "macro.dbt.build_config_dict": {"unique_id": "macro.dbt.build_config_dict", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "build_config_dict", "macro_sql": "{% macro build_config_dict(model) %}\n {%- set config_dict = {} -%}\n {%- for key in model.config.config_keys_used -%}\n {# weird type testing with enum, would be much easier to write this logic in Python! #}\n {%- if key == 'language' -%}\n {%- set value = 'python' -%}\n {%- endif -%}\n {%- set value = model.config[key] -%}\n {%- do config_dict.update({key: value}) -%}\n {%- endfor -%}\nconfig_dict = {{ config_dict }}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.652128, "supported_languages": null}, "macro.dbt.py_script_postfix": {"unique_id": "macro.dbt.py_script_postfix", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "py_script_postfix", "macro_sql": "{% macro py_script_postfix(model) %}\n# This part is user provided model code\n# you will need to copy the next section to run the code\n# COMMAND ----------\n# this part is dbt logic for get ref work, do not modify\n\n{{ build_ref_function(model ) }}\n{{ build_source_function(model ) }}\n{{ build_config_dict(model) }}\n\nclass config:\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def get(key, default=None):\n return config_dict.get(key, default)\n\nclass this:\n \"\"\"dbt.this() or dbt.this.identifier\"\"\"\n database = '{{ this.database }}'\n schema = '{{ this.schema }}'\n identifier = '{{ this.identifier }}'\n def __repr__(self):\n return '{{ this }}'\n\n\nclass dbtObj:\n def __init__(self, load_df_function) -> None:\n self.source = lambda *args: source(*args, dbt_load_df_function=load_df_function)\n self.ref = lambda *args: ref(*args, dbt_load_df_function=load_df_function)\n self.config = config\n self.this = this()\n self.is_incremental = {{ is_incremental() }}\n\n# COMMAND ----------\n{{py_script_comment()}}\n{% endmacro %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.build_ref_function", 
"macro.dbt.build_source_function", "macro.dbt.build_config_dict", "macro.dbt.is_incremental", "macro.dbt.py_script_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.653216, "supported_languages": null}, "macro.dbt.py_script_comment": {"unique_id": "macro.dbt.py_script_comment", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "name": "py_script_comment", "macro_sql": "{%macro py_script_comment()%}\n{%endmacro%}", "resource_type": "macro", "tags": [], "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.653416, "supported_languages": null}, "macro.dbt.test_unique": {"unique_id": "macro.dbt.test_unique", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "name": "test_unique", "macro_sql": "{% test unique(model, column_name) %}\n {% set macro = adapter.dispatch('test_unique', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__test_unique"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.655508, "supported_languages": null}, "macro.dbt.test_not_null": {"unique_id": "macro.dbt.test_not_null", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "name": "test_not_null", "macro_sql": "{% test not_null(model, column_name) %}\n {% set macro = adapter.dispatch('test_not_null', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__test_not_null"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.656121, "supported_languages": null}, "macro.dbt.test_accepted_values": {"unique_id": "macro.dbt.test_accepted_values", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "name": "test_accepted_values", "macro_sql": "{% test accepted_values(model, column_name, values, quote=True) %}\n {% set macro = adapter.dispatch('test_accepted_values', 'dbt') %}\n {{ macro(model, column_name, values, quote) }}\n{% endtest %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__test_accepted_values"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.6568232, "supported_languages": null}, "macro.dbt.test_relationships": {"unique_id": "macro.dbt.test_relationships", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "name": "test_relationships", "macro_sql": "{% test relationships(model, column_name, to, field) %}\n {% set macro = adapter.dispatch('test_relationships', 'dbt') %}\n {{ macro(model, column_name, to, field) }}\n{% endtest %}", "resource_type": "macro", "tags": [], "depends_on": {"macros": ["macro.dbt.default__test_relationships"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1667573258.657504, "supported_languages": null}}, "docs": 
{"dbt.__overview__": {"unique_id": "dbt.__overview__", "package_name": "dbt", "path": "overview.md", "original_file_path": "docs/overview.md", "name": "__overview__", "block_contents": "### Welcome!\n\nWelcome to the auto-generated documentation for your dbt project!\n\n### Navigation\n\nYou can use the `Project` and `Database` navigation tabs on the left side of the window to explore the models\nin your project.\n\n#### Project Tab\nThe `Project` tab mirrors the directory structure of your dbt project. In this tab, you can see all of the\nmodels defined in your dbt project, as well as models imported from dbt packages.\n\n#### Database Tab\nThe `Database` tab also exposes your models, but in a format that looks more like a database explorer. This view\nshows relations (tables and views) grouped into database schemas. Note that ephemeral models are _not_ shown\nin this interface, as they do not exist in the database.\n\n### Graph Exploration\nYou can click the blue icon on the bottom-right corner of the page to view the lineage graph of your models.\n\nOn model pages, you'll see the immediate parents and children of the model you're exploring. By clicking the `Expand`\nbutton at the top-right of this lineage pane, you'll be able to see all of the models that are used to build,\nor are built from, the model you're exploring.\n\nOnce expanded, you'll be able to use the `--select` and `--exclude` model selection syntax to filter the\nmodels in the graph. For more information on model selection, check out the [dbt docs](https://docs.getdbt.com/docs/model-selection-syntax).\n\nNote that you can also right-click on models to interactively filter and explore the graph.\n\n---\n\n### More information\n\n- [What is dbt](https://docs.getdbt.com/docs/introduction)?\n- Read the [dbt viewpoint](https://docs.getdbt.com/docs/viewpoint)\n- [Installation](https://docs.getdbt.com/docs/installation)\n- Join the [dbt Community](https://www.getdbt.com/community/) for questions and discussion"}}, "exposures": {}, "metrics": {"metric.test.my_metric": {"fqn": ["test", "my_metric"], "unique_id": "metric.test.my_metric", "package_name": "test", "path": "metric.yml", "original_file_path": "models/metric.yml", "name": "my_metric", "description": "", "label": "Count records", "calculation_method": "count", "timestamp": "updated_at", "expression": "*", "filters": [], "time_grains": ["day"], "dimensions": [], "window": null, "model": "ref('my_model')", "model_unique_id": null, "resource_type": "metric", "meta": {}, "tags": [], "config": {"enabled": true}, "unrendered_config": {}, "sources": [], "depends_on": {"macros": [], "nodes": ["model.test.my_model"]}, "refs": [["my_model"]], "metrics": [], "created_at": 1667573259.027725}}, "selectors": {}, "disabled": {}, "parent_map": {"model.test.my_model": [], "metric.test.my_metric": ["model.test.my_model"]}, "child_map": {"model.test.my_model": ["metric.test.my_metric"], "metric.test.my_metric": []}} +{"metadata": {"dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v8.json", "dbt_version": "1.4.0a1", "generated_at": "2022-12-12T13:54:37.804887Z", "invocation_id": "843eaaec-db3b-4406-87ec-a3651f124d69", "env": {}, "project_id": "098f6bcd4621d373cade4e832627b4f6", "user_id": null, "send_anonymous_usage_stats": false, "adapter_type": "postgres"}, "nodes": {"model.test.my_model": {"database": "dbt", "schema": "test16708532772964762671_test_previous_version_state", "name": "my_model", "resource_type": "model", "package_name": "test", "path": "my_model.sql", 
"original_file_path": "models/my_model.sql", "unique_id": "model.test.my_model", "fqn": ["test", "my_model"], "alias": "my_model", "checksum": {"name": "sha256", "checksum": "2b9123e04ab8bb798f7c565afdc3ee0e56fcd66b4bfbdb435b4891c878d947c5"}, "config": {"enabled": true, "alias": null, "schema": null, "database": null, "tags": [], "meta": {}, "materialized": "view", "incremental_strategy": null, "persist_docs": {}, "quoting": {}, "column_types": {}, "full_refresh": null, "unique_key": null, "on_schema_change": "ignore", "grants": {}, "packages": [], "docs": {"show": true, "node_color": null}, "post-hook": [], "pre-hook": []}, "tags": [], "description": "", "columns": {}, "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "build_path": null, "deferred": false, "unrendered_config": {}, "created_at": 1670853278.478401, "relation_name": "\"dbt\".\"test16708532772964762671_test_previous_version_state\".\"my_model\"", "raw_code": "select 1 as id", "language": "sql", "refs": [], "sources": [], "metrics": [], "depends_on": {"macros": [], "nodes": []}, "compiled_path": null}}, "sources": {}, "macros": {"macro.dbt_postgres.postgres__current_timestamp": {"name": "postgres__current_timestamp", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp", "macro_sql": "{% macro postgres__current_timestamp() -%}\n now()\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.828495, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_string_as_time": {"name": "postgres__snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_string_as_time", "macro_sql": "{% macro postgres__snapshot_string_as_time(timestamp) -%}\n {%- set result = \"'\" ~ timestamp ~ \"'::timestamp without time zone\" -%}\n {{ return(result) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829041, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_get_time": {"name": "postgres__snapshot_get_time", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_get_time", "macro_sql": "{% macro postgres__snapshot_get_time() -%}\n {{ current_timestamp() }}::timestamp without time zone\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829317, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_backcompat": {"name": "postgres__current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp_backcompat", "macro_sql": "{% macro postgres__current_timestamp_backcompat() %}\n current_timestamp::{{ type_timestamp() }}\n{% endmacro %}", "depends_on": 
{"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829592, "supported_languages": null}, "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat": {"name": "postgres__current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/timestamps.sql", "original_file_path": "macros/timestamps.sql", "unique_id": "macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro postgres__current_timestamp_in_utc_backcompat() %}\n (current_timestamp at time zone 'utc')::{{ type_timestamp() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.829864, "supported_languages": null}, "macro.dbt_postgres.postgres__get_catalog": {"name": "postgres__get_catalog", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/catalog.sql", "original_file_path": "macros/catalog.sql", "unique_id": "macro.dbt_postgres.postgres__get_catalog", "macro_sql": "{% macro postgres__get_catalog(information_schema, schemas) -%}\n\n {%- call statement('catalog', fetch_result=True) -%}\n {#\n If the user has multiple databases set and the first one is wrong, this will fail.\n But we won't fail in the case where there are multiple quoting-difference-only dbs, which is better.\n #}\n {% set database = information_schema.database %}\n {{ adapter.verify_database(database) }}\n\n select\n '{{ database }}' as table_database,\n sch.nspname as table_schema,\n tbl.relname as table_name,\n case tbl.relkind\n when 'v' then 'VIEW'\n else 'BASE TABLE'\n end as table_type,\n tbl_desc.description as table_comment,\n col.attname as column_name,\n col.attnum as column_index,\n pg_catalog.format_type(col.atttypid, col.atttypmod) as column_type,\n col_desc.description as column_comment,\n pg_get_userbyid(tbl.relowner) as table_owner\n\n from pg_catalog.pg_namespace sch\n join pg_catalog.pg_class tbl on tbl.relnamespace = sch.oid\n join pg_catalog.pg_attribute col on col.attrelid = tbl.oid\n left outer join pg_catalog.pg_description tbl_desc on (tbl_desc.objoid = tbl.oid and tbl_desc.objsubid = 0)\n left outer join pg_catalog.pg_description col_desc on (col_desc.objoid = tbl.oid and col_desc.objsubid = col.attnum)\n\n where (\n {%- for schema in schemas -%}\n upper(sch.nspname) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n )\n and not pg_is_other_temp_schema(sch.oid) -- not a temporary schema belonging to another session\n and tbl.relpersistence in ('p', 'u') -- [p]ermanent table or [u]nlogged table. Exclude [t]emporary tables\n and tbl.relkind in ('r', 'v', 'f', 'p') -- o[r]dinary table, [v]iew, [f]oreign table, [p]artitioned table. 
Other values are [i]ndex, [S]equence, [c]omposite type, [t]OAST table, [m]aterialized view\n and col.attnum > 0 -- negative numbers are used for system columns such as oid\n and not col.attisdropped -- column as not been dropped\n\n order by\n sch.nspname,\n tbl.relname,\n col.attnum\n\n {%- endcall -%}\n\n {{ return(load_result('catalog').table) }}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.832119, "supported_languages": null}, "macro.dbt_postgres.postgres_get_relations": {"name": "postgres_get_relations", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/relations.sql", "original_file_path": "macros/relations.sql", "unique_id": "macro.dbt_postgres.postgres_get_relations", "macro_sql": "{% macro postgres_get_relations () -%}\n\n {#\n -- in pg_depend, objid is the dependent, refobjid is the referenced object\n -- > a pg_depend entry indicates that the referenced object cannot be\n -- > dropped without also dropping the dependent object.\n #}\n\n {%- call statement('relations', fetch_result=True) -%}\n with relation as (\n select\n pg_rewrite.ev_class as class,\n pg_rewrite.oid as id\n from pg_rewrite\n ),\n class as (\n select\n oid as id,\n relname as name,\n relnamespace as schema,\n relkind as kind\n from pg_class\n ),\n dependency as (\n select distinct\n pg_depend.objid as id,\n pg_depend.refobjid as ref\n from pg_depend\n ),\n schema as (\n select\n pg_namespace.oid as id,\n pg_namespace.nspname as name\n from pg_namespace\n where nspname != 'information_schema' and nspname not like 'pg\\_%'\n ),\n referenced as (\n select\n relation.id AS id,\n referenced_class.name ,\n referenced_class.schema ,\n referenced_class.kind\n from relation\n join class as referenced_class on relation.class=referenced_class.id\n where referenced_class.kind in ('r', 'v')\n ),\n relationships as (\n select\n referenced.name as referenced_name,\n referenced.schema as referenced_schema_id,\n dependent_class.name as dependent_name,\n dependent_class.schema as dependent_schema_id,\n referenced.kind as kind\n from referenced\n join dependency on referenced.id=dependency.id\n join class as dependent_class on dependency.ref=dependent_class.id\n where\n (referenced.name != dependent_class.name or\n referenced.schema != dependent_class.schema)\n )\n\n select\n referenced_schema.name as referenced_schema,\n relationships.referenced_name as referenced_name,\n dependent_schema.name as dependent_schema,\n relationships.dependent_name as dependent_name\n from relationships\n join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id\n join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id\n group by referenced_schema, referenced_name, dependent_schema, dependent_name\n order by referenced_schema, referenced_name, dependent_schema, dependent_name;\n\n {%- endcall -%}\n\n {{ return(load_result('relations').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.833379, "supported_languages": null}, "macro.dbt_postgres.postgres__create_table_as": {"name": "postgres__create_table_as", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", 
"unique_id": "macro.dbt_postgres.postgres__create_table_as", "macro_sql": "{% macro postgres__create_table_as(temporary, relation, sql) -%}\n {%- set unlogged = config.get('unlogged', default=false) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary -%}\n temporary\n {%- elif unlogged -%}\n unlogged\n {%- endif %} table {{ relation }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.844162, "supported_languages": null}, "macro.dbt_postgres.postgres__get_create_index_sql": {"name": "postgres__get_create_index_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_create_index_sql", "macro_sql": "{% macro postgres__get_create_index_sql(relation, index_dict) -%}\n {%- set index_config = adapter.parse_index(index_dict) -%}\n {%- set comma_separated_columns = \", \".join(index_config.columns) -%}\n {%- set index_name = index_config.render(relation) -%}\n\n create {% if index_config.unique -%}\n unique\n {%- endif %} index if not exists\n \"{{ index_name }}\"\n on {{ relation }} {% if index_config.type -%}\n using {{ index_config.type }}\n {%- endif %}\n ({{ comma_separated_columns }});\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.84543, "supported_languages": null}, "macro.dbt_postgres.postgres__create_schema": {"name": "postgres__create_schema", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__create_schema", "macro_sql": "{% macro postgres__create_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier().include(database=False) }}\n {%- endcall -%}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.846217, "supported_languages": null}, "macro.dbt_postgres.postgres__drop_schema": {"name": "postgres__drop_schema", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__drop_schema", "macro_sql": "{% macro postgres__drop_schema(relation) -%}\n {% if relation.database -%}\n {{ adapter.verify_database(relation.database) }}\n {%- endif -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier().include(database=False) }} cascade\n {%- endcall -%}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.847004, "supported_languages": null}, "macro.dbt_postgres.postgres__get_columns_in_relation": {"name": "postgres__get_columns_in_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", 
"original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_columns_in_relation", "macro_sql": "{% macro postgres__get_columns_in_relation(relation) -%}\n {% call statement('get_columns_in_relation', fetch_result=True) %}\n select\n column_name,\n data_type,\n character_maximum_length,\n numeric_precision,\n numeric_scale\n\n from {{ relation.information_schema('columns') }}\n where table_name = '{{ relation.identifier }}'\n {% if relation.schema %}\n and table_schema = '{{ relation.schema }}'\n {% endif %}\n order by ordinal_position\n\n {% endcall %}\n {% set table = load_result('get_columns_in_relation').table %}\n {{ return(sql_convert_columns_in_relation(table)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.sql_convert_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8481832, "supported_languages": null}, "macro.dbt_postgres.postgres__list_relations_without_caching": {"name": "postgres__list_relations_without_caching", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__list_relations_without_caching", "macro_sql": "{% macro postgres__list_relations_without_caching(schema_relation) %}\n {% call statement('list_relations_without_caching', fetch_result=True) -%}\n select\n '{{ schema_relation.database }}' as database,\n tablename as name,\n schemaname as schema,\n 'table' as type\n from pg_tables\n where schemaname ilike '{{ schema_relation.schema }}'\n union all\n select\n '{{ schema_relation.database }}' as database,\n viewname as name,\n schemaname as schema,\n 'view' as type\n from pg_views\n where schemaname ilike '{{ schema_relation.schema }}'\n {% endcall %}\n {{ return(load_result('list_relations_without_caching').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8492, "supported_languages": null}, "macro.dbt_postgres.postgres__information_schema_name": {"name": "postgres__information_schema_name", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__information_schema_name", "macro_sql": "{% macro postgres__information_schema_name(database) -%}\n {% if database_name -%}\n {{ adapter.verify_database(database_name) }}\n {%- endif -%}\n information_schema\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.849643, "supported_languages": null}, "macro.dbt_postgres.postgres__list_schemas": {"name": "postgres__list_schemas", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__list_schemas", "macro_sql": "{% macro postgres__list_schemas(database) %}\n {% if database -%}\n {{ adapter.verify_database(database) }}\n {%- endif -%}\n {% call statement('list_schemas', fetch_result=True, auto_begin=False) %}\n select distinct nspname from pg_namespace\n {% endcall %}\n {{ return(load_result('list_schemas').table) }}\n{% endmacro %}", "depends_on": 
{"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8505101, "supported_languages": null}, "macro.dbt_postgres.postgres__check_schema_exists": {"name": "postgres__check_schema_exists", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__check_schema_exists", "macro_sql": "{% macro postgres__check_schema_exists(information_schema, schema) -%}\n {% if information_schema.database -%}\n {{ adapter.verify_database(information_schema.database) }}\n {%- endif -%}\n {% call statement('check_schema_exists', fetch_result=True, auto_begin=False) %}\n select count(*) from pg_namespace where nspname = '{{ schema }}'\n {% endcall %}\n {{ return(load_result('check_schema_exists').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.851476, "supported_languages": null}, "macro.dbt_postgres.postgres__make_relation_with_suffix": {"name": "postgres__make_relation_with_suffix", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_relation_with_suffix", "macro_sql": "{% macro postgres__make_relation_with_suffix(base_relation, suffix, dstring) %}\n {% if dstring %}\n {% set dt = modules.datetime.datetime.now() %}\n {% set dtstring = dt.strftime(\"%H%M%S%f\") %}\n {% set suffix = suffix ~ dtstring %}\n {% endif %}\n {% set suffix_length = suffix|length %}\n {% set relation_max_name_length = base_relation.relation_max_name_length() %}\n {% if suffix_length > relation_max_name_length %}\n {% do exceptions.raise_compiler_error('Relation suffix is too long (' ~ suffix_length ~ ' characters). 
Maximum length is ' ~ relation_max_name_length ~ ' characters.') %}\n {% endif %}\n {% set identifier = base_relation.identifier[:relation_max_name_length - suffix_length] ~ suffix %}\n\n {{ return(base_relation.incorporate(path={\"identifier\": identifier })) }}\n\n {% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.853593, "supported_languages": null}, "macro.dbt_postgres.postgres__make_intermediate_relation": {"name": "postgres__make_intermediate_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_intermediate_relation", "macro_sql": "{% macro postgres__make_intermediate_relation(base_relation, suffix) %}\n {{ return(postgres__make_relation_with_suffix(base_relation, suffix, dstring=False)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.854094, "supported_languages": null}, "macro.dbt_postgres.postgres__make_temp_relation": {"name": "postgres__make_temp_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_temp_relation", "macro_sql": "{% macro postgres__make_temp_relation(base_relation, suffix) %}\n {% set temp_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=True) %}\n {{ return(temp_relation.incorporate(path={\"schema\": none,\n \"database\": none})) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.855048, "supported_languages": null}, "macro.dbt_postgres.postgres__make_backup_relation": {"name": "postgres__make_backup_relation", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__make_backup_relation", "macro_sql": "{% macro postgres__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {% set backup_relation = postgres__make_relation_with_suffix(base_relation, suffix, dstring=False) %}\n {{ return(backup_relation.incorporate(type=backup_relation_type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_relation_with_suffix"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.855862, "supported_languages": null}, "macro.dbt_postgres.postgres_escape_comment": {"name": "postgres_escape_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres_escape_comment", "macro_sql": "{% macro postgres_escape_comment(comment) -%}\n {% if comment is not string %}\n {% do exceptions.raise_compiler_error('cannot escape a non-string: ' ~ comment) %}\n {% endif %}\n {%- set magic = '$dbt_comment_literal_block$' -%}\n {%- if magic in comment -%}\n {%- do exceptions.raise_compiler_error('The string ' ~ 
magic ~ ' is not allowed in comments.') -%}\n {%- endif -%}\n {{ magic }}{{ comment }}{{ magic }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.856958, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_relation_comment": {"name": "postgres__alter_relation_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__alter_relation_comment", "macro_sql": "{% macro postgres__alter_relation_comment(relation, comment) %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on {{ relation.type }} {{ relation }} is {{ escaped_comment }};\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8575392, "supported_languages": null}, "macro.dbt_postgres.postgres__alter_column_comment": {"name": "postgres__alter_column_comment", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__alter_column_comment", "macro_sql": "{% macro postgres__alter_column_comment(relation, column_dict) %}\n {% set existing_columns = adapter.get_columns_in_relation(relation) | map(attribute=\"name\") | list %}\n {% for column_name in column_dict if (column_name in existing_columns) %}\n {% set comment = column_dict[column_name]['description'] %}\n {% set escaped_comment = postgres_escape_comment(comment) %}\n comment on column {{ relation }}.{{ adapter.quote(column_name) if column_dict[column_name]['quote'] else column_name }} is {{ escaped_comment }};\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres_escape_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8590431, "supported_languages": null}, "macro.dbt_postgres.postgres__get_show_grant_sql": {"name": "postgres__get_show_grant_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__get_show_grant_sql", "macro_sql": "\n\n{%- macro postgres__get_show_grant_sql(relation) -%}\n select grantee, privilege_type\n from {{ relation.information_schema('role_table_grants') }}\n where grantor = current_role\n and grantee != current_role\n and table_schema = '{{ relation.schema }}'\n and table_name = '{{ relation.identifier }}'\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8596349, "supported_languages": null}, "macro.dbt_postgres.postgres__copy_grants": {"name": "postgres__copy_grants", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/adapters.sql", "original_file_path": "macros/adapters.sql", "unique_id": "macro.dbt_postgres.postgres__copy_grants", "macro_sql": "{% macro postgres__copy_grants() %}\n {{ return(False) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853277.85995, "supported_languages": null}, "macro.dbt_postgres.postgres__get_incremental_default_sql": {"name": "postgres__get_incremental_default_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/materializations/incremental_strategies.sql", "original_file_path": "macros/materializations/incremental_strategies.sql", "unique_id": "macro.dbt_postgres.postgres__get_incremental_default_sql", "macro_sql": "{% macro postgres__get_incremental_default_sql(arg_dict) %}\n\n {% if arg_dict[\"unique_key\"] %}\n {% do return(get_incremental_delete_insert_sql(arg_dict)) %}\n {% else %}\n {% do return(get_incremental_append_sql(arg_dict)) %}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_incremental_delete_insert_sql", "macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.860895, "supported_languages": null}, "macro.dbt_postgres.postgres__snapshot_merge_sql": {"name": "postgres__snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/materializations/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshot_merge.sql", "unique_id": "macro.dbt_postgres.postgres__snapshot_merge_sql", "macro_sql": "{% macro postgres__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n update {{ target }}\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_scd_id::text = {{ target }}.dbt_scd_id::text\n and DBT_INTERNAL_SOURCE.dbt_change_type::text in ('update'::text, 'delete'::text)\n and {{ target }}.dbt_valid_to is null;\n\n insert into {{ target }} ({{ insert_cols_csv }})\n select {% for column in insert_cols -%}\n DBT_INTERNAL_SOURCE.{{ column }} {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n from {{ source }} as DBT_INTERNAL_SOURCE\n where DBT_INTERNAL_SOURCE.dbt_change_type::text = 'insert'::text;\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8624861, "supported_languages": null}, "macro.dbt_postgres.postgres__dateadd": {"name": "postgres__dateadd", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt_postgres.postgres__dateadd", "macro_sql": "{% macro postgres__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n {{ from_date_or_timestamp }} + ((interval '1 {{ datepart }}') * ({{ interval }}))\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.863015, "supported_languages": null}, "macro.dbt_postgres.postgres__listagg": {"name": "postgres__listagg", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt_postgres.postgres__listagg", "macro_sql": "{% macro postgres__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n (array_agg(\n {{ measure }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n ))[1:{{ limit_num }}],\n {{ 
delimiter_text }}\n )\n {%- else %}\n string_agg(\n {{ measure }},\n {{ delimiter_text }}\n {% if order_by_clause -%}\n {{ order_by_clause }}\n {%- endif %}\n )\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.864436, "supported_languages": null}, "macro.dbt_postgres.postgres__datediff": {"name": "postgres__datediff", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": "macro.dbt_postgres.postgres__datediff", "macro_sql": "{% macro postgres__datediff(first_date, second_date, datepart) -%}\n\n {% if datepart == 'year' %}\n (date_part('year', ({{second_date}})::date) - date_part('year', ({{first_date}})::date))\n {% elif datepart == 'quarter' %}\n ({{ datediff(first_date, second_date, 'year') }} * 4 + date_part('quarter', ({{second_date}})::date) - date_part('quarter', ({{first_date}})::date))\n {% elif datepart == 'month' %}\n ({{ datediff(first_date, second_date, 'year') }} * 12 + date_part('month', ({{second_date}})::date) - date_part('month', ({{first_date}})::date))\n {% elif datepart == 'day' %}\n (({{second_date}})::date - ({{first_date}})::date)\n {% elif datepart == 'week' %}\n ({{ datediff(first_date, second_date, 'day') }} / 7 + case\n when date_part('dow', ({{first_date}})::timestamp) <= date_part('dow', ({{second_date}})::timestamp) then\n case when {{first_date}} <= {{second_date}} then 0 else -1 end\n else\n case when {{first_date}} <= {{second_date}} then 1 else 0 end\n end)\n {% elif datepart == 'hour' %}\n ({{ datediff(first_date, second_date, 'day') }} * 24 + date_part('hour', ({{second_date}})::timestamp) - date_part('hour', ({{first_date}})::timestamp))\n {% elif datepart == 'minute' %}\n ({{ datediff(first_date, second_date, 'hour') }} * 60 + date_part('minute', ({{second_date}})::timestamp) - date_part('minute', ({{first_date}})::timestamp))\n {% elif datepart == 'second' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60 + floor(date_part('second', ({{second_date}})::timestamp)) - floor(date_part('second', ({{first_date}})::timestamp)))\n {% elif datepart == 'millisecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000 + floor(date_part('millisecond', ({{second_date}})::timestamp)) - floor(date_part('millisecond', ({{first_date}})::timestamp)))\n {% elif datepart == 'microsecond' %}\n ({{ datediff(first_date, second_date, 'minute') }} * 60000000 + floor(date_part('microsecond', ({{second_date}})::timestamp)) - floor(date_part('microsecond', ({{first_date}})::timestamp)))\n {% else %}\n {{ exceptions.raise_compiler_error(\"Unsupported datepart for macro datediff in postgres: {!r}\".format(datepart)) }}\n {% endif %}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.871541, "supported_languages": null}, "macro.dbt_postgres.postgres__any_value": {"name": "postgres__any_value", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt_postgres.postgres__any_value", "macro_sql": "{% macro postgres__any_value(expression) -%}\n\n min({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", 
"meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8719308, "supported_languages": null}, "macro.dbt_postgres.postgres__last_day": {"name": "postgres__last_day", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt_postgres.postgres__last_day", "macro_sql": "{% macro postgres__last_day(date, datepart) -%}\n\n {%- if datepart == 'quarter' -%}\n -- postgres dateadd does not support quarter interval.\n cast(\n {{dbt.dateadd('day', '-1',\n dbt.dateadd('month', '3', dbt.date_trunc(datepart, date))\n )}}\n as date)\n {%- else -%}\n {{dbt.default_last_day(date, datepart)}}\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc", "macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8731148, "supported_languages": null}, "macro.dbt_postgres.postgres__split_part": {"name": "postgres__split_part", "resource_type": "macro", "package_name": "dbt_postgres", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt_postgres.postgres__split_part", "macro_sql": "{% macro postgres__split_part(string_text, delimiter_text, part_number) %}\n\n {% if part_number >= 0 %}\n {{ dbt.default__split_part(string_text, delimiter_text, part_number) }}\n {% else %}\n {{ dbt._split_part_negative(string_text, delimiter_text, part_number) }}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__split_part", "macro.dbt._split_part_negative"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8740962, "supported_languages": null}, "macro.dbt.run_hooks": {"name": "run_hooks", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.run_hooks", "macro_sql": "{% macro run_hooks(hooks, inside_transaction=True) %}\n {% for hook in hooks | selectattr('transaction', 'equalto', inside_transaction) %}\n {% if not inside_transaction and loop.first %}\n {% call statement(auto_begin=inside_transaction) %}\n commit;\n {% endcall %}\n {% endif %}\n {% set rendered = render(hook.get('sql')) | trim %}\n {% if (rendered | length) > 0 %}\n {% call statement(auto_begin=inside_transaction) %}\n {{ rendered }}\n {% endcall %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.876752, "supported_languages": null}, "macro.dbt.make_hook_config": {"name": "make_hook_config", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.make_hook_config", "macro_sql": "{% macro make_hook_config(sql, inside_transaction) %}\n {{ tojson({\"sql\": sql, \"transaction\": inside_transaction}) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8772988, "supported_languages": null}, 
"macro.dbt.before_begin": {"name": "before_begin", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.before_begin", "macro_sql": "{% macro before_begin(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.877683, "supported_languages": null}, "macro.dbt.in_transaction": {"name": "in_transaction", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.in_transaction", "macro_sql": "{% macro in_transaction(sql) %}\n {{ make_hook_config(sql, inside_transaction=True) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.878058, "supported_languages": null}, "macro.dbt.after_commit": {"name": "after_commit", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/hooks.sql", "original_file_path": "macros/materializations/hooks.sql", "unique_id": "macro.dbt.after_commit", "macro_sql": "{% macro after_commit(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_hook_config"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.878428, "supported_languages": null}, "macro.dbt.set_sql_header": {"name": "set_sql_header", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.set_sql_header", "macro_sql": "{% macro set_sql_header(config) -%}\n {{ config.set('sql_header', caller()) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.8792682, "supported_languages": null}, "macro.dbt.should_full_refresh": {"name": "should_full_refresh", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.should_full_refresh", "macro_sql": "{% macro should_full_refresh() %}\n {% set config_full_refresh = config.get('full_refresh') %}\n {% if config_full_refresh is none %}\n {% set config_full_refresh = flags.FULL_REFRESH %}\n {% endif %}\n {% do return(config_full_refresh) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.880049, "supported_languages": null}, "macro.dbt.should_store_failures": {"name": "should_store_failures", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/configs.sql", "original_file_path": "macros/materializations/configs.sql", "unique_id": "macro.dbt.should_store_failures", "macro_sql": "{% macro should_store_failures() %}\n {% set config_store_failures = config.get('store_failures') %}\n {% if config_store_failures is none %}\n {% set 
config_store_failures = flags.STORE_FAILURES %}\n {% endif %}\n {% do return(config_store_failures) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.880833, "supported_languages": null}, "macro.dbt.snapshot_merge_sql": {"name": "snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "unique_id": "macro.dbt.snapshot_merge_sql", "macro_sql": "{% macro snapshot_merge_sql(target, source, insert_cols) -%}\n {{ adapter.dispatch('snapshot_merge_sql', 'dbt')(target, source, insert_cols) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.881776, "supported_languages": null}, "macro.dbt.default__snapshot_merge_sql": {"name": "default__snapshot_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot_merge.sql", "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", "unique_id": "macro.dbt.default__snapshot_merge_sql", "macro_sql": "{% macro default__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on DBT_INTERNAL_SOURCE.dbt_scd_id = DBT_INTERNAL_DEST.dbt_scd_id\n\n when matched\n and DBT_INTERNAL_DEST.dbt_valid_to is null\n and DBT_INTERNAL_SOURCE.dbt_change_type in ('update', 'delete')\n then update\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n\n when not matched\n and DBT_INTERNAL_SOURCE.dbt_change_type = 'insert'\n then insert ({{ insert_cols_csv }})\n values ({{ insert_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.882448, "supported_languages": null}, "macro.dbt.strategy_dispatch": {"name": "strategy_dispatch", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.strategy_dispatch", "macro_sql": "{% macro strategy_dispatch(name) -%}\n{% set original_name = name %}\n {% if '.' 
in name %}\n {% set package_name, name = name.split(\".\", 1) %}\n {% else %}\n {% set package_name = none %}\n {% endif %}\n\n {% if package_name is none %}\n {% set package_context = context %}\n {% elif package_name in context %}\n {% set package_context = context[package_name] %}\n {% else %}\n {% set error_msg %}\n Could not find package '{{package_name}}', called with '{{original_name}}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n\n {%- set search_name = 'snapshot_' ~ name ~ '_strategy' -%}\n\n {% if search_name not in package_context %}\n {% set error_msg %}\n The specified strategy macro '{{name}}' was not found in package '{{ package_name }}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n {{ return(package_context[search_name]) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.88957, "supported_languages": null}, "macro.dbt.snapshot_hash_arguments": {"name": "snapshot_hash_arguments", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_hash_arguments", "macro_sql": "{% macro snapshot_hash_arguments(args) -%}\n {{ adapter.dispatch('snapshot_hash_arguments', 'dbt')(args) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.89017, "supported_languages": null}, "macro.dbt.default__snapshot_hash_arguments": {"name": "default__snapshot_hash_arguments", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.default__snapshot_hash_arguments", "macro_sql": "{% macro default__snapshot_hash_arguments(args) -%}\n md5({%- for arg in args -%}\n coalesce(cast({{ arg }} as varchar ), '')\n {% if not loop.last %} || '|' || {% endif %}\n {%- endfor -%})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.890751, "supported_languages": null}, "macro.dbt.snapshot_timestamp_strategy": {"name": "snapshot_timestamp_strategy", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_timestamp_strategy", "macro_sql": "{% macro snapshot_timestamp_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set primary_key = config['unique_key'] %}\n {% set updated_at = config['updated_at'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n\n {#/*\n The snapshot relation might not have an {{ updated_at }} value if the\n snapshot strategy is changed from `check` to `timestamp`. 
We\n should use a dbt-created column for the comparison in the snapshot\n table instead of assuming that the user-supplied {{ updated_at }}\n will be present in the historical data.\n\n See https://github.com/dbt-labs/dbt-core/issues/2350\n */ #}\n {% set row_changed_expr -%}\n ({{ snapshotted_rel }}.dbt_valid_from < {{ current_rel }}.{{ updated_at }})\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.892682, "supported_languages": null}, "macro.dbt.snapshot_string_as_time": {"name": "snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_string_as_time", "macro_sql": "{% macro snapshot_string_as_time(timestamp) -%}\n {{ adapter.dispatch('snapshot_string_as_time', 'dbt')(timestamp) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_string_as_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.893109, "supported_languages": null}, "macro.dbt.default__snapshot_string_as_time": {"name": "default__snapshot_string_as_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.default__snapshot_string_as_time", "macro_sql": "{% macro default__snapshot_string_as_time(timestamp) %}\n {% do exceptions.raise_not_implemented(\n 'snapshot_string_as_time macro not implemented for adapter '+adapter.type()\n ) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.893554, "supported_languages": null}, "macro.dbt.snapshot_check_all_get_existing_columns": {"name": "snapshot_check_all_get_existing_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_check_all_get_existing_columns", "macro_sql": "{% macro snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) -%}\n {%- if not target_exists -%}\n {#-- no table yet -> return whatever the query does --#}\n {{ return((false, query_columns)) }}\n {%- endif -%}\n\n {#-- handle any schema changes --#}\n {%- set target_relation = adapter.get_relation(database=node.database, schema=node.schema, identifier=node.alias) -%}\n\n {% if check_cols_config == 'all' %}\n {%- set query_columns = get_columns_in_query(node['compiled_code']) -%}\n\n {% elif check_cols_config is iterable and (check_cols_config | length) > 0 %}\n {#-- query for proper casing/quoting, to support comparison below --#}\n {%- set select_check_cols_from_target -%}\n select {{ check_cols_config | join(', ') }} from ({{ node['compiled_code'] }}) subq\n {%- 
endset -%}\n {% set query_columns = get_columns_in_query(select_check_cols_from_target) %}\n\n {% else %}\n {% do exceptions.raise_compiler_error(\"Invalid value for 'check_cols': \" ~ check_cols_config) %}\n {% endif %}\n\n {%- set existing_cols = adapter.get_columns_in_relation(target_relation) | map(attribute = 'name') | list -%}\n {%- set ns = namespace() -%} {#-- handle for-loop scoping with a namespace --#}\n {%- set ns.column_added = false -%}\n\n {%- set intersection = [] -%}\n {%- for col in query_columns -%}\n {%- if col in existing_cols -%}\n {%- do intersection.append(adapter.quote(col)) -%}\n {%- else -%}\n {% set ns.column_added = true %}\n {%- endif -%}\n {%- endfor -%}\n {{ return((ns.column_added, intersection)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.897105, "supported_languages": null}, "macro.dbt.snapshot_check_strategy": {"name": "snapshot_check_strategy", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/strategies.sql", "original_file_path": "macros/materializations/snapshots/strategies.sql", "unique_id": "macro.dbt.snapshot_check_strategy", "macro_sql": "{% macro snapshot_check_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set check_cols_config = config['check_cols'] %}\n {% set primary_key = config['unique_key'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n {% set updated_at = config.get('updated_at', snapshot_get_time()) %}\n\n {% set column_added = false %}\n\n {% set column_added, check_cols = snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) %}\n\n {%- set row_changed_expr -%}\n (\n {%- if column_added -%}\n {{ get_true_sql() }}\n {%- else -%}\n {%- for col in check_cols -%}\n {{ snapshotted_rel }}.{{ col }} != {{ current_rel }}.{{ col }}\n or\n (\n (({{ snapshotted_rel }}.{{ col }} is null) and not ({{ current_rel }}.{{ col }} is null))\n or\n ((not {{ snapshotted_rel }}.{{ col }} is null) and ({{ current_rel }}.{{ col }} is null))\n )\n {%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n {%- endif -%}\n )\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.snapshot_get_time", "macro.dbt.snapshot_check_all_get_existing_columns", "macro.dbt.get_true_sql", "macro.dbt.snapshot_hash_arguments"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.90045, "supported_languages": null}, "macro.dbt.create_columns": {"name": "create_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.create_columns", "macro_sql": "{% macro create_columns(relation, columns) %}\n {{ adapter.dispatch('create_columns', 'dbt')(relation, columns) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853277.9079978, "supported_languages": null}, "macro.dbt.default__create_columns": {"name": "default__create_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__create_columns", "macro_sql": "{% macro default__create_columns(relation, columns) %}\n {% for column in columns %}\n {% call statement() %}\n alter table {{ relation }} add column \"{{ column.name }}\" {{ column.data_type }};\n {% endcall %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.908781, "supported_languages": null}, "macro.dbt.post_snapshot": {"name": "post_snapshot", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.post_snapshot", "macro_sql": "{% macro post_snapshot(staging_relation) %}\n {{ adapter.dispatch('post_snapshot', 'dbt')(staging_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.909224, "supported_languages": null}, "macro.dbt.default__post_snapshot": {"name": "default__post_snapshot", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__post_snapshot", "macro_sql": "{% macro default__post_snapshot(staging_relation) %}\n {# no-op #}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.909463, "supported_languages": null}, "macro.dbt.get_true_sql": {"name": "get_true_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.get_true_sql", "macro_sql": "{% macro get_true_sql() %}\n {{ adapter.dispatch('get_true_sql', 'dbt')() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_true_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9098508, "supported_languages": null}, "macro.dbt.default__get_true_sql": {"name": "default__get_true_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__get_true_sql", "macro_sql": "{% macro default__get_true_sql() %}\n {{ return('TRUE') }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9101608, "supported_languages": null}, "macro.dbt.snapshot_staging_table": {"name": "snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": 
"macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.snapshot_staging_table", "macro_sql": "{% macro snapshot_staging_table(strategy, source_sql, target_relation) -%}\n {{ adapter.dispatch('snapshot_staging_table', 'dbt')(strategy, source_sql, target_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__snapshot_staging_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.910683, "supported_languages": null}, "macro.dbt.default__snapshot_staging_table": {"name": "default__snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__snapshot_staging_table", "macro_sql": "{% macro default__snapshot_staging_table(strategy, source_sql, target_relation) -%}\n\n with snapshot_query as (\n\n {{ source_sql }}\n\n ),\n\n snapshotted_data as (\n\n select *,\n {{ strategy.unique_key }} as dbt_unique_key\n\n from {{ target_relation }}\n where dbt_valid_to is null\n\n ),\n\n insertions_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to,\n {{ strategy.scd_id }} as dbt_scd_id\n\n from snapshot_query\n ),\n\n updates_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n {{ strategy.updated_at }} as dbt_valid_to\n\n from snapshot_query\n ),\n\n {%- if strategy.invalidate_hard_deletes %}\n\n deletes_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key\n from snapshot_query\n ),\n {% endif %}\n\n insertions as (\n\n select\n 'insert' as dbt_change_type,\n source_data.*\n\n from insertions_source_data as source_data\n left outer join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where snapshotted_data.dbt_unique_key is null\n or (\n snapshotted_data.dbt_unique_key is not null\n and (\n {{ strategy.row_changed }}\n )\n )\n\n ),\n\n updates as (\n\n select\n 'update' as dbt_change_type,\n source_data.*,\n snapshotted_data.dbt_scd_id\n\n from updates_source_data as source_data\n join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where (\n {{ strategy.row_changed }}\n )\n )\n\n {%- if strategy.invalidate_hard_deletes -%}\n ,\n\n deletes as (\n\n select\n 'delete' as dbt_change_type,\n source_data.*,\n {{ snapshot_get_time() }} as dbt_valid_from,\n {{ snapshot_get_time() }} as dbt_updated_at,\n {{ snapshot_get_time() }} as dbt_valid_to,\n snapshotted_data.dbt_scd_id\n\n from snapshotted_data\n left join deletes_source_data as source_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where source_data.dbt_unique_key is null\n )\n {%- endif %}\n\n select * from insertions\n union all\n select * from updates\n {%- if strategy.invalidate_hard_deletes %}\n union all\n select * from deletes\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.snapshot_get_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.912821, "supported_languages": null}, 
"macro.dbt.build_snapshot_table": {"name": "build_snapshot_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.build_snapshot_table", "macro_sql": "{% macro build_snapshot_table(strategy, sql) -%}\n {{ adapter.dispatch('build_snapshot_table', 'dbt')(strategy, sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__build_snapshot_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.913307, "supported_languages": null}, "macro.dbt.default__build_snapshot_table": {"name": "default__build_snapshot_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.default__build_snapshot_table", "macro_sql": "{% macro default__build_snapshot_table(strategy, sql) %}\n\n select *,\n {{ strategy.scd_id }} as dbt_scd_id,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to\n from (\n {{ sql }}\n ) sbq\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.913965, "supported_languages": null}, "macro.dbt.build_snapshot_staging_table": {"name": "build_snapshot_staging_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/helpers.sql", "original_file_path": "macros/materializations/snapshots/helpers.sql", "unique_id": "macro.dbt.build_snapshot_staging_table", "macro_sql": "{% macro build_snapshot_staging_table(strategy, sql, target_relation) %}\n {% set temp_relation = make_temp_relation(target_relation) %}\n\n {% set select = snapshot_staging_table(strategy, sql, target_relation) %}\n\n {% call statement('build_snapshot_staging_relation') %}\n {{ create_table_as(True, temp_relation, select) }}\n {% endcall %}\n\n {% do return(temp_relation) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.make_temp_relation", "macro.dbt.snapshot_staging_table", "macro.dbt.statement", "macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.915048, "supported_languages": null}, "macro.dbt.materialization_snapshot_default": {"name": "materialization_snapshot_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/snapshots/snapshot.sql", "original_file_path": "macros/materializations/snapshots/snapshot.sql", "unique_id": "macro.dbt.materialization_snapshot_default", "macro_sql": "{% materialization snapshot, default %}\n {%- set config = model['config'] -%}\n\n {%- set target_table = model.get('alias', model.get('name')) -%}\n\n {%- set strategy_name = config.get('strategy') -%}\n {%- set unique_key = config.get('unique_key') %}\n -- grab current tables grants config for comparision later on\n {%- set grant_config = config.get('grants') -%}\n\n {% set target_relation_exists, target_relation = get_or_create_relation(\n database=model.database,\n schema=model.schema,\n identifier=target_table,\n type='table') -%}\n\n {%- if not target_relation.is_table -%}\n {% do 
exceptions.relation_wrong_type(target_relation, 'table') %}\n {%- endif -%}\n\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set strategy_macro = strategy_dispatch(strategy_name) %}\n {% set strategy = strategy_macro(model, \"snapshotted_data\", \"source_data\", config, target_relation_exists) %}\n\n {% if not target_relation_exists %}\n\n {% set build_sql = build_snapshot_table(strategy, model['compiled_code']) %}\n {% set final_sql = create_table_as(False, target_relation, build_sql) %}\n\n {% else %}\n\n {{ adapter.valid_snapshot_target(target_relation) }}\n\n {% set staging_table = build_snapshot_staging_table(strategy, sql, target_relation) %}\n\n -- this may no-op if the database does not require column expansion\n {% do adapter.expand_target_column_types(from_relation=staging_table,\n to_relation=target_relation) %}\n\n {% set missing_columns = adapter.get_missing_columns(staging_table, target_relation)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% do create_columns(target_relation, missing_columns) %}\n\n {% set source_columns = adapter.get_columns_in_relation(staging_table)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% set quoted_source_columns = [] %}\n {% for column in source_columns %}\n {% do quoted_source_columns.append(adapter.quote(column.name)) %}\n {% endfor %}\n\n {% set final_sql = snapshot_merge_sql(\n target = target_relation,\n source = staging_table,\n insert_cols = quoted_source_columns\n )\n %}\n\n {% endif %}\n\n {% call statement('main') %}\n {{ final_sql }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(target_relation_exists, full_refresh_mode=False) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if not target_relation_exists %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {% if staging_table is defined %}\n {% do post_snapshot(staging_table) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.get_or_create_relation", "macro.dbt.run_hooks", "macro.dbt.strategy_dispatch", "macro.dbt.build_snapshot_table", "macro.dbt.create_table_as", "macro.dbt.build_snapshot_staging_table", "macro.dbt.create_columns", "macro.dbt.snapshot_merge_sql", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes", "macro.dbt.post_snapshot"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.928437, "supported_languages": ["sql"]}, "macro.dbt.materialization_test_default": {"name": "materialization_test_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/test.sql", "original_file_path": "macros/materializations/tests/test.sql", "unique_id": "macro.dbt.materialization_test_default", "macro_sql": "{%- 
materialization test, default -%}\n\n {% set relations = [] %}\n\n {% if should_store_failures() %}\n\n {% set identifier = model['alias'] %}\n {% set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n {% set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database, type='table') -%} %}\n\n {% if old_relation %}\n {% do adapter.drop_relation(old_relation) %}\n {% endif %}\n\n {% call statement(auto_begin=True) %}\n {{ create_table_as(False, target_relation, sql) }}\n {% endcall %}\n\n {% do relations.append(target_relation) %}\n\n {% set main_sql %}\n select *\n from {{ target_relation }}\n {% endset %}\n\n {{ adapter.commit() }}\n\n {% else %}\n\n {% set main_sql = sql %}\n\n {% endif %}\n\n {% set limit = config.get('limit') %}\n {% set fail_calc = config.get('fail_calc') %}\n {% set warn_if = config.get('warn_if') %}\n {% set error_if = config.get('error_if') %}\n\n {% call statement('main', fetch_result=True) -%}\n\n {{ get_test_sql(main_sql, fail_calc, warn_if, error_if, limit)}}\n\n {%- endcall %}\n\n {{ return({'relations': relations}) }}\n\n{%- endmaterialization -%}", "depends_on": {"macros": ["macro.dbt.should_store_failures", "macro.dbt.statement", "macro.dbt.create_table_as", "macro.dbt.get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.933413, "supported_languages": ["sql"]}, "macro.dbt.get_test_sql": {"name": "get_test_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "unique_id": "macro.dbt.get_test_sql", "macro_sql": "{% macro get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n {{ adapter.dispatch('get_test_sql', 'dbt')(main_sql, fail_calc, warn_if, error_if, limit) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_test_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.934442, "supported_languages": null}, "macro.dbt.default__get_test_sql": {"name": "default__get_test_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/helpers.sql", "original_file_path": "macros/materializations/tests/helpers.sql", "unique_id": "macro.dbt.default__get_test_sql", "macro_sql": "{% macro default__get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n select\n {{ fail_calc }} as failures,\n {{ fail_calc }} {{ warn_if }} as should_warn,\n {{ fail_calc }} {{ error_if }} as should_error\n from (\n {{ main_sql }}\n {{ \"limit \" ~ limit if limit != none }}\n ) dbt_internal_test\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.93521, "supported_languages": null}, "macro.dbt.get_where_subquery": {"name": "get_where_subquery", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "unique_id": "macro.dbt.get_where_subquery", "macro_sql": "{% macro get_where_subquery(relation) -%}\n {% do return(adapter.dispatch('get_where_subquery', 'dbt')(relation)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_where_subquery"]}, 
"description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9360561, "supported_languages": null}, "macro.dbt.default__get_where_subquery": {"name": "default__get_where_subquery", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/tests/where_subquery.sql", "original_file_path": "macros/materializations/tests/where_subquery.sql", "unique_id": "macro.dbt.default__get_where_subquery", "macro_sql": "{% macro default__get_where_subquery(relation) -%}\n {% set where = config.get('where', '') %}\n {% if where %}\n {%- set filtered -%}\n (select * from {{ relation }} where {{ where }}) dbt_subquery\n {%- endset -%}\n {% do return(filtered) %}\n {%- else -%}\n {% do return(relation) %}\n {%- endif -%}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9370618, "supported_languages": null}, "macro.dbt.get_quoted_csv": {"name": "get_quoted_csv", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.get_quoted_csv", "macro_sql": "{% macro get_quoted_csv(column_names) %}\n\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote(col)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.940309, "supported_languages": null}, "macro.dbt.diff_columns": {"name": "diff_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.diff_columns", "macro_sql": "{% macro diff_columns(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% set source_names = source_columns | map(attribute = 'column') | list %}\n {% set target_names = target_columns | map(attribute = 'column') | list %}\n\n {# --check whether the name attribute exists in the target - this does not perform a data type check #}\n {% for sc in source_columns %}\n {% if sc.name not in target_names %}\n {{ result.append(sc) }}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9417028, "supported_languages": null}, "macro.dbt.diff_column_data_types": {"name": "diff_column_data_types", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.diff_column_data_types", "macro_sql": "{% macro diff_column_data_types(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% for sc in source_columns %}\n {% set tc = target_columns | selectattr(\"name\", \"equalto\", sc.name) | list | first %}\n {% if tc %}\n {% if sc.data_type != tc.data_type and not sc.can_expand_to(other_column=tc) %}\n {{ result.append( { 'column_name': 
tc.name, 'new_type': sc.data_type } ) }}\n {% endif %}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.943423, "supported_languages": null}, "macro.dbt.get_merge_update_columns": {"name": "get_merge_update_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.get_merge_update_columns", "macro_sql": "{% macro get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {{ return(adapter.dispatch('get_merge_update_columns', 'dbt')(merge_update_columns, merge_exclude_columns, dest_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.944038, "supported_languages": null}, "macro.dbt.default__get_merge_update_columns": {"name": "default__get_merge_update_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/column_helpers.sql", "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", "unique_id": "macro.dbt.default__get_merge_update_columns", "macro_sql": "{% macro default__get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {%- set default_cols = dest_columns | map(attribute=\"quoted\") | list -%}\n\n {%- if merge_update_columns and merge_exclude_columns -%}\n {{ exceptions.raise_compiler_error(\n 'Model cannot specify merge_update_columns and merge_exclude_columns. 
Please update model to use only one config'\n )}}\n {%- elif merge_update_columns -%}\n {%- set update_columns = merge_update_columns -%}\n {%- elif merge_exclude_columns -%}\n {%- set update_columns = [] -%}\n {%- for column in dest_columns -%}\n {% if column.column | lower not in merge_exclude_columns | map(\"lower\") | list %}\n {%- do update_columns.append(column.quoted) -%}\n {% endif %}\n {%- endfor -%}\n {%- else -%}\n {%- set update_columns = default_cols -%}\n {%- endif -%}\n\n {{ return(update_columns) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9458601, "supported_languages": null}, "macro.dbt.get_merge_sql": {"name": "get_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_merge_sql", "macro_sql": "{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%}\n {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.954162, "supported_languages": null}, "macro.dbt.default__get_merge_sql": {"name": "default__get_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.default__get_merge_sql", "macro_sql": "{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%}\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set merge_update_columns = config.get('merge_update_columns') -%}\n {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}\n {%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not mapping and unique_key is not string %}\n {% for key in unique_key %}\n {% set this_key_match %}\n DBT_INTERNAL_SOURCE.{{ key }} = DBT_INTERNAL_DEST.{{ key }}\n {% endset %}\n {% do predicates.append(this_key_match) %}\n {% endfor %}\n {% else %}\n {% set unique_key_match %}\n DBT_INTERNAL_SOURCE.{{ unique_key }} = DBT_INTERNAL_DEST.{{ unique_key }}\n {% endset %}\n {% do predicates.append(unique_key_match) %}\n {% endif %}\n {% else %}\n {% do predicates.append('FALSE') %}\n {% endif %}\n\n {{ sql_header if sql_header is not none }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on {{ predicates | join(' and ') }}\n\n {% if unique_key %}\n when matched then update set\n {% for column_name in update_columns -%}\n {{ column_name }} = DBT_INTERNAL_SOURCE.{{ column_name }}\n {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n {% endif %}\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv", 
"macro.dbt.get_merge_update_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.958357, "supported_languages": null}, "macro.dbt.get_delete_insert_merge_sql": {"name": "get_delete_insert_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_delete_insert_merge_sql", "macro_sql": "{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.959069, "supported_languages": null}, "macro.dbt.default__get_delete_insert_merge_sql": {"name": "default__get_delete_insert_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.default__get_delete_insert_merge_sql", "macro_sql": "{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not string %}\n delete from {{target }}\n using {{ source }}\n where (\n {% for key in unique_key %}\n {{ source }}.{{ key }} = {{ target }}.{{ key }}\n {{ \"and \" if not loop.last }}\n {% endfor %}\n );\n {% else %}\n delete from {{ target }}\n where (\n {{ unique_key }}) in (\n select ({{ unique_key }})\n from {{ source }}\n );\n\n {% endif %}\n {% endif %}\n\n insert into {{ target }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ source }}\n )\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9617019, "supported_languages": null}, "macro.dbt.get_insert_overwrite_merge_sql": {"name": "get_insert_overwrite_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": "macro.dbt.get_insert_overwrite_merge_sql", "macro_sql": "{% macro get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header=false) -%}\n {{ adapter.dispatch('get_insert_overwrite_merge_sql', 'dbt')(target, source, dest_columns, predicates, include_sql_header) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9624152, "supported_languages": null}, "macro.dbt.default__get_insert_overwrite_merge_sql": {"name": "default__get_insert_overwrite_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/merge.sql", "original_file_path": "macros/materializations/models/incremental/merge.sql", "unique_id": 
"macro.dbt.default__get_insert_overwrite_merge_sql", "macro_sql": "{% macro default__get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header) -%}\n {#-- The only time include_sql_header is True: --#}\n {#-- BigQuery + insert_overwrite strategy + \"static\" partitions config --#}\n {#-- We should consider including the sql header at the materialization level instead --#}\n\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none and include_sql_header }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on FALSE\n\n when not matched by source\n {% if predicates %} and {{ predicates | join(' and ') }} {% endif %}\n then delete\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.964027, "supported_languages": null}, "macro.dbt.is_incremental": {"name": "is_incremental", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/is_incremental.sql", "original_file_path": "macros/materializations/models/incremental/is_incremental.sql", "unique_id": "macro.dbt.is_incremental", "macro_sql": "{% macro is_incremental() %}\n {#-- do not run introspective queries in parsing #}\n {% if not execute %}\n {{ return(False) }}\n {% else %}\n {% set relation = adapter.get_relation(this.database, this.schema, this.table) %}\n {{ return(relation is not none\n and relation.type == 'table'\n and model.config.materialized == 'incremental'\n and not should_full_refresh()) }}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.should_full_refresh"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9655309, "supported_languages": null}, "macro.dbt.get_incremental_append_sql": {"name": "get_incremental_append_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_append_sql", "macro_sql": "{% macro get_incremental_append_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_append_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.967351, "supported_languages": null}, "macro.dbt.default__get_incremental_append_sql": {"name": "default__get_incremental_append_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_append_sql", "macro_sql": "{% macro default__get_incremental_append_sql(arg_dict) %}\n\n {% do return(get_insert_into_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", 
"depends_on": {"macros": ["macro.dbt.get_insert_into_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.967958, "supported_languages": null}, "macro.dbt.get_incremental_delete_insert_sql": {"name": "get_incremental_delete_insert_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_delete_insert_sql", "macro_sql": "{% macro get_incremental_delete_insert_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_delete_insert_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_delete_insert_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9684448, "supported_languages": null}, "macro.dbt.default__get_incremental_delete_insert_sql": {"name": "default__get_incremental_delete_insert_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_delete_insert_sql", "macro_sql": "{% macro default__get_incremental_delete_insert_sql(arg_dict) %}\n\n {% do return(get_delete_insert_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_delete_insert_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.969121, "supported_languages": null}, "macro.dbt.get_incremental_merge_sql": {"name": "get_incremental_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_merge_sql", "macro_sql": "{% macro get_incremental_merge_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_merge_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9696, "supported_languages": null}, "macro.dbt.default__get_incremental_merge_sql": {"name": "default__get_incremental_merge_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_merge_sql", "macro_sql": "{% macro default__get_incremental_merge_sql(arg_dict) %}\n\n {% do return(get_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.970359, "supported_languages": null}, "macro.dbt.get_incremental_insert_overwrite_sql": {"name": 
"get_incremental_insert_overwrite_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_insert_overwrite_sql", "macro_sql": "{% macro get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_insert_overwrite_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_incremental_insert_overwrite_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.970861, "supported_languages": null}, "macro.dbt.default__get_incremental_insert_overwrite_sql": {"name": "default__get_incremental_insert_overwrite_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_insert_overwrite_sql", "macro_sql": "{% macro default__get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {% do return(get_insert_overwrite_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"], arg_dict[\"predicates\"])) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_insert_overwrite_merge_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.971532, "supported_languages": null}, "macro.dbt.get_incremental_default_sql": {"name": "get_incremental_default_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_incremental_default_sql", "macro_sql": "{% macro get_incremental_default_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_default_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_incremental_default_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9720068, "supported_languages": null}, "macro.dbt.default__get_incremental_default_sql": {"name": "default__get_incremental_default_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.default__get_incremental_default_sql", "macro_sql": "{% macro default__get_incremental_default_sql(arg_dict) %}\n\n {% do return(get_incremental_append_sql(arg_dict)) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_incremental_append_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.972397, "supported_languages": null}, "macro.dbt.get_insert_into_sql": {"name": "get_insert_into_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/strategies.sql", "original_file_path": "macros/materializations/models/incremental/strategies.sql", "unique_id": "macro.dbt.get_insert_into_sql", "macro_sql": "{% macro get_insert_into_sql(target_relation, 
temp_relation, dest_columns) %}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n insert into {{ target_relation }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ temp_relation }}\n )\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_quoted_csv"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9731112, "supported_languages": null}, "macro.dbt.materialization_incremental_default": {"name": "materialization_incremental_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/incremental.sql", "original_file_path": "macros/materializations/models/incremental/incremental.sql", "unique_id": "macro.dbt.materialization_incremental_default", "macro_sql": "{% materialization incremental, default -%}\n\n -- relations\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') -%}\n {%- set temp_relation = make_temp_relation(target_relation)-%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation)-%}\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n\n -- configs\n {%- set unique_key = config.get('unique_key') -%}\n {%- set full_refresh_mode = (should_full_refresh() or existing_relation.is_view) -%}\n {%- set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') -%}\n\n -- the temp_ and backup_ relations should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation. This has to happen before\n -- BEGIN, in a separate transaction\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation)-%}\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set to_drop = [] %}\n\n {% if existing_relation is none %}\n {% set build_sql = get_create_table_as_sql(False, target_relation, sql) %}\n {% elif full_refresh_mode %}\n {% set build_sql = get_create_table_as_sql(False, intermediate_relation, sql) %}\n {% set need_swap = true %}\n {% else %}\n {% do run_query(get_create_table_as_sql(True, temp_relation, sql)) %}\n {% do adapter.expand_target_column_types(\n from_relation=temp_relation,\n to_relation=target_relation) %}\n {#-- Process schema changes. Returns dict of changes if successful. 
Use source columns for upserting/merging --#}\n {% set dest_columns = process_schema_changes(on_schema_change, temp_relation, existing_relation) %}\n {% if not dest_columns %}\n {% set dest_columns = adapter.get_columns_in_relation(existing_relation) %}\n {% endif %}\n\n {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#}\n {% set incremental_strategy = config.get('incremental_strategy') or 'default' %}\n {% set incremental_predicates = config.get('incremental_predicates', none) %}\n {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %}\n {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %}\n {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %}\n\n {% endif %}\n\n {% call statement(\"main\") %}\n {{ build_sql }}\n {% endcall %}\n\n {% if need_swap %}\n {% do adapter.rename_relation(target_relation, backup_relation) %}\n {% do adapter.rename_relation(intermediate_relation, target_relation) %}\n {% do to_drop.append(backup_relation) %}\n {% endif %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if existing_relation is none or existing_relation.is_view or should_full_refresh() %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {% do adapter.commit() %}\n\n {% for rel in to_drop %}\n {% do adapter.drop_relation(rel) %}\n {% endfor %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_temp_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.should_full_refresh", "macro.dbt.incremental_validate_on_schema_change", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.get_create_table_as_sql", "macro.dbt.run_query", "macro.dbt.process_schema_changes", "macro.dbt.statement", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.984826, "supported_languages": ["sql"]}, "macro.dbt.incremental_validate_on_schema_change": {"name": "incremental_validate_on_schema_change", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.incremental_validate_on_schema_change", "macro_sql": "{% macro incremental_validate_on_schema_change(on_schema_change, default='ignore') %}\n\n {% if on_schema_change not in ['sync_all_columns', 'append_new_columns', 'fail', 'ignore'] %}\n\n {% set log_message = 'Invalid value for on_schema_change (%s) specified. Setting default value of %s.' 
% (on_schema_change, default) %}\n {% do log(log_message) %}\n\n {{ return(default) }}\n\n {% else %}\n\n {{ return(on_schema_change) }}\n\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.995018, "supported_languages": null}, "macro.dbt.check_for_schema_changes": {"name": "check_for_schema_changes", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.check_for_schema_changes", "macro_sql": "{% macro check_for_schema_changes(source_relation, target_relation) %}\n\n {% set schema_changed = False %}\n\n {%- set source_columns = adapter.get_columns_in_relation(source_relation) -%}\n {%- set target_columns = adapter.get_columns_in_relation(target_relation) -%}\n {%- set source_not_in_target = diff_columns(source_columns, target_columns) -%}\n {%- set target_not_in_source = diff_columns(target_columns, source_columns) -%}\n\n {% set new_target_types = diff_column_data_types(source_columns, target_columns) %}\n\n {% if source_not_in_target != [] %}\n {% set schema_changed = True %}\n {% elif target_not_in_source != [] or new_target_types != [] %}\n {% set schema_changed = True %}\n {% elif new_target_types != [] %}\n {% set schema_changed = True %}\n {% endif %}\n\n {% set changes_dict = {\n 'schema_changed': schema_changed,\n 'source_not_in_target': source_not_in_target,\n 'target_not_in_source': target_not_in_source,\n 'source_columns': source_columns,\n 'target_columns': target_columns,\n 'new_target_types': new_target_types\n } %}\n\n {% set msg %}\n In {{ target_relation }}:\n Schema changed: {{ schema_changed }}\n Source columns not in target: {{ source_not_in_target }}\n Target columns not in source: {{ target_not_in_source }}\n New column types: {{ new_target_types }}\n {% endset %}\n\n {% do log(msg) %}\n\n {{ return(changes_dict) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.diff_columns", "macro.dbt.diff_column_data_types"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853277.9980521, "supported_languages": null}, "macro.dbt.sync_column_schemas": {"name": "sync_column_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.sync_column_schemas", "macro_sql": "{% macro sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {%- set add_to_target_arr = schema_changes_dict['source_not_in_target'] -%}\n\n {%- if on_schema_change == 'append_new_columns'-%}\n {%- if add_to_target_arr | length > 0 -%}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, none) -%}\n {%- endif -%}\n\n {% elif on_schema_change == 'sync_all_columns' %}\n {%- set remove_from_target_arr = schema_changes_dict['target_not_in_source'] -%}\n {%- set new_target_types = schema_changes_dict['new_target_types'] -%}\n\n {% if add_to_target_arr | length > 0 or remove_from_target_arr | length > 0 %}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, remove_from_target_arr) -%}\n {% endif %}\n\n {% if new_target_types != [] %}\n {% 
for ntt in new_target_types %}\n {% set column_name = ntt['column_name'] %}\n {% set new_type = ntt['new_type'] %}\n {% do alter_column_type(target_relation, column_name, new_type) %}\n {% endfor %}\n {% endif %}\n\n {% endif %}\n\n {% set schema_change_message %}\n In {{ target_relation }}:\n Schema change approach: {{ on_schema_change }}\n Columns added: {{ add_to_target_arr }}\n Columns removed: {{ remove_from_target_arr }}\n Data types changed: {{ new_target_types }}\n {% endset %}\n\n {% do log(schema_change_message) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.alter_relation_add_remove_columns", "macro.dbt.alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.00105, "supported_languages": null}, "macro.dbt.process_schema_changes": {"name": "process_schema_changes", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/incremental/on_schema_change.sql", "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", "unique_id": "macro.dbt.process_schema_changes", "macro_sql": "{% macro process_schema_changes(on_schema_change, source_relation, target_relation) %}\n\n {% if on_schema_change == 'ignore' %}\n\n {{ return({}) }}\n\n {% else %}\n\n {% set schema_changes_dict = check_for_schema_changes(source_relation, target_relation) %}\n\n {% if schema_changes_dict['schema_changed'] %}\n\n {% if on_schema_change == 'fail' %}\n\n {% set fail_msg %}\n The source and target schemas on this incremental model are out of sync!\n They can be reconciled in several ways:\n - set the `on_schema_change` config to either append_new_columns or sync_all_columns, depending on your situation.\n - Re-run the incremental model with `full_refresh: True` to update the target schema.\n - update the schema manually and re-run the process.\n\n Additional troubleshooting context:\n Source columns not in target: {{ schema_changes_dict['source_not_in_target'] }}\n Target columns not in source: {{ schema_changes_dict['target_not_in_source'] }}\n New column types: {{ schema_changes_dict['new_target_types'] }}\n {% endset %}\n\n {% do exceptions.raise_compiler_error(fail_msg) %}\n\n {# -- unless we ignore, run the sync operation per the config #}\n {% else %}\n\n {% do sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {% endif %}\n\n {% endif %}\n\n {{ return(schema_changes_dict['source_columns']) }}\n\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.check_for_schema_changes", "macro.dbt.sync_column_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.003159, "supported_languages": null}, "macro.dbt.materialization_table_default": {"name": "materialization_table_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/table.sql", "original_file_path": "macros/materializations/models/table/table.sql", "unique_id": "macro.dbt.materialization_table_default", "macro_sql": "{% materialization table, default %}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') %}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. 
Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n See ../view/view.sql for more information about this relation.\n */\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n -- as above, the backup_relation should not already exist\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_table_as_sql(False, intermediate_relation, sql) }}\n {%- endcall %}\n\n -- cleanup\n {% if existing_relation is not none %}\n {{ adapter.rename_relation(existing_relation, backup_relation) }}\n {% endif %}\n\n {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n {% do create_indexes(target_relation) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n -- `COMMIT` happens here\n {{ adapter.commit() }}\n\n -- finally, drop the existing/backup relation after the commit\n {{ drop_relation_if_exists(backup_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks", "macro.dbt.statement", "macro.dbt.get_create_table_as_sql", "macro.dbt.create_indexes", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.009005, "supported_languages": ["sql"]}, "macro.dbt.get_create_table_as_sql": {"name": "get_create_table_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.get_create_table_as_sql", "macro_sql": "{% macro get_create_table_as_sql(temporary, relation, sql) -%}\n {{ adapter.dispatch('get_create_table_as_sql', 'dbt')(temporary, relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_create_table_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.010164, "supported_languages": null}, "macro.dbt.default__get_create_table_as_sql": {"name": "default__get_create_table_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", 
"original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.default__get_create_table_as_sql", "macro_sql": "{% macro default__get_create_table_as_sql(temporary, relation, sql) -%}\n {{ return(create_table_as(temporary, relation, sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.010643, "supported_languages": null}, "macro.dbt.create_table_as": {"name": "create_table_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.create_table_as", "macro_sql": "{% macro create_table_as(temporary, relation, compiled_code, language='sql') -%}\n {# backward compatibility for create_table_as that does not support language #}\n {% if language == \"sql\" %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code)}}\n {% else %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code, language) }}\n {% endif %}\n\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_table_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.01173, "supported_languages": null}, "macro.dbt.default__create_table_as": {"name": "default__create_table_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/table/create_table_as.sql", "original_file_path": "macros/materializations/models/table/create_table_as.sql", "unique_id": "macro.dbt.default__create_table_as", "macro_sql": "{% macro default__create_table_as(temporary, relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary: -%}temporary{%- endif %} table\n {{ relation.include(database=(not temporary), schema=(not temporary)) }}\n as (\n {{ sql }}\n );\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.012759, "supported_languages": null}, "macro.dbt.materialization_view_default": {"name": "materialization_view_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/view.sql", "original_file_path": "macros/materializations/models/view/view.sql", "unique_id": "macro.dbt.materialization_view_default", "macro_sql": "{%- materialization view, default -%}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='view') -%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n This relation (probably) doesn't exist yet. If it does exist, it's a leftover from\n a previous run, and we're going to try to drop it immediately. 
At the end of this\n materialization, we're going to rename the \"existing_relation\" to this identifier,\n and then we're going to drop it. In order to make sure we run the correct one of:\n - drop view ...\n - drop table ...\n\n We need to set the type of this relation to be the type of the existing_relation, if it exists,\n or else \"view\" as a sane default if it does not. Note that if the existing_relation does not\n exist, then there is nothing to move out of the way and subsequentally drop. In that case,\n this relation will be effectively unused.\n */\n {%- set backup_relation_type = 'view' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n -- as above, the backup_relation should not already exist\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_view_as_sql(intermediate_relation, sql) }}\n {%- endcall %}\n\n -- cleanup\n -- move the existing view out of the way\n {% if existing_relation is not none %}\n {{ adapter.rename_relation(existing_relation, backup_relation) }}\n {% endif %}\n {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {{ drop_relation_if_exists(backup_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization -%}", "depends_on": {"macros": ["macro.dbt.load_cached_relation", "macro.dbt.make_intermediate_relation", "macro.dbt.make_backup_relation", "macro.dbt.run_hooks", "macro.dbt.drop_relation_if_exists", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0184052, "supported_languages": ["sql"]}, "macro.dbt.handle_existing_table": {"name": "handle_existing_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "unique_id": "macro.dbt.handle_existing_table", "macro_sql": "{% macro handle_existing_table(full_refresh, old_relation) %}\n {{ adapter.dispatch('handle_existing_table', 'dbt')(full_refresh, old_relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__handle_existing_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.019115, "supported_languages": null}, "macro.dbt.default__handle_existing_table": {"name": "default__handle_existing_table", 
"resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/helpers.sql", "original_file_path": "macros/materializations/models/view/helpers.sql", "unique_id": "macro.dbt.default__handle_existing_table", "macro_sql": "{% macro default__handle_existing_table(full_refresh, old_relation) %}\n {{ log(\"Dropping relation \" ~ old_relation ~ \" because it is of type \" ~ old_relation.type) }}\n {{ adapter.drop_relation(old_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.019679, "supported_languages": null}, "macro.dbt.create_or_replace_view": {"name": "create_or_replace_view", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_or_replace_view.sql", "original_file_path": "macros/materializations/models/view/create_or_replace_view.sql", "unique_id": "macro.dbt.create_or_replace_view", "macro_sql": "{% macro create_or_replace_view() %}\n {%- set identifier = model['alias'] -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database,\n type='view') -%}\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks) }}\n\n -- If there's a table with the same name and we weren't told to full refresh,\n -- that's an error. If we were told to full refresh, drop it. This behavior differs\n -- for Snowflake and BigQuery, so multiple dispatch is used.\n {%- if old_relation is not none and old_relation.is_table -%}\n {{ handle_existing_table(should_full_refresh(), old_relation) }}\n {%- endif -%}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_view_as_sql(target_relation, sql) }}\n {%- endcall %}\n\n {% set should_revoke = should_revoke(exists_as_view, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=True) %}\n\n {{ run_hooks(post_hooks) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_hooks", "macro.dbt.handle_existing_table", "macro.dbt.should_full_refresh", "macro.dbt.statement", "macro.dbt.get_create_view_as_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0234811, "supported_languages": null}, "macro.dbt.get_create_view_as_sql": {"name": "get_create_view_as_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.get_create_view_as_sql", "macro_sql": "{% macro get_create_view_as_sql(relation, sql) -%}\n {{ adapter.dispatch('get_create_view_as_sql', 'dbt')(relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_create_view_as_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0243561, "supported_languages": null}, "macro.dbt.default__get_create_view_as_sql": {"name": "default__get_create_view_as_sql", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.default__get_create_view_as_sql", "macro_sql": "{% macro default__get_create_view_as_sql(relation, sql) -%}\n {{ return(create_view_as(relation, sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0248141, "supported_languages": null}, "macro.dbt.create_view_as": {"name": "create_view_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.create_view_as", "macro_sql": "{% macro create_view_as(relation, sql) -%}\n {{ adapter.dispatch('create_view_as', 'dbt')(relation, sql) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_view_as"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.025273, "supported_languages": null}, "macro.dbt.default__create_view_as": {"name": "default__create_view_as", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/models/view/create_view_as.sql", "original_file_path": "macros/materializations/models/view/create_view_as.sql", "unique_id": "macro.dbt.default__create_view_as", "macro_sql": "{% macro default__create_view_as(relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n create view {{ relation }} as (\n {{ sql }}\n );\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.025913, "supported_languages": null}, "macro.dbt.materialization_seed_default": {"name": "materialization_seed_default", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/seed.sql", "original_file_path": "macros/materializations/seeds/seed.sql", "unique_id": "macro.dbt.materialization_seed_default", "macro_sql": "{% materialization seed, default %}\n\n {%- set identifier = model['alias'] -%}\n {%- set full_refresh_mode = (should_full_refresh()) -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n\n {%- set exists_as_table = (old_relation is not none and old_relation.is_table) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set grant_config = config.get('grants') -%}\n {%- set agate_table = load_agate_table() -%}\n -- grab current tables grants config for comparision later on\n\n {%- do store_result('agate_table', response='OK', agate_table=agate_table) -%}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% set create_table_sql = \"\" %}\n {% if exists_as_view %}\n {{ exceptions.raise_compiler_error(\"Cannot seed to '{}', it is a view\".format(old_relation)) }}\n {% elif exists_as_table %}\n {% set create_table_sql = reset_csv_table(model, full_refresh_mode, old_relation, agate_table) %}\n {% else %}\n {% set create_table_sql = create_csv_table(model, agate_table) %}\n {% endif %}\n\n {% set code = 'CREATE' if 
full_refresh_mode else 'INSERT' %}\n {% set rows_affected = (agate_table.rows | length) %}\n {% set sql = load_csv_rows(model, agate_table) %}\n\n {% call noop_statement('main', code ~ ' ' ~ rows_affected, code, rows_affected) %}\n {{ get_csv_sql(create_table_sql, sql) }};\n {% endcall %}\n\n {% set target_relation = this.incorporate(type='table') %}\n\n {% set should_revoke = should_revoke(old_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if full_refresh_mode or not exists_as_table %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {{ adapter.commit() }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", "depends_on": {"macros": ["macro.dbt.should_full_refresh", "macro.dbt.run_hooks", "macro.dbt.reset_csv_table", "macro.dbt.create_csv_table", "macro.dbt.load_csv_rows", "macro.dbt.noop_statement", "macro.dbt.get_csv_sql", "macro.dbt.should_revoke", "macro.dbt.apply_grants", "macro.dbt.persist_docs", "macro.dbt.create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.033872, "supported_languages": ["sql"]}, "macro.dbt.create_csv_table": {"name": "create_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.create_csv_table", "macro_sql": "{% macro create_csv_table(model, agate_table) -%}\n {{ adapter.dispatch('create_csv_table', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0429308, "supported_languages": null}, "macro.dbt.default__create_csv_table": {"name": "default__create_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__create_csv_table", "macro_sql": "{% macro default__create_csv_table(model, agate_table) %}\n {%- set column_override = model['config'].get('column_types', {}) -%}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n\n {% set sql %}\n create table {{ this.render() }} (\n {%- for col_name in agate_table.column_names -%}\n {%- set inferred_type = adapter.convert_type(agate_table, loop.index0) -%}\n {%- set type = column_override.get(col_name, inferred_type) -%}\n {%- set column_name = (col_name | string) -%}\n {{ adapter.quote_seed_column(column_name, quote_seed_column) }} {{ type }} {%- if not loop.last -%}, {%- endif -%}\n {%- endfor -%}\n )\n {% endset %}\n\n {% call statement('_') -%}\n {{ sql }}\n {%- endcall %}\n\n {{ return(sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0452118, "supported_languages": null}, "macro.dbt.reset_csv_table": {"name": "reset_csv_table", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.reset_csv_table", "macro_sql": "{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}\n {{ adapter.dispatch('reset_csv_table', 'dbt')(model, full_refresh, old_relation, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__reset_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0458, "supported_languages": null}, "macro.dbt.default__reset_csv_table": {"name": "default__reset_csv_table", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__reset_csv_table", "macro_sql": "{% macro default__reset_csv_table(model, full_refresh, old_relation, agate_table) %}\n {% set sql = \"\" %}\n {% if full_refresh %}\n {{ adapter.drop_relation(old_relation) }}\n {% set sql = create_csv_table(model, agate_table) %}\n {% else %}\n {{ adapter.truncate_relation(old_relation) }}\n {% set sql = \"truncate table \" ~ old_relation %}\n {% endif %}\n\n {{ return(sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.create_csv_table"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047039, "supported_languages": null}, "macro.dbt.get_csv_sql": {"name": "get_csv_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_csv_sql", "macro_sql": "{% macro get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ adapter.dispatch('get_csv_sql', 'dbt')(create_or_truncate_sql, insert_sql) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_csv_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047529, "supported_languages": null}, "macro.dbt.default__get_csv_sql": {"name": "default__get_csv_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_csv_sql", "macro_sql": "{% macro default__get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ create_or_truncate_sql }};\n -- dbt seed --\n {{ insert_sql }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.047877, "supported_languages": null}, "macro.dbt.get_binding_char": {"name": "get_binding_char", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_binding_char", "macro_sql": "{% macro get_binding_char() -%}\n {{ adapter.dispatch('get_binding_char', 'dbt')() }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0482402, "supported_languages": null}, "macro.dbt.default__get_binding_char": {"name": 
"default__get_binding_char", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_binding_char", "macro_sql": "{% macro default__get_binding_char() %}\n {{ return('%s') }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.048544, "supported_languages": null}, "macro.dbt.get_batch_size": {"name": "get_batch_size", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_batch_size", "macro_sql": "{% macro get_batch_size() -%}\n {{ return(adapter.dispatch('get_batch_size', 'dbt')()) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_batch_size"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.048952, "supported_languages": null}, "macro.dbt.default__get_batch_size": {"name": "default__get_batch_size", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__get_batch_size", "macro_sql": "{% macro default__get_batch_size() %}\n {{ return(10000) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0492558, "supported_languages": null}, "macro.dbt.get_seed_column_quoted_csv": {"name": "get_seed_column_quoted_csv", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.get_seed_column_quoted_csv", "macro_sql": "{% macro get_seed_column_quoted_csv(model, column_names) %}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote_seed_column(col, quote_seed_column)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0504608, "supported_languages": null}, "macro.dbt.load_csv_rows": {"name": "load_csv_rows", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": "macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.load_csv_rows", "macro_sql": "{% macro load_csv_rows(model, agate_table) -%}\n {{ adapter.dispatch('load_csv_rows', 'dbt')(model, agate_table) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__load_csv_rows"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.050931, "supported_languages": null}, "macro.dbt.default__load_csv_rows": {"name": "default__load_csv_rows", "resource_type": "macro", "package_name": "dbt", "path": "macros/materializations/seeds/helpers.sql", "original_file_path": 
"macros/materializations/seeds/helpers.sql", "unique_id": "macro.dbt.default__load_csv_rows", "macro_sql": "{% macro default__load_csv_rows(model, agate_table) %}\n\n {% set batch_size = get_batch_size() %}\n\n {% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}\n {% set bindings = [] %}\n\n {% set statements = [] %}\n\n {% for chunk in agate_table.rows | batch(batch_size) %}\n {% set bindings = [] %}\n\n {% for row in chunk %}\n {% do bindings.extend(row) %}\n {% endfor %}\n\n {% set sql %}\n insert into {{ this.render() }} ({{ cols_sql }}) values\n {% for row in chunk -%}\n ({%- for column in agate_table.column_names -%}\n {{ get_binding_char() }}\n {%- if not loop.last%},{%- endif %}\n {%- endfor -%})\n {%- if not loop.last%},{%- endif %}\n {%- endfor %}\n {% endset %}\n\n {% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}\n\n {% if loop.index0 == 0 %}\n {% do statements.append(sql) %}\n {% endif %}\n {% endfor %}\n\n {# Return SQL so we can render it out into the compiled files #}\n {{ return(statements[0]) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_batch_size", "macro.dbt.get_seed_column_quoted_csv", "macro.dbt.get_binding_char"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0541089, "supported_languages": null}, "macro.dbt.generate_alias_name": {"name": "generate_alias_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "unique_id": "macro.dbt.generate_alias_name", "macro_sql": "{% macro generate_alias_name(custom_alias_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_alias_name', 'dbt')(custom_alias_name, node)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_alias_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0551748, "supported_languages": null}, "macro.dbt.default__generate_alias_name": {"name": "default__generate_alias_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_alias.sql", "original_file_path": "macros/get_custom_name/get_custom_alias.sql", "unique_id": "macro.dbt.default__generate_alias_name", "macro_sql": "{% macro default__generate_alias_name(custom_alias_name=none, node=none) -%}\n\n {%- if custom_alias_name is none -%}\n\n {{ node.name }}\n\n {%- else -%}\n\n {{ custom_alias_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.055804, "supported_languages": null}, "macro.dbt.generate_schema_name": {"name": "generate_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "unique_id": "macro.dbt.generate_schema_name", "macro_sql": "{% macro generate_schema_name(custom_schema_name=none, node=none) -%}\n {{ return(adapter.dispatch('generate_schema_name', 'dbt')(custom_schema_name, node)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], 
"created_at": 1670853278.0569532, "supported_languages": null}, "macro.dbt.default__generate_schema_name": {"name": "default__generate_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "unique_id": "macro.dbt.default__generate_schema_name", "macro_sql": "{% macro default__generate_schema_name(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if custom_schema_name is none -%}\n\n {{ default_schema }}\n\n {%- else -%}\n\n {{ default_schema }}_{{ custom_schema_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.05763, "supported_languages": null}, "macro.dbt.generate_schema_name_for_env": {"name": "generate_schema_name_for_env", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_schema.sql", "original_file_path": "macros/get_custom_name/get_custom_schema.sql", "unique_id": "macro.dbt.generate_schema_name_for_env", "macro_sql": "{% macro generate_schema_name_for_env(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if target.name == 'prod' and custom_schema_name is not none -%}\n\n {{ custom_schema_name | trim }}\n\n {%- else -%}\n\n {{ default_schema }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0583591, "supported_languages": null}, "macro.dbt.generate_database_name": {"name": "generate_database_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "unique_id": "macro.dbt.generate_database_name", "macro_sql": "{% macro generate_database_name(custom_database_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_database_name', 'dbt')(custom_database_name, node)) %}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__generate_database_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.059273, "supported_languages": null}, "macro.dbt.default__generate_database_name": {"name": "default__generate_database_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/get_custom_name/get_custom_database.sql", "original_file_path": "macros/get_custom_name/get_custom_database.sql", "unique_id": "macro.dbt.default__generate_database_name", "macro_sql": "{% macro default__generate_database_name(custom_database_name=none, node=none) -%}\n {%- set default_database = target.database -%}\n {%- if custom_database_name is none -%}\n\n {{ default_database }}\n\n {%- else -%}\n\n {{ custom_database_name }}\n\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0599208, "supported_languages": null}, "macro.dbt.default__test_relationships": {"name": "default__test_relationships", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/relationships.sql", "original_file_path": 
"macros/generic_test_sql/relationships.sql", "unique_id": "macro.dbt.default__test_relationships", "macro_sql": "{% macro default__test_relationships(model, column_name, to, field) %}\n\nwith child as (\n select {{ column_name }} as from_field\n from {{ model }}\n where {{ column_name }} is not null\n),\n\nparent as (\n select {{ field }} as to_field\n from {{ to }}\n)\n\nselect\n from_field\n\nfrom child\nleft join parent\n on child.from_field = parent.to_field\n\nwhere parent.to_field is null\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.060704, "supported_languages": null}, "macro.dbt.default__test_not_null": {"name": "default__test_not_null", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/not_null.sql", "original_file_path": "macros/generic_test_sql/not_null.sql", "unique_id": "macro.dbt.default__test_not_null", "macro_sql": "{% macro default__test_not_null(model, column_name) %}\n\n{% set column_list = '*' if should_store_failures() else column_name %}\n\nselect {{ column_list }}\nfrom {{ model }}\nwhere {{ column_name }} is null\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.should_store_failures"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.061424, "supported_languages": null}, "macro.dbt.default__test_unique": {"name": "default__test_unique", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/unique.sql", "original_file_path": "macros/generic_test_sql/unique.sql", "unique_id": "macro.dbt.default__test_unique", "macro_sql": "{% macro default__test_unique(model, column_name) %}\n\nselect\n {{ column_name }} as unique_field,\n count(*) as n_records\n\nfrom {{ model }}\nwhere {{ column_name }} is not null\ngroup by {{ column_name }}\nhaving count(*) > 1\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.062015, "supported_languages": null}, "macro.dbt.default__test_accepted_values": {"name": "default__test_accepted_values", "resource_type": "macro", "package_name": "dbt", "path": "macros/generic_test_sql/accepted_values.sql", "original_file_path": "macros/generic_test_sql/accepted_values.sql", "unique_id": "macro.dbt.default__test_accepted_values", "macro_sql": "{% macro default__test_accepted_values(model, column_name, values, quote=True) %}\n\nwith all_values as (\n\n select\n {{ column_name }} as value_field,\n count(*) as n_records\n\n from {{ model }}\n group by {{ column_name }}\n\n)\n\nselect *\nfrom all_values\nwhere value_field not in (\n {% for value in values -%}\n {% if quote -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif -%}\n {%- if not loop.last -%},{%- endif %}\n {%- endfor %}\n)\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.063346, "supported_languages": null}, "macro.dbt.statement": {"name": "statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.statement", "macro_sql": "\n{%- macro statement(name=None, fetch_result=False, auto_begin=True, language='sql') -%}\n 
{%- if execute: -%}\n {%- set compiled_code = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime {} for node \"{}\"'.format(language, model['unique_id'])) }}\n {{ write(compiled_code) }}\n {%- endif -%}\n {%- if language == 'sql'-%}\n {%- set res, table = adapter.execute(compiled_code, auto_begin=auto_begin, fetch=fetch_result) -%}\n {%- elif language == 'python' -%}\n {%- set res = submit_python_job(model, compiled_code) -%}\n {#-- TODO: What should table be for python models? --#}\n {%- set table = None -%}\n {%- else -%}\n {% do exceptions.raise_compiler_error(\"statement macro didn't get supported language\") %}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_result(name, response=res, agate_table=table) }}\n {%- endif -%}\n\n {%- endif -%}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.066719, "supported_languages": null}, "macro.dbt.noop_statement": {"name": "noop_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.noop_statement", "macro_sql": "{% macro noop_statement(name=None, message=None, code=None, rows_affected=None, res=None) -%}\n {%- set sql = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime SQL for node \"{}\"'.format(model['unique_id'])) }}\n {{ write(sql) }}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_raw_result(name, message=message, code=code, rows_affected=rows_affected, agate_table=res) }}\n {%- endif -%}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.068168, "supported_languages": null}, "macro.dbt.run_query": {"name": "run_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/statement.sql", "original_file_path": "macros/etc/statement.sql", "unique_id": "macro.dbt.run_query", "macro_sql": "{% macro run_query(sql) %}\n {% call statement(\"run_query_statement\", fetch_result=true, auto_begin=false) %}\n {{ sql }}\n {% endcall %}\n\n {% do return(load_result(\"run_query_statement\").table) %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.068917, "supported_languages": null}, "macro.dbt.convert_datetime": {"name": "convert_datetime", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.convert_datetime", "macro_sql": "{% macro convert_datetime(date_str, date_fmt) %}\n\n {% set error_msg -%}\n The provided partition date '{{ date_str }}' does not match the expected format '{{ date_fmt }}'\n {%- endset %}\n\n {% set res = try_or_compiler_error(error_msg, modules.datetime.datetime.strptime, date_str.strip(), date_fmt) %}\n {{ return(res) }}\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.072692, "supported_languages": null}, "macro.dbt.dates_in_range": {"name": "dates_in_range", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": 
"macros/etc/datetime.sql", "unique_id": "macro.dbt.dates_in_range", "macro_sql": "{% macro dates_in_range(start_date_str, end_date_str=none, in_fmt=\"%Y%m%d\", out_fmt=\"%Y%m%d\") %}\n {% set end_date_str = start_date_str if end_date_str is none else end_date_str %}\n\n {% set start_date = convert_datetime(start_date_str, in_fmt) %}\n {% set end_date = convert_datetime(end_date_str, in_fmt) %}\n\n {% set day_count = (end_date - start_date).days %}\n {% if day_count < 0 %}\n {% set msg -%}\n Partiton start date is after the end date ({{ start_date }}, {{ end_date }})\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg, model) }}\n {% endif %}\n\n {% set date_list = [] %}\n {% for i in range(0, day_count + 1) %}\n {% set the_date = (modules.datetime.timedelta(days=i) + start_date) %}\n {% if not out_fmt %}\n {% set _ = date_list.append(the_date) %}\n {% else %}\n {% set _ = date_list.append(the_date.strftime(out_fmt)) %}\n {% endif %}\n {% endfor %}\n\n {{ return(date_list) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.convert_datetime"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0760698, "supported_languages": null}, "macro.dbt.partition_range": {"name": "partition_range", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.partition_range", "macro_sql": "{% macro partition_range(raw_partition_date, date_fmt='%Y%m%d') %}\n {% set partition_range = (raw_partition_date | string).split(\",\") %}\n\n {% if (partition_range | length) == 1 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = none %}\n {% elif (partition_range | length) == 2 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = partition_range[1] %}\n {% else %}\n {{ exceptions.raise_compiler_error(\"Invalid partition time. Expected format: {Start Date}[,{End Date}]. 
Got: \" ~ raw_partition_date) }}\n {% endif %}\n\n {{ return(dates_in_range(start_date, end_date, in_fmt=date_fmt)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.dates_in_range"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.077999, "supported_languages": null}, "macro.dbt.py_current_timestring": {"name": "py_current_timestring", "resource_type": "macro", "package_name": "dbt", "path": "macros/etc/datetime.sql", "original_file_path": "macros/etc/datetime.sql", "unique_id": "macro.dbt.py_current_timestring", "macro_sql": "{% macro py_current_timestring() %}\n {% set dt = modules.datetime.datetime.now() %}\n {% do return(dt.strftime(\"%Y%m%d%H%M%S%f\")) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.078628, "supported_languages": null}, "macro.dbt.except": {"name": "except", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "unique_id": "macro.dbt.except", "macro_sql": "{% macro except() %}\n {{ return(adapter.dispatch('except', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__except"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.079215, "supported_languages": null}, "macro.dbt.default__except": {"name": "default__except", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/except.sql", "original_file_path": "macros/utils/except.sql", "unique_id": "macro.dbt.default__except", "macro_sql": "{% macro default__except() %}\n\n except\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.079419, "supported_languages": null}, "macro.dbt.replace": {"name": "replace", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "unique_id": "macro.dbt.replace", "macro_sql": "{% macro replace(field, old_chars, new_chars) -%}\n {{ return(adapter.dispatch('replace', 'dbt') (field, old_chars, new_chars)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__replace"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.080247, "supported_languages": null}, "macro.dbt.default__replace": {"name": "default__replace", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/replace.sql", "original_file_path": "macros/utils/replace.sql", "unique_id": "macro.dbt.default__replace", "macro_sql": "{% macro default__replace(field, old_chars, new_chars) %}\n\n replace(\n {{ field }},\n {{ old_chars }},\n {{ new_chars }}\n )\n\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.080682, "supported_languages": null}, "macro.dbt.concat": {"name": "concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "unique_id": "macro.dbt.concat", "macro_sql": "{% macro concat(fields) -%}\n {{ return(adapter.dispatch('concat', 
'dbt')(fields)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.081283, "supported_languages": null}, "macro.dbt.default__concat": {"name": "default__concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/concat.sql", "original_file_path": "macros/utils/concat.sql", "unique_id": "macro.dbt.default__concat", "macro_sql": "{% macro default__concat(fields) -%}\n {{ fields|join(' || ') }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0816069, "supported_languages": null}, "macro.dbt.length": {"name": "length", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "unique_id": "macro.dbt.length", "macro_sql": "{% macro length(expression) -%}\n {{ return(adapter.dispatch('length', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__length"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.082229, "supported_languages": null}, "macro.dbt.default__length": {"name": "default__length", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/length.sql", "original_file_path": "macros/utils/length.sql", "unique_id": "macro.dbt.default__length", "macro_sql": "{% macro default__length(expression) %}\n\n length(\n {{ expression }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.082507, "supported_languages": null}, "macro.dbt.dateadd": {"name": "dateadd", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt.dateadd", "macro_sql": "{% macro dateadd(datepart, interval, from_date_or_timestamp) %}\n {{ return(adapter.dispatch('dateadd', 'dbt')(datepart, interval, from_date_or_timestamp)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__dateadd"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.083297, "supported_languages": null}, "macro.dbt.default__dateadd": {"name": "default__dateadd", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/dateadd.sql", "original_file_path": "macros/utils/dateadd.sql", "unique_id": "macro.dbt.default__dateadd", "macro_sql": "{% macro default__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n dateadd(\n {{ datepart }},\n {{ interval }},\n {{ from_date_or_timestamp }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0837162, "supported_languages": null}, "macro.dbt.intersect": {"name": "intersect", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "unique_id": "macro.dbt.intersect", "macro_sql": "{% macro intersect() %}\n {{ return(adapter.dispatch('intersect', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": 
["macro.dbt.default__intersect"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.084266, "supported_languages": null}, "macro.dbt.default__intersect": {"name": "default__intersect", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/intersect.sql", "original_file_path": "macros/utils/intersect.sql", "unique_id": "macro.dbt.default__intersect", "macro_sql": "{% macro default__intersect() %}\n\n intersect\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0844731, "supported_languages": null}, "macro.dbt.escape_single_quotes": {"name": "escape_single_quotes", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "unique_id": "macro.dbt.escape_single_quotes", "macro_sql": "{% macro escape_single_quotes(expression) %}\n {{ return(adapter.dispatch('escape_single_quotes', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__escape_single_quotes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.08511, "supported_languages": null}, "macro.dbt.default__escape_single_quotes": {"name": "default__escape_single_quotes", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/escape_single_quotes.sql", "original_file_path": "macros/utils/escape_single_quotes.sql", "unique_id": "macro.dbt.default__escape_single_quotes", "macro_sql": "{% macro default__escape_single_quotes(expression) -%}\n{{ expression | replace(\"'\",\"''\") }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.085469, "supported_languages": null}, "macro.dbt.right": {"name": "right", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "unique_id": "macro.dbt.right", "macro_sql": "{% macro right(string_text, length_expression) -%}\n {{ return(adapter.dispatch('right', 'dbt') (string_text, length_expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__right"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.086175, "supported_languages": null}, "macro.dbt.default__right": {"name": "default__right", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/right.sql", "original_file_path": "macros/utils/right.sql", "unique_id": "macro.dbt.default__right", "macro_sql": "{% macro default__right(string_text, length_expression) %}\n\n right(\n {{ string_text }},\n {{ length_expression }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0865312, "supported_languages": null}, "macro.dbt.listagg": {"name": "listagg", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt.listagg", "macro_sql": "{% macro listagg(measure, delimiter_text=\"','\", order_by_clause=none, limit_num=none) 
-%}\n {{ return(adapter.dispatch('listagg', 'dbt') (measure, delimiter_text, order_by_clause, limit_num)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__listagg"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.087887, "supported_languages": null}, "macro.dbt.default__listagg": {"name": "default__listagg", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/listagg.sql", "original_file_path": "macros/utils/listagg.sql", "unique_id": "macro.dbt.default__listagg", "macro_sql": "{% macro default__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n array_slice(\n array_agg(\n {{ measure }}\n ){% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n ,0\n ,{{ limit_num }}\n ),\n {{ delimiter_text }}\n )\n {%- else %}\n listagg(\n {{ measure }},\n {{ delimiter_text }}\n )\n {% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n {%- endif %}\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.089178, "supported_languages": null}, "macro.dbt.datediff": {"name": "datediff", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": "macro.dbt.datediff", "macro_sql": "{% macro datediff(first_date, second_date, datepart) %}\n {{ return(adapter.dispatch('datediff', 'dbt')(first_date, second_date, datepart)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__datediff"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.090015, "supported_languages": null}, "macro.dbt.default__datediff": {"name": "default__datediff", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/datediff.sql", "original_file_path": "macros/utils/datediff.sql", "unique_id": "macro.dbt.default__datediff", "macro_sql": "{% macro default__datediff(first_date, second_date, datepart) -%}\n\n datediff(\n {{ datepart }},\n {{ first_date }},\n {{ second_date }}\n )\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0904791, "supported_languages": null}, "macro.dbt.safe_cast": {"name": "safe_cast", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "unique_id": "macro.dbt.safe_cast", "macro_sql": "{% macro safe_cast(field, type) %}\n {{ return(adapter.dispatch('safe_cast', 'dbt') (field, type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__safe_cast"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.091284, "supported_languages": null}, "macro.dbt.default__safe_cast": {"name": "default__safe_cast", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/safe_cast.sql", "original_file_path": "macros/utils/safe_cast.sql", "unique_id": "macro.dbt.default__safe_cast", "macro_sql": "{% macro default__safe_cast(field, type) %}\n {# most databases don't support this function yet\n so we just need to use 
cast #}\n cast({{field}} as {{type}})\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0916579, "supported_languages": null}, "macro.dbt.hash": {"name": "hash", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "unique_id": "macro.dbt.hash", "macro_sql": "{% macro hash(field) -%}\n {{ return(adapter.dispatch('hash', 'dbt') (field)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__hash"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0922902, "supported_languages": null}, "macro.dbt.default__hash": {"name": "default__hash", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/hash.sql", "original_file_path": "macros/utils/hash.sql", "unique_id": "macro.dbt.default__hash", "macro_sql": "{% macro default__hash(field) -%}\n md5(cast({{ field }} as {{ api.Column.translate_type('string') }}))\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0927832, "supported_languages": null}, "macro.dbt.cast_bool_to_text": {"name": "cast_bool_to_text", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "unique_id": "macro.dbt.cast_bool_to_text", "macro_sql": "{% macro cast_bool_to_text(field) %}\n {{ adapter.dispatch('cast_bool_to_text', 'dbt') (field) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__cast_bool_to_text"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.093413, "supported_languages": null}, "macro.dbt.default__cast_bool_to_text": {"name": "default__cast_bool_to_text", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/cast_bool_to_text.sql", "original_file_path": "macros/utils/cast_bool_to_text.sql", "unique_id": "macro.dbt.default__cast_bool_to_text", "macro_sql": "{% macro default__cast_bool_to_text(field) %}\n cast({{ field }} as {{ api.Column.translate_type('string') }})\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0938308, "supported_languages": null}, "macro.dbt.any_value": {"name": "any_value", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt.any_value", "macro_sql": "{% macro any_value(expression) -%}\n {{ return(adapter.dispatch('any_value', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__any_value"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.094443, "supported_languages": null}, "macro.dbt.default__any_value": {"name": "default__any_value", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/any_value.sql", "original_file_path": "macros/utils/any_value.sql", "unique_id": "macro.dbt.default__any_value", "macro_sql": "{% macro default__any_value(expression) -%}\n\n 
any_value({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.094721, "supported_languages": null}, "macro.dbt.position": {"name": "position", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "unique_id": "macro.dbt.position", "macro_sql": "{% macro position(substring_text, string_text) -%}\n {{ return(adapter.dispatch('position', 'dbt') (substring_text, string_text)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__position"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.095426, "supported_languages": null}, "macro.dbt.default__position": {"name": "default__position", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/position.sql", "original_file_path": "macros/utils/position.sql", "unique_id": "macro.dbt.default__position", "macro_sql": "{% macro default__position(substring_text, string_text) %}\n\n position(\n {{ substring_text }} in {{ string_text }}\n )\n\n{%- endmacro -%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.095783, "supported_languages": null}, "macro.dbt.string_literal": {"name": "string_literal", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "unique_id": "macro.dbt.string_literal", "macro_sql": "{%- macro string_literal(value) -%}\n {{ return(adapter.dispatch('string_literal', 'dbt') (value)) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__string_literal"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.096468, "supported_languages": null}, "macro.dbt.default__string_literal": {"name": "default__string_literal", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/literal.sql", "original_file_path": "macros/utils/literal.sql", "unique_id": "macro.dbt.default__string_literal", "macro_sql": "{% macro default__string_literal(value) -%}\n '{{ value }}'\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.096813, "supported_languages": null}, "macro.dbt.type_string": {"name": "type_string", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_string", "macro_sql": "\n\n{%- macro type_string() -%}\n {{ return(adapter.dispatch('type_string', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_string"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.098714, "supported_languages": null}, "macro.dbt.default__type_string": {"name": "default__type_string", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_string", "macro_sql": "{% macro default__type_string() %}\n {{ 
return(api.Column.translate_type(\"string\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.099203, "supported_languages": null}, "macro.dbt.type_timestamp": {"name": "type_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_timestamp", "macro_sql": "\n\n{%- macro type_timestamp() -%}\n {{ return(adapter.dispatch('type_timestamp', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.0996249, "supported_languages": null}, "macro.dbt.default__type_timestamp": {"name": "default__type_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_timestamp", "macro_sql": "{% macro default__type_timestamp() %}\n {{ return(api.Column.translate_type(\"timestamp\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1000152, "supported_languages": null}, "macro.dbt.type_float": {"name": "type_float", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_float", "macro_sql": "\n\n{%- macro type_float() -%}\n {{ return(adapter.dispatch('type_float', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_float"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.100458, "supported_languages": null}, "macro.dbt.default__type_float": {"name": "default__type_float", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_float", "macro_sql": "{% macro default__type_float() %}\n {{ return(api.Column.translate_type(\"float\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.10098, "supported_languages": null}, "macro.dbt.type_numeric": {"name": "type_numeric", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_numeric", "macro_sql": "\n\n{%- macro type_numeric() -%}\n {{ return(adapter.dispatch('type_numeric', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_numeric"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.101395, "supported_languages": null}, "macro.dbt.default__type_numeric": {"name": "default__type_numeric", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_numeric", "macro_sql": "{% macro default__type_numeric() %}\n {{ 
return(api.Column.numeric_type(\"numeric\", 28, 6)) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.101847, "supported_languages": null}, "macro.dbt.type_bigint": {"name": "type_bigint", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_bigint", "macro_sql": "\n\n{%- macro type_bigint() -%}\n {{ return(adapter.dispatch('type_bigint', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_bigint"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1022651, "supported_languages": null}, "macro.dbt.default__type_bigint": {"name": "default__type_bigint", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_bigint", "macro_sql": "{% macro default__type_bigint() %}\n {{ return(api.Column.translate_type(\"bigint\")) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.102656, "supported_languages": null}, "macro.dbt.type_int": {"name": "type_int", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_int", "macro_sql": "\n\n{%- macro type_int() -%}\n {{ return(adapter.dispatch('type_int', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_int"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103071, "supported_languages": null}, "macro.dbt.default__type_int": {"name": "default__type_int", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_int", "macro_sql": "{%- macro default__type_int() -%}\n {{ return(api.Column.translate_type(\"integer\")) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103501, "supported_languages": null}, "macro.dbt.type_boolean": {"name": "type_boolean", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.type_boolean", "macro_sql": "\n\n{%- macro type_boolean() -%}\n {{ return(adapter.dispatch('type_boolean', 'dbt')()) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.default__type_boolean"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.103938, "supported_languages": null}, "macro.dbt.default__type_boolean": {"name": "default__type_boolean", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/data_types.sql", "original_file_path": "macros/utils/data_types.sql", "unique_id": "macro.dbt.default__type_boolean", "macro_sql": "{%- macro default__type_boolean() -%}\n {{ 
return(api.Column.translate_type(\"boolean\")) }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.104328, "supported_languages": null}, "macro.dbt.array_concat": {"name": "array_concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "unique_id": "macro.dbt.array_concat", "macro_sql": "{% macro array_concat(array_1, array_2) -%}\n {{ return(adapter.dispatch('array_concat', 'dbt')(array_1, array_2)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__array_concat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.105006, "supported_languages": null}, "macro.dbt.default__array_concat": {"name": "default__array_concat", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_concat.sql", "original_file_path": "macros/utils/array_concat.sql", "unique_id": "macro.dbt.default__array_concat", "macro_sql": "{% macro default__array_concat(array_1, array_2) -%}\n array_cat({{ array_1 }}, {{ array_2 }})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.105355, "supported_languages": null}, "macro.dbt.bool_or": {"name": "bool_or", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "unique_id": "macro.dbt.bool_or", "macro_sql": "{% macro bool_or(expression) -%}\n {{ return(adapter.dispatch('bool_or', 'dbt') (expression)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__bool_or"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1059608, "supported_languages": null}, "macro.dbt.default__bool_or": {"name": "default__bool_or", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/bool_or.sql", "original_file_path": "macros/utils/bool_or.sql", "unique_id": "macro.dbt.default__bool_or", "macro_sql": "{% macro default__bool_or(expression) -%}\n\n bool_or({{ expression }})\n\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1062348, "supported_languages": null}, "macro.dbt.last_day": {"name": "last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.last_day", "macro_sql": "{% macro last_day(date, datepart) %}\n {{ return(adapter.dispatch('last_day', 'dbt') (date, datepart)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.107027, "supported_languages": null}, "macro.dbt.default_last_day": {"name": "default_last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.default_last_day", "macro_sql": "\n\n{%- macro default_last_day(date, datepart) -%}\n cast(\n {{dbt.dateadd('day', 
'-1',\n dbt.dateadd(datepart, '1', dbt.date_trunc(datepart, date))\n )}}\n as date)\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1076999, "supported_languages": null}, "macro.dbt.default__last_day": {"name": "default__last_day", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/last_day.sql", "original_file_path": "macros/utils/last_day.sql", "unique_id": "macro.dbt.default__last_day", "macro_sql": "{% macro default__last_day(date, datepart) -%}\n {{dbt.default_last_day(date, datepart)}}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default_last_day"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.108077, "supported_languages": null}, "macro.dbt.split_part": {"name": "split_part", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt.split_part", "macro_sql": "{% macro split_part(string_text, delimiter_text, part_number) %}\n {{ return(adapter.dispatch('split_part', 'dbt') (string_text, delimiter_text, part_number)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__split_part"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1092212, "supported_languages": null}, "macro.dbt.default__split_part": {"name": "default__split_part", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt.default__split_part", "macro_sql": "{% macro default__split_part(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n {{ part_number }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.109689, "supported_languages": null}, "macro.dbt._split_part_negative": {"name": "_split_part_negative", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/split_part.sql", "original_file_path": "macros/utils/split_part.sql", "unique_id": "macro.dbt._split_part_negative", "macro_sql": "{% macro _split_part_negative(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n length({{ string_text }})\n - length(\n replace({{ string_text }}, {{ delimiter_text }}, '')\n ) + 2 {{ part_number }}\n )\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.11041, "supported_languages": null}, "macro.dbt.date_trunc": {"name": "date_trunc", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "unique_id": "macro.dbt.date_trunc", "macro_sql": "{% macro date_trunc(datepart, date) -%}\n {{ return(adapter.dispatch('date_trunc', 'dbt') (datepart, date)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__date_trunc"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, 
"arguments": [], "created_at": 1670853278.111156, "supported_languages": null}, "macro.dbt.default__date_trunc": {"name": "default__date_trunc", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/date_trunc.sql", "original_file_path": "macros/utils/date_trunc.sql", "unique_id": "macro.dbt.default__date_trunc", "macro_sql": "{% macro default__date_trunc(datepart, date) -%}\n date_trunc('{{datepart}}', {{date}})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1115131, "supported_languages": null}, "macro.dbt.array_construct": {"name": "array_construct", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "unique_id": "macro.dbt.array_construct", "macro_sql": "{% macro array_construct(inputs=[], data_type=api.Column.translate_type('integer')) -%}\n {{ return(adapter.dispatch('array_construct', 'dbt')(inputs, data_type)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__array_construct"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1124191, "supported_languages": null}, "macro.dbt.default__array_construct": {"name": "default__array_construct", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_construct.sql", "original_file_path": "macros/utils/array_construct.sql", "unique_id": "macro.dbt.default__array_construct", "macro_sql": "{% macro default__array_construct(inputs, data_type) -%}\n {% if inputs|length > 0 %}\n array[ {{ inputs|join(' , ') }} ]\n {% else %}\n array[]::{{data_type}}[]\n {% endif %}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.113195, "supported_languages": null}, "macro.dbt.array_append": {"name": "array_append", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "unique_id": "macro.dbt.array_append", "macro_sql": "{% macro array_append(array, new_element) -%}\n {{ return(adapter.dispatch('array_append', 'dbt')(array, new_element)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__array_append"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.113871, "supported_languages": null}, "macro.dbt.default__array_append": {"name": "default__array_append", "resource_type": "macro", "package_name": "dbt", "path": "macros/utils/array_append.sql", "original_file_path": "macros/utils/array_append.sql", "unique_id": "macro.dbt.default__array_append", "macro_sql": "{% macro default__array_append(array, new_element) -%}\n array_append({{ array }}, {{ new_element }})\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.114215, "supported_languages": null}, "macro.dbt.create_schema": {"name": "create_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.create_schema", "macro_sql": "{% macro create_schema(relation) 
-%}\n {{ adapter.dispatch('create_schema', 'dbt')(relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__create_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115037, "supported_languages": null}, "macro.dbt.default__create_schema": {"name": "default__create_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.default__create_schema", "macro_sql": "{% macro default__create_schema(relation) -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier() }}\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115519, "supported_languages": null}, "macro.dbt.drop_schema": {"name": "drop_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.drop_schema", "macro_sql": "{% macro drop_schema(relation) -%}\n {{ adapter.dispatch('drop_schema', 'dbt')(relation) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__drop_schema"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.115935, "supported_languages": null}, "macro.dbt.default__drop_schema": {"name": "default__drop_schema", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/schema.sql", "original_file_path": "macros/adapters/schema.sql", "unique_id": "macro.dbt.default__drop_schema", "macro_sql": "{% macro default__drop_schema(relation) -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier() }} cascade\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.116411, "supported_languages": null}, "macro.dbt.current_timestamp": {"name": "current_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp", "macro_sql": "{%- macro current_timestamp() -%}\n {{ adapter.dispatch('current_timestamp', 'dbt')() }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.117439, "supported_languages": null}, "macro.dbt.default__current_timestamp": {"name": "default__current_timestamp", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp", "macro_sql": "{% macro default__current_timestamp() -%}\n {{ exceptions.raise_not_implemented(\n 'current_timestamp macro not implemented for adapter ' + adapter.type()) }}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 
1670853278.117823, "supported_languages": null}, "macro.dbt.snapshot_get_time": {"name": "snapshot_get_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.snapshot_get_time", "macro_sql": "\n\n{%- macro snapshot_get_time() -%}\n {{ adapter.dispatch('snapshot_get_time', 'dbt')() }}\n{%- endmacro -%}\n\n", "depends_on": {"macros": ["macro.dbt_postgres.postgres__snapshot_get_time"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118193, "supported_languages": null}, "macro.dbt.default__snapshot_get_time": {"name": "default__snapshot_get_time", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__snapshot_get_time", "macro_sql": "{% macro default__snapshot_get_time() %}\n {{ current_timestamp() }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118471, "supported_languages": null}, "macro.dbt.current_timestamp_backcompat": {"name": "current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp_backcompat", "macro_sql": "{% macro current_timestamp_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.118895, "supported_languages": null}, "macro.dbt.default__current_timestamp_backcompat": {"name": "default__current_timestamp_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp_backcompat", "macro_sql": "{% macro default__current_timestamp_backcompat() %}\n current_timestamp::timestamp\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.119096, "supported_languages": null}, "macro.dbt.current_timestamp_in_utc_backcompat": {"name": "current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.current_timestamp_in_utc_backcompat", "macro_sql": "{% macro current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_in_utc_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__current_timestamp_in_utc_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1195142, "supported_languages": null}, "macro.dbt.default__current_timestamp_in_utc_backcompat": {"name": "default__current_timestamp_in_utc_backcompat", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/adapters/timestamps.sql", "original_file_path": "macros/adapters/timestamps.sql", "unique_id": "macro.dbt.default__current_timestamp_in_utc_backcompat", "macro_sql": "{% macro default__current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.current_timestamp_backcompat", "macro.dbt_postgres.postgres__current_timestamp_backcompat"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.119937, "supported_languages": null}, "macro.dbt.get_create_index_sql": {"name": "get_create_index_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.get_create_index_sql", "macro_sql": "{% macro get_create_index_sql(relation, index_dict) -%}\n {{ return(adapter.dispatch('get_create_index_sql', 'dbt')(relation, index_dict)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_create_index_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.120949, "supported_languages": null}, "macro.dbt.default__get_create_index_sql": {"name": "default__get_create_index_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.default__get_create_index_sql", "macro_sql": "{% macro default__get_create_index_sql(relation, index_dict) -%}\n {% do return(None) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.121309, "supported_languages": null}, "macro.dbt.create_indexes": {"name": "create_indexes", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.create_indexes", "macro_sql": "{% macro create_indexes(relation) -%}\n {{ adapter.dispatch('create_indexes', 'dbt')(relation) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.default__create_indexes"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.12189, "supported_languages": null}, "macro.dbt.default__create_indexes": {"name": "default__create_indexes", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/indexes.sql", "original_file_path": "macros/adapters/indexes.sql", "unique_id": "macro.dbt.default__create_indexes", "macro_sql": "{% macro default__create_indexes(relation) -%}\n {%- set _indexes = config.get('indexes', default=[]) -%}\n\n {% for _index_dict in _indexes %}\n {% set create_index_sql = get_create_index_sql(relation, _index_dict) %}\n {% if create_index_sql %}\n {% do run_query(create_index_sql) %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.get_create_index_sql", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1229818, "supported_languages": null}, "macro.dbt.make_intermediate_relation": {"name": "make_intermediate_relation", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_intermediate_relation", "macro_sql": "{% macro make_intermediate_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_intermediate_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_intermediate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1292732, "supported_languages": null}, "macro.dbt.default__make_intermediate_relation": {"name": "default__make_intermediate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_intermediate_relation", "macro_sql": "{% macro default__make_intermediate_relation(base_relation, suffix) %}\n {{ return(default__make_temp_relation(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.129865, "supported_languages": null}, "macro.dbt.make_temp_relation": {"name": "make_temp_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_temp_relation", "macro_sql": "{% macro make_temp_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_temp_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_temp_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.130422, "supported_languages": null}, "macro.dbt.default__make_temp_relation": {"name": "default__make_temp_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_temp_relation", "macro_sql": "{% macro default__make_temp_relation(base_relation, suffix) %}\n {%- set temp_identifier = base_relation.identifier ~ suffix -%}\n {%- set temp_relation = base_relation.incorporate(\n path={\"identifier\": temp_identifier}) -%}\n\n {{ return(temp_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.131156, "supported_languages": null}, "macro.dbt.make_backup_relation": {"name": "make_backup_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.make_backup_relation", "macro_sql": "{% macro make_backup_relation(base_relation, backup_relation_type, suffix='__dbt_backup') %}\n {{ return(adapter.dispatch('make_backup_relation', 'dbt')(base_relation, backup_relation_type, suffix)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__make_backup_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.131776, "supported_languages": null}, 
"macro.dbt.default__make_backup_relation": {"name": "default__make_backup_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__make_backup_relation", "macro_sql": "{% macro default__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {%- set backup_identifier = base_relation.identifier ~ suffix -%}\n {%- set backup_relation = base_relation.incorporate(\n path={\"identifier\": backup_identifier},\n type=backup_relation_type\n ) -%}\n {{ return(backup_relation) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1325812, "supported_languages": null}, "macro.dbt.drop_relation": {"name": "drop_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.drop_relation", "macro_sql": "{% macro drop_relation(relation) -%}\n {{ return(adapter.dispatch('drop_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__drop_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.133047, "supported_languages": null}, "macro.dbt.default__drop_relation": {"name": "default__drop_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__drop_relation", "macro_sql": "{% macro default__drop_relation(relation) -%}\n {% call statement('drop_relation', auto_begin=False) -%}\n drop {{ relation.type }} if exists {{ relation }} cascade\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.133596, "supported_languages": null}, "macro.dbt.truncate_relation": {"name": "truncate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.truncate_relation", "macro_sql": "{% macro truncate_relation(relation) -%}\n {{ return(adapter.dispatch('truncate_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__truncate_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.134069, "supported_languages": null}, "macro.dbt.default__truncate_relation": {"name": "default__truncate_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__truncate_relation", "macro_sql": "{% macro default__truncate_relation(relation) -%}\n {% call statement('truncate_relation') -%}\n truncate table {{ relation }}\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1344929, "supported_languages": null}, "macro.dbt.rename_relation": {"name": "rename_relation", "resource_type": "macro", 
"package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.rename_relation", "macro_sql": "{% macro rename_relation(from_relation, to_relation) -%}\n {{ return(adapter.dispatch('rename_relation', 'dbt')(from_relation, to_relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__rename_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.13501, "supported_languages": null}, "macro.dbt.default__rename_relation": {"name": "default__rename_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__rename_relation", "macro_sql": "{% macro default__rename_relation(from_relation, to_relation) -%}\n {% set target_name = adapter.quote_as_configured(to_relation.identifier, 'identifier') %}\n {% call statement('rename_relation') -%}\n alter table {{ from_relation }} rename to {{ target_name }}\n {%- endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.135726, "supported_languages": null}, "macro.dbt.get_or_create_relation": {"name": "get_or_create_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.get_or_create_relation", "macro_sql": "{% macro get_or_create_relation(database, schema, identifier, type) -%}\n {{ return(adapter.dispatch('get_or_create_relation', 'dbt')(database, schema, identifier, type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_or_create_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.136352, "supported_languages": null}, "macro.dbt.default__get_or_create_relation": {"name": "default__get_or_create_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.default__get_or_create_relation", "macro_sql": "{% macro default__get_or_create_relation(database, schema, identifier, type) %}\n {%- set target_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n\n {% if target_relation %}\n {% do return([true, target_relation]) %}\n {% endif %}\n\n {%- set new_relation = api.Relation.create(\n database=database,\n schema=schema,\n identifier=identifier,\n type=type\n ) -%}\n {% do return([false, new_relation]) %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1378748, "supported_languages": null}, "macro.dbt.load_cached_relation": {"name": "load_cached_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.load_cached_relation", "macro_sql": "{% macro load_cached_relation(relation) %}\n {% do return(adapter.get_relation(\n database=relation.database,\n schema=relation.schema,\n identifier=relation.identifier\n )) -%}\n{% endmacro %}", "depends_on": 
{"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1384661, "supported_languages": null}, "macro.dbt.load_relation": {"name": "load_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.load_relation", "macro_sql": "{% macro load_relation(relation) %}\n {{ return(load_cached_relation(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.load_cached_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.138835, "supported_languages": null}, "macro.dbt.drop_relation_if_exists": {"name": "drop_relation_if_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/relation.sql", "original_file_path": "macros/adapters/relation.sql", "unique_id": "macro.dbt.drop_relation_if_exists", "macro_sql": "{% macro drop_relation_if_exists(relation) %}\n {% if relation is not none %}\n {{ adapter.drop_relation(relation) }}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.139338, "supported_languages": null}, "macro.dbt.collect_freshness": {"name": "collect_freshness", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "unique_id": "macro.dbt.collect_freshness", "macro_sql": "{% macro collect_freshness(source, loaded_at_field, filter) %}\n {{ return(adapter.dispatch('collect_freshness', 'dbt')(source, loaded_at_field, filter))}}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__collect_freshness"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.140299, "supported_languages": null}, "macro.dbt.default__collect_freshness": {"name": "default__collect_freshness", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/freshness.sql", "original_file_path": "macros/adapters/freshness.sql", "unique_id": "macro.dbt.default__collect_freshness", "macro_sql": "{% macro default__collect_freshness(source, loaded_at_field, filter) %}\n {% call statement('collect_freshness', fetch_result=True, auto_begin=False) -%}\n select\n max({{ loaded_at_field }}) as max_loaded_at,\n {{ current_timestamp() }} as snapshotted_at\n from {{ source }}\n {% if filter %}\n where {{ filter }}\n {% endif %}\n {% endcall %}\n {{ return(load_result('collect_freshness').table) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement", "macro.dbt.current_timestamp"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.141366, "supported_languages": null}, "macro.dbt.copy_grants": {"name": "copy_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.copy_grants", "macro_sql": "{% macro copy_grants() %}\n {{ return(adapter.dispatch('copy_grants', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": 
null}, "patch_path": null, "arguments": [], "created_at": 1670853278.144733, "supported_languages": null}, "macro.dbt.default__copy_grants": {"name": "default__copy_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__copy_grants", "macro_sql": "{% macro default__copy_grants() %}\n {{ return(True) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.14506, "supported_languages": null}, "macro.dbt.support_multiple_grantees_per_dcl_statement": {"name": "support_multiple_grantees_per_dcl_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.support_multiple_grantees_per_dcl_statement", "macro_sql": "{% macro support_multiple_grantees_per_dcl_statement() %}\n {{ return(adapter.dispatch('support_multiple_grantees_per_dcl_statement', 'dbt')()) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.145495, "supported_languages": null}, "macro.dbt.default__support_multiple_grantees_per_dcl_statement": {"name": "default__support_multiple_grantees_per_dcl_statement", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__support_multiple_grantees_per_dcl_statement", "macro_sql": "\n\n{%- macro default__support_multiple_grantees_per_dcl_statement() -%}\n {{ return(True) }}\n{%- endmacro -%}\n\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1457942, "supported_languages": null}, "macro.dbt.should_revoke": {"name": "should_revoke", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.should_revoke", "macro_sql": "{% macro should_revoke(existing_relation, full_refresh_mode=True) %}\n\n {% if not existing_relation %}\n {#-- The table doesn't already exist, so no grants to copy over --#}\n {{ return(False) }}\n {% elif full_refresh_mode %}\n {#-- The object is being REPLACED -- whether grants are copied over depends on the value of user config --#}\n {{ return(copy_grants()) }}\n {% else %}\n {#-- The table is being merged/upserted/inserted -- grants will be carried over --#}\n {{ return(True) }}\n {% endif %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.copy_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.146819, "supported_languages": null}, "macro.dbt.get_show_grant_sql": {"name": "get_show_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_show_grant_sql", "macro_sql": "{% macro get_show_grant_sql(relation) %}\n {{ return(adapter.dispatch(\"get_show_grant_sql\", \"dbt\")(relation)) }}\n{% 
endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_show_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.147292, "supported_languages": null}, "macro.dbt.default__get_show_grant_sql": {"name": "default__get_show_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_show_grant_sql", "macro_sql": "{% macro default__get_show_grant_sql(relation) %}\n show grants on {{ relation }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.147562, "supported_languages": null}, "macro.dbt.get_grant_sql": {"name": "get_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_grant_sql", "macro_sql": "{% macro get_grant_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_grant_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_grant_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1481369, "supported_languages": null}, "macro.dbt.default__get_grant_sql": {"name": "default__get_grant_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_grant_sql", "macro_sql": "\n\n{%- macro default__get_grant_sql(relation, privilege, grantees) -%}\n grant {{ privilege }} on {{ relation }} to {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.148619, "supported_languages": null}, "macro.dbt.get_revoke_sql": {"name": "get_revoke_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_revoke_sql", "macro_sql": "{% macro get_revoke_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_revoke_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_revoke_sql"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1491919, "supported_languages": null}, "macro.dbt.default__get_revoke_sql": {"name": "default__get_revoke_sql", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_revoke_sql", "macro_sql": "\n\n{%- macro default__get_revoke_sql(relation, privilege, grantees) -%}\n revoke {{ privilege }} on {{ relation }} from {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.149673, "supported_languages": null}, 
"macro.dbt.get_dcl_statement_list": {"name": "get_dcl_statement_list", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.get_dcl_statement_list", "macro_sql": "{% macro get_dcl_statement_list(relation, grant_config, get_dcl_macro) %}\n {{ return(adapter.dispatch('get_dcl_statement_list', 'dbt')(relation, grant_config, get_dcl_macro)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_dcl_statement_list"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1502562, "supported_languages": null}, "macro.dbt.default__get_dcl_statement_list": {"name": "default__get_dcl_statement_list", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__get_dcl_statement_list", "macro_sql": "\n\n{%- macro default__get_dcl_statement_list(relation, grant_config, get_dcl_macro) -%}\n {#\n -- Unpack grant_config into specific privileges and the set of users who need them granted/revoked.\n -- Depending on whether this database supports multiple grantees per statement, pass in the list of\n -- all grantees per privilege, or (if not) template one statement per privilege-grantee pair.\n -- `get_dcl_macro` will be either `get_grant_sql` or `get_revoke_sql`\n #}\n {%- set dcl_statements = [] -%}\n {%- for privilege, grantees in grant_config.items() %}\n {%- if support_multiple_grantees_per_dcl_statement() and grantees -%}\n {%- set dcl = get_dcl_macro(relation, privilege, grantees) -%}\n {%- do dcl_statements.append(dcl) -%}\n {%- else -%}\n {%- for grantee in grantees -%}\n {% set dcl = get_dcl_macro(relation, privilege, [grantee]) %}\n {%- do dcl_statements.append(dcl) -%}\n {% endfor -%}\n {%- endif -%}\n {%- endfor -%}\n {{ return(dcl_statements) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt.support_multiple_grantees_per_dcl_statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.151925, "supported_languages": null}, "macro.dbt.call_dcl_statements": {"name": "call_dcl_statements", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.call_dcl_statements", "macro_sql": "{% macro call_dcl_statements(dcl_statement_list) %}\n {{ return(adapter.dispatch(\"call_dcl_statements\", \"dbt\")(dcl_statement_list)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.152409, "supported_languages": null}, "macro.dbt.default__call_dcl_statements": {"name": "default__call_dcl_statements", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__call_dcl_statements", "macro_sql": "{% macro default__call_dcl_statements(dcl_statement_list) %}\n {#\n -- By default, supply all grant + revoke statements in a single semicolon-separated block,\n -- so that they're all processed together.\n\n -- Some databases do not support this. 
Those adapters will need to override this macro\n -- to run each statement individually.\n #}\n {% call statement('grants') %}\n {% for dcl_statement in dcl_statement_list %}\n {{ dcl_statement }};\n {% endfor %}\n {% endcall %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.153051, "supported_languages": null}, "macro.dbt.apply_grants": {"name": "apply_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.apply_grants", "macro_sql": "{% macro apply_grants(relation, grant_config, should_revoke) %}\n {{ return(adapter.dispatch(\"apply_grants\", \"dbt\")(relation, grant_config, should_revoke)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__apply_grants"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.153632, "supported_languages": null}, "macro.dbt.default__apply_grants": {"name": "default__apply_grants", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/apply_grants.sql", "original_file_path": "macros/adapters/apply_grants.sql", "unique_id": "macro.dbt.default__apply_grants", "macro_sql": "{% macro default__apply_grants(relation, grant_config, should_revoke=True) %}\n {#-- If grant_config is {} or None, this is a no-op --#}\n {% if grant_config %}\n {% if should_revoke %}\n {#-- We think previous grants may have carried over --#}\n {#-- Show current grants and calculate diffs --#}\n {% set current_grants_table = run_query(get_show_grant_sql(relation)) %}\n {% set current_grants_dict = adapter.standardize_grants_dict(current_grants_table) %}\n {% set needs_granting = diff_of_two_dicts(grant_config, current_grants_dict) %}\n {% set needs_revoking = diff_of_two_dicts(current_grants_dict, grant_config) %}\n {% if not (needs_granting or needs_revoking) %}\n {{ log('On ' ~ relation ~': All grants are in place, no revocation or granting needed.')}}\n {% endif %}\n {% else %}\n {#-- We don't think there's any chance of previous grants having carried over. --#}\n {#-- Jump straight to granting what the user has configured. 
--#}\n {% set needs_revoking = {} %}\n {% set needs_granting = grant_config %}\n {% endif %}\n {% if needs_granting or needs_revoking %}\n {% set revoke_statement_list = get_dcl_statement_list(relation, needs_revoking, get_revoke_sql) %}\n {% set grant_statement_list = get_dcl_statement_list(relation, needs_granting, get_grant_sql) %}\n {% set dcl_statement_list = revoke_statement_list + grant_statement_list %}\n {% if dcl_statement_list %}\n {{ call_dcl_statements(dcl_statement_list) }}\n {% endif %}\n {% endif %}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.get_show_grant_sql", "macro.dbt.get_dcl_statement_list", "macro.dbt.call_dcl_statements"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.156651, "supported_languages": null}, "macro.dbt.alter_column_comment": {"name": "alter_column_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.alter_column_comment", "macro_sql": "{% macro alter_column_comment(relation, column_dict) -%}\n {{ return(adapter.dispatch('alter_column_comment', 'dbt')(relation, column_dict)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.158199, "supported_languages": null}, "macro.dbt.default__alter_column_comment": {"name": "default__alter_column_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__alter_column_comment", "macro_sql": "{% macro default__alter_column_comment(relation, column_dict) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_column_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1586392, "supported_languages": null}, "macro.dbt.alter_relation_comment": {"name": "alter_relation_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.alter_relation_comment", "macro_sql": "{% macro alter_relation_comment(relation, relation_comment) -%}\n {{ return(adapter.dispatch('alter_relation_comment', 'dbt')(relation, relation_comment)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__alter_relation_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.159168, "supported_languages": null}, "macro.dbt.default__alter_relation_comment": {"name": "default__alter_relation_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__alter_relation_comment", "macro_sql": "{% macro default__alter_relation_comment(relation, relation_comment) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_relation_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": 
{"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.159607, "supported_languages": null}, "macro.dbt.persist_docs": {"name": "persist_docs", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.persist_docs", "macro_sql": "{% macro persist_docs(relation, model, for_relation=true, for_columns=true) -%}\n {{ return(adapter.dispatch('persist_docs', 'dbt')(relation, model, for_relation, for_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__persist_docs"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.160279, "supported_languages": null}, "macro.dbt.default__persist_docs": {"name": "default__persist_docs", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/persist_docs.sql", "original_file_path": "macros/adapters/persist_docs.sql", "unique_id": "macro.dbt.default__persist_docs", "macro_sql": "{% macro default__persist_docs(relation, model, for_relation, for_columns) -%}\n {% if for_relation and config.persist_relation_docs() and model.description %}\n {% do run_query(alter_relation_comment(relation, model.description)) %}\n {% endif %}\n\n {% if for_columns and config.persist_column_docs() and model.columns %}\n {% do run_query(alter_column_comment(relation, model.columns)) %}\n {% endif %}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query", "macro.dbt.alter_relation_comment", "macro.dbt.alter_column_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.161473, "supported_languages": null}, "macro.dbt.get_catalog": {"name": "get_catalog", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.get_catalog", "macro_sql": "{% macro get_catalog(information_schema, schemas) -%}\n {{ return(adapter.dispatch('get_catalog', 'dbt')(information_schema, schemas)) }}\n{%- endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_catalog"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1643069, "supported_languages": null}, "macro.dbt.default__get_catalog": {"name": "default__get_catalog", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__get_catalog", "macro_sql": "{% macro default__get_catalog(information_schema, schemas) -%}\n\n {% set typename = adapter.type() %}\n {% set msg -%}\n get_catalog not implemented for {{ typename }}\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.16497, "supported_languages": null}, "macro.dbt.information_schema_name": {"name": "information_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.information_schema_name", "macro_sql": "{% macro 
information_schema_name(database) %}\n {{ return(adapter.dispatch('information_schema_name', 'dbt')(database)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__information_schema_name"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.165441, "supported_languages": null}, "macro.dbt.default__information_schema_name": {"name": "default__information_schema_name", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__information_schema_name", "macro_sql": "{% macro default__information_schema_name(database) -%}\n {%- if database -%}\n {{ database }}.INFORMATION_SCHEMA\n {%- else -%}\n INFORMATION_SCHEMA\n {%- endif -%}\n{%- endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.166005, "supported_languages": null}, "macro.dbt.list_schemas": {"name": "list_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.list_schemas", "macro_sql": "{% macro list_schemas(database) -%}\n {{ return(adapter.dispatch('list_schemas', 'dbt')(database)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_schemas"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.166465, "supported_languages": null}, "macro.dbt.default__list_schemas": {"name": "default__list_schemas", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__list_schemas", "macro_sql": "{% macro default__list_schemas(database) -%}\n {% set sql %}\n select distinct schema_name\n from {{ information_schema_name(database) }}.SCHEMATA\n where catalog_name ilike '{{ database }}'\n {% endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.information_schema_name", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.167077, "supported_languages": null}, "macro.dbt.check_schema_exists": {"name": "check_schema_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.check_schema_exists", "macro_sql": "{% macro check_schema_exists(information_schema, schema) -%}\n {{ return(adapter.dispatch('check_schema_exists', 'dbt')(information_schema, schema)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__check_schema_exists"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.167588, "supported_languages": null}, "macro.dbt.default__check_schema_exists": {"name": "default__check_schema_exists", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__check_schema_exists", "macro_sql": "{% macro default__check_schema_exists(information_schema, schema) -%}\n 
{% set sql -%}\n select count(*)\n from {{ information_schema.replace(information_schema_view='SCHEMATA') }}\n where catalog_name='{{ information_schema.database }}'\n and schema_name='{{ schema }}'\n {%- endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.replace", "macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.168344, "supported_languages": null}, "macro.dbt.list_relations_without_caching": {"name": "list_relations_without_caching", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.list_relations_without_caching", "macro_sql": "{% macro list_relations_without_caching(schema_relation) %}\n {{ return(adapter.dispatch('list_relations_without_caching', 'dbt')(schema_relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__list_relations_without_caching"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.168813, "supported_languages": null}, "macro.dbt.default__list_relations_without_caching": {"name": "default__list_relations_without_caching", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/metadata.sql", "original_file_path": "macros/adapters/metadata.sql", "unique_id": "macro.dbt.default__list_relations_without_caching", "macro_sql": "{% macro default__list_relations_without_caching(schema_relation) %}\n {{ exceptions.raise_not_implemented(\n 'list_relations_without_caching macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.169232, "supported_languages": null}, "macro.dbt.get_columns_in_relation": {"name": "get_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.get_columns_in_relation", "macro_sql": "{% macro get_columns_in_relation(relation) -%}\n {{ return(adapter.dispatch('get_columns_in_relation', 'dbt')(relation)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt_postgres.postgres__get_columns_in_relation"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.172499, "supported_languages": null}, "macro.dbt.default__get_columns_in_relation": {"name": "default__get_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__get_columns_in_relation", "macro_sql": "{% macro default__get_columns_in_relation(relation) -%}\n {{ exceptions.raise_not_implemented(\n 'get_columns_in_relation macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.172908, "supported_languages": null}, "macro.dbt.sql_convert_columns_in_relation": {"name": "sql_convert_columns_in_relation", "resource_type": "macro", "package_name": "dbt", "path": 
"macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.sql_convert_columns_in_relation", "macro_sql": "{% macro sql_convert_columns_in_relation(table) -%}\n {% set columns = [] %}\n {% for row in table %}\n {% do columns.append(api.Column(*row)) %}\n {% endfor %}\n {{ return(columns) }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.173693, "supported_languages": null}, "macro.dbt.get_columns_in_query": {"name": "get_columns_in_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.get_columns_in_query", "macro_sql": "{% macro get_columns_in_query(select_sql) -%}\n {{ return(adapter.dispatch('get_columns_in_query', 'dbt')(select_sql)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__get_columns_in_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.174159, "supported_languages": null}, "macro.dbt.default__get_columns_in_query": {"name": "default__get_columns_in_query", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__get_columns_in_query", "macro_sql": "{% macro default__get_columns_in_query(select_sql) %}\n {% call statement('get_columns_in_query', fetch_result=True, auto_begin=False) -%}\n select * from (\n {{ select_sql }}\n ) as __dbt_sbq\n where false\n limit 0\n {% endcall %}\n\n {{ return(load_result('get_columns_in_query').table.columns | map(attribute='name') | list) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.175012, "supported_languages": null}, "macro.dbt.alter_column_type": {"name": "alter_column_type", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.alter_column_type", "macro_sql": "{% macro alter_column_type(relation, column_name, new_column_type) -%}\n {{ return(adapter.dispatch('alter_column_type', 'dbt')(relation, column_name, new_column_type)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__alter_column_type"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1755981, "supported_languages": null}, "macro.dbt.default__alter_column_type": {"name": "default__alter_column_type", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__alter_column_type", "macro_sql": "{% macro default__alter_column_type(relation, column_name, new_column_type) -%}\n {#\n 1. Create a new column (w/ temp name and correct type)\n 2. Copy data over to it\n 3. Drop the existing column (cascade!)\n 4. 
Rename the new column to existing column\n #}\n {%- set tmp_column = column_name + \"__dbt_alter\" -%}\n\n {% call statement('alter_column_type') %}\n alter table {{ relation }} add column {{ adapter.quote(tmp_column) }} {{ new_column_type }};\n update {{ relation }} set {{ adapter.quote(tmp_column) }} = {{ adapter.quote(column_name) }};\n alter table {{ relation }} drop column {{ adapter.quote(column_name) }} cascade;\n alter table {{ relation }} rename column {{ adapter.quote(tmp_column) }} to {{ adapter.quote(column_name) }}\n {% endcall %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.statement"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.177139, "supported_languages": null}, "macro.dbt.alter_relation_add_remove_columns": {"name": "alter_relation_add_remove_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.alter_relation_add_remove_columns", "macro_sql": "{% macro alter_relation_add_remove_columns(relation, add_columns = none, remove_columns = none) -%}\n {{ return(adapter.dispatch('alter_relation_add_remove_columns', 'dbt')(relation, add_columns, remove_columns)) }}\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.default__alter_relation_add_remove_columns"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.177797, "supported_languages": null}, "macro.dbt.default__alter_relation_add_remove_columns": {"name": "default__alter_relation_add_remove_columns", "resource_type": "macro", "package_name": "dbt", "path": "macros/adapters/columns.sql", "original_file_path": "macros/adapters/columns.sql", "unique_id": "macro.dbt.default__alter_relation_add_remove_columns", "macro_sql": "{% macro default__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}\n\n {% if add_columns is none %}\n {% set add_columns = [] %}\n {% endif %}\n {% if remove_columns is none %}\n {% set remove_columns = [] %}\n {% endif %}\n\n {% set sql -%}\n\n alter {{ relation.type }} {{ relation }}\n\n {% for column in add_columns %}\n add column {{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}\n {% endfor %}{{ ',' if add_columns and remove_columns }}\n\n {% for column in remove_columns %}\n drop column {{ column.name }}{{ ',' if not loop.last }}\n {% endfor %}\n\n {%- endset -%}\n\n {% do run_query(sql) %}\n\n{% endmacro %}", "depends_on": {"macros": ["macro.dbt.run_query"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.179789, "supported_languages": null}, "macro.dbt.build_ref_function": {"name": "build_ref_function", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.build_ref_function", "macro_sql": "{% macro build_ref_function(model) %}\n\n {%- set ref_dict = {} -%}\n {%- for _ref in model.refs -%}\n {%- set resolved = ref(*_ref) -%}\n {%- do ref_dict.update({_ref | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef ref(*args,dbt_load_df_function):\n refs = {{ ref_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(refs[key])\n\n{% endmacro %}", "depends_on": 
{"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.182821, "supported_languages": null}, "macro.dbt.build_source_function": {"name": "build_source_function", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.build_source_function", "macro_sql": "{% macro build_source_function(model) %}\n\n {%- set source_dict = {} -%}\n {%- for _source in model.sources -%}\n {%- set resolved = source(*_source) -%}\n {%- do source_dict.update({_source | join(\".\"): resolved.quote(database=False, schema=False, identifier=False) | string}) -%}\n {%- endfor -%}\n\ndef source(*args, dbt_load_df_function):\n sources = {{ source_dict | tojson }}\n key = \".\".join(args)\n return dbt_load_df_function(sources[key])\n\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.183993, "supported_languages": null}, "macro.dbt.build_config_dict": {"name": "build_config_dict", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.build_config_dict", "macro_sql": "{% macro build_config_dict(model) %}\n {%- set config_dict = {} -%}\n {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %}\n {%- for key, default in config_dbt_used -%}\n {# weird type testing with enum, would be much easier to write this logic in Python! #}\n {%- if key == 'language' -%}\n {%- set value = 'python' -%}\n {%- endif -%}\n {%- set value = model.config.get(key, default) -%}\n {%- do config_dict.update({key: value}) -%}\n {%- endfor -%}\nconfig_dict = {{ config_dict }}\n{% endmacro %}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.185586, "supported_languages": null}, "macro.dbt.py_script_postfix": {"name": "py_script_postfix", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.py_script_postfix", "macro_sql": "{% macro py_script_postfix(model) %}\n# This part is user provided model code\n# you will need to copy the next section to run the code\n# COMMAND ----------\n# this part is dbt logic for get ref work, do not modify\n\n{{ build_ref_function(model ) }}\n{{ build_source_function(model ) }}\n{{ build_config_dict(model) }}\n\nclass config:\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def get(key, default=None):\n return config_dict.get(key, default)\n\nclass this:\n \"\"\"dbt.this() or dbt.this.identifier\"\"\"\n database = '{{ this.database }}'\n schema = '{{ this.schema }}'\n identifier = '{{ this.identifier }}'\n def __repr__(self):\n return '{{ this }}'\n\n\nclass dbtObj:\n def __init__(self, load_df_function) -> None:\n self.source = lambda *args: source(*args, dbt_load_df_function=load_df_function)\n self.ref = lambda *args: ref(*args, dbt_load_df_function=load_df_function)\n self.config = config\n self.this = this()\n self.is_incremental = {{ is_incremental() }}\n\n# COMMAND ----------\n{{py_script_comment()}}\n{% endmacro %}", "depends_on": {"macros": 
["macro.dbt.build_ref_function", "macro.dbt.build_source_function", "macro.dbt.build_config_dict", "macro.dbt.is_incremental", "macro.dbt.py_script_comment"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.186578, "supported_languages": null}, "macro.dbt.py_script_comment": {"name": "py_script_comment", "resource_type": "macro", "package_name": "dbt", "path": "macros/python_model/python.sql", "original_file_path": "macros/python_model/python.sql", "unique_id": "macro.dbt.py_script_comment", "macro_sql": "{%macro py_script_comment()%}\n{%endmacro%}", "depends_on": {"macros": []}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1867762, "supported_languages": null}, "macro.dbt.test_unique": {"name": "test_unique", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_unique", "macro_sql": "{% test unique(model, column_name) %}\n {% set macro = adapter.dispatch('test_unique', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_unique"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1879969, "supported_languages": null}, "macro.dbt.test_not_null": {"name": "test_not_null", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_not_null", "macro_sql": "{% test not_null(model, column_name) %}\n {% set macro = adapter.dispatch('test_not_null', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_not_null"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.188813, "supported_languages": null}, "macro.dbt.test_accepted_values": {"name": "test_accepted_values", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_accepted_values", "macro_sql": "{% test accepted_values(model, column_name, values, quote=True) %}\n {% set macro = adapter.dispatch('test_accepted_values', 'dbt') %}\n {{ macro(model, column_name, values, quote) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_accepted_values"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.189566, "supported_languages": null}, "macro.dbt.test_relationships": {"name": "test_relationships", "resource_type": "macro", "package_name": "dbt", "path": "tests/generic/builtin.sql", "original_file_path": "tests/generic/builtin.sql", "unique_id": "macro.dbt.test_relationships", "macro_sql": "{% test relationships(model, column_name, to, field) %}\n {% set macro = adapter.dispatch('test_relationships', 'dbt') %}\n {{ macro(model, column_name, to, field) }}\n{% endtest %}", "depends_on": {"macros": ["macro.dbt.default__test_relationships"]}, "description": "", "meta": {}, "docs": {"show": true, "node_color": null}, "patch_path": null, "arguments": [], "created_at": 1670853278.1902661, "supported_languages": null}}, "docs": {"doc.dbt.__overview__": {"name": 
"__overview__", "resource_type": "doc", "package_name": "dbt", "path": "overview.md", "original_file_path": "docs/overview.md", "unique_id": "doc.dbt.__overview__", "block_contents": "### Welcome!\n\nWelcome to the auto-generated documentation for your dbt project!\n\n### Navigation\n\nYou can use the `Project` and `Database` navigation tabs on the left side of the window to explore the models\nin your project.\n\n#### Project Tab\nThe `Project` tab mirrors the directory structure of your dbt project. In this tab, you can see all of the\nmodels defined in your dbt project, as well as models imported from dbt packages.\n\n#### Database Tab\nThe `Database` tab also exposes your models, but in a format that looks more like a database explorer. This view\nshows relations (tables and views) grouped into database schemas. Note that ephemeral models are _not_ shown\nin this interface, as they do not exist in the database.\n\n### Graph Exploration\nYou can click the blue icon on the bottom-right corner of the page to view the lineage graph of your models.\n\nOn model pages, you'll see the immediate parents and children of the model you're exploring. By clicking the `Expand`\nbutton at the top-right of this lineage pane, you'll be able to see all of the models that are used to build,\nor are built from, the model you're exploring.\n\nOnce expanded, you'll be able to use the `--select` and `--exclude` model selection syntax to filter the\nmodels in the graph. For more information on model selection, check out the [dbt docs](https://docs.getdbt.com/docs/model-selection-syntax).\n\nNote that you can also right-click on models to interactively filter and explore the graph.\n\n---\n\n### More information\n\n- [What is dbt](https://docs.getdbt.com/docs/introduction)?\n- Read the [dbt viewpoint](https://docs.getdbt.com/docs/viewpoint)\n- [Installation](https://docs.getdbt.com/docs/installation)\n- Join the [dbt Community](https://www.getdbt.com/community/) for questions and discussion"}}, "exposures": {}, "metrics": {"metric.test.my_metric": {"name": "my_metric", "resource_type": "metric", "package_name": "test", "path": "metric.yml", "original_file_path": "models/metric.yml", "unique_id": "metric.test.my_metric", "fqn": ["test", "my_metric"], "description": "", "label": "Count records", "calculation_method": "count", "timestamp": "updated_at", "expression": "*", "filters": [], "time_grains": ["day"], "dimensions": [], "window": null, "model": "ref('my_model')", "model_unique_id": null, "meta": {}, "tags": [], "config": {"enabled": true}, "unrendered_config": {}, "sources": [], "depends_on": {"macros": [], "nodes": ["model.test.my_model"]}, "refs": [["my_model"]], "metrics": [], "created_at": 1670853278.56334}}, "selectors": {}, "disabled": {}, "parent_map": {"model.test.my_model": [], "metric.test.my_metric": ["model.test.my_model"]}, "child_map": {"model.test.my_model": ["metric.test.my_metric"], "metric.test.my_metric": []}} diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 2656c84e249..51a6b633e40 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -399,30 +399,19 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "unrendered_config": unrendered_second_config, }, "seed.test.seed": { - "compiled_path": None, "build_path": None, "created_at": ANY, - "compiled": True, - "compiled_code": "", "config": seed_config, "patch_path": "test://" + 
seed_schema_yml_path, "path": "seed.csv", "name": "seed", "root_path": project.project_root, - "relation_name": relation_name_node_format.format( - project.database, my_schema_name, "seed" - ), "resource_type": "seed", "raw_code": "", - "language": "sql", "package_name": "test", "original_file_path": seed_path, - "refs": [], - "sources": [], - "depends_on": {"nodes": [], "macros": []}, "unique_id": "seed.test.seed", "fqn": ["test", "seed"], - "metrics": [], "tags": [], "meta": {}, "schema": my_schema_name, @@ -473,12 +462,11 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): }, }, "docs": {"node_color": None, "show": True}, - "compiled": True, - "compiled_code": "", - "extra_ctes_injected": True, - "extra_ctes": [], "checksum": checksum_file(seed_path), "unrendered_config": unrendered_seed_config, + "relation_name": relation_name_node_format.format( + project.database, my_schema_name, "seed" + ), }, "test.test.not_null_model_id.d01cc630e6": { "alias": "not_null_model_id", @@ -834,9 +822,9 @@ def expected_seeded_manifest(project, model_database=None, quote_model=False): "test.test.unique_model_id.67b76558ff": [], }, "docs": { - "dbt.__overview__": ANY, - "test.macro_info": ANY, - "test.macro_arg_info": ANY, + "doc.dbt.__overview__": ANY, + "doc.test.macro_info": ANY, + "doc.test.macro_arg_info": ANY, }, "disabled": {}, } @@ -1009,7 +997,6 @@ def expected_references_manifest(project): }, "seed.test.seed": { "alias": "seed", - "compiled_path": None, "build_path": None, "created_at": ANY, "columns": { @@ -1055,22 +1042,16 @@ def expected_references_manifest(project): }, }, "config": get_rendered_seed_config(), - "sources": [], - "depends_on": {"macros": [], "nodes": []}, "deferred": False, "description": "The test seed", "docs": {"node_color": None, "show": True}, "fqn": ["test", "seed"], - "metrics": [], "name": "seed", "original_file_path": seed_path, "package_name": "test", "patch_path": "test://" + os.path.join("seeds", "schema.yml"), "path": "seed.csv", "raw_code": "", - "language": "sql", - "refs": [], - "relation_name": '"{0}"."{1}".seed'.format(model_database, my_schema_name), "resource_type": "seed", "root_path": project.project_root, "schema": my_schema_name, @@ -1078,12 +1059,11 @@ def expected_references_manifest(project): "tags": [], "meta": {}, "unique_id": "seed.test.seed", - "compiled": True, - "compiled_code": "", - "extra_ctes_injected": True, - "extra_ctes": [], "checksum": checksum_file(seed_path), "unrendered_config": get_unrendered_seed_config(), + "relation_name": '"{0}"."{1}".seed'.format( + project.database, my_schema_name + ), }, "snapshot.test.snapshot_seed": { "alias": "snapshot_seed", @@ -1208,88 +1188,98 @@ def expected_references_manifest(project): "metrics": {}, "selectors": {}, "docs": { - "dbt.__overview__": ANY, - "test.column_info": { + "doc.dbt.__overview__": ANY, + "doc.test.column_info": { "block_contents": "An ID field", + "resource_type": "doc", "name": "column_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "unique_id": "test.column_info", + "unique_id": "doc.test.column_info", }, - "test.ephemeral_summary": { + "doc.test.ephemeral_summary": { "block_contents": ("A summmary table of the ephemeral copy of the seed data"), + "resource_type": "doc", "name": "ephemeral_summary", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "unique_id": "test.ephemeral_summary", + "unique_id": "doc.test.ephemeral_summary", }, - "test.source_info": { + "doc.test.source_info": 
{ "block_contents": "My source", + "resource_type": "doc", "name": "source_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "unique_id": "test.source_info", + "unique_id": "doc.test.source_info", }, - "test.summary_count": { + "doc.test.summary_count": { "block_contents": "The number of instances of the first name", + "resource_type": "doc", "name": "summary_count", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "unique_id": "test.summary_count", + "unique_id": "doc.test.summary_count", }, - "test.summary_first_name": { + "doc.test.summary_first_name": { "block_contents": "The first name being summarized", + "resource_type": "doc", "name": "summary_first_name", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "unique_id": "test.summary_first_name", + "unique_id": "doc.test.summary_first_name", }, - "test.table_info": { + "doc.test.table_info": { "block_contents": "My table", + "resource_type": "doc", "name": "table_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "unique_id": "test.table_info", + "unique_id": "doc.test.table_info", }, - "test.view_summary": { + "doc.test.view_summary": { "block_contents": ( "A view of the summary of the ephemeral copy of the seed data" ), + "resource_type": "doc", "name": "view_summary", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "unique_id": "test.view_summary", + "unique_id": "doc.test.view_summary", }, - "test.macro_info": { + "doc.test.macro_info": { "block_contents": "My custom test that I wrote that does nothing", + "resource_type": "doc", "name": "macro_info", "original_file_path": os.path.join("macros", "macro.md"), "package_name": "test", "path": "macro.md", - "unique_id": "test.macro_info", + "unique_id": "doc.test.macro_info", }, - "test.notebook_info": { + "doc.test.notebook_info": { "block_contents": "A description of the complex exposure", + "resource_type": "doc", "name": "notebook_info", "original_file_path": docs_path, "package_name": "test", "path": "docs.md", - "unique_id": "test.notebook_info", + "unique_id": "doc.test.notebook_info", }, - "test.macro_arg_info": { + "doc.test.macro_arg_info": { "block_contents": "The model for my custom test", + "resource_type": "doc", "name": "macro_arg_info", "original_file_path": os.path.join("macros", "macro.md"), "package_name": "test", "path": "macro.md", - "unique_id": "test.macro_arg_info", + "unique_id": "doc.test.macro_arg_info", }, }, "child_map": { @@ -1328,7 +1318,6 @@ def expected_references_manifest(project): "patch_path": "test://" + os.path.join("macros", "schema.yml"), "resource_type": "macro", "unique_id": "macro.test.test_nothing", - "tags": [], "supported_languages": None, "arguments": [ { diff --git a/tests/functional/list/test_list.py b/tests/functional/list/test_list.py index 78fca376d7d..cf0d3d89add 100644 --- a/tests/functional/list/test_list.py +++ b/tests/functional/list/test_list.py @@ -357,7 +357,6 @@ def expect_seed_output(self): "json": { "name": "seed", "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, "tags": [], "config": { "enabled": True, diff --git a/tests/functional/partial_parsing/test_pp_docs.py b/tests/functional/partial_parsing/test_pp_docs.py index f9ab5e3a2d7..b3c7d52212d 100644 --- a/tests/functional/partial_parsing/test_pp_docs.py +++ b/tests/functional/partial_parsing/test_pp_docs.py @@ -129,7 +129,7 @@ def test_pp_docs(self, project): results = run_dbt(["--partial-parse", 
"run"]) manifest = get_manifest(project.project_root) assert len(manifest.docs) == 2 - doc_id = "test.customer_table" + doc_id = "doc.test.customer_table" assert doc_id in manifest.docs doc = manifest.docs[doc_id] doc_file_id = doc.file_id @@ -225,7 +225,7 @@ def models(self): def test_remove_replace(self, project): run_dbt(["parse", "--write-manifest"]) manifest = get_manifest(project.project_root) - doc_id = "test.whatever" + doc_id = "doc.test.whatever" assert doc_id in manifest.docs doc = manifest.docs[doc_id] doc_file = manifest.files[doc.file_id] From 3d54a838228a05e29d306f4265d1b00b20f05066 Mon Sep 17 00:00:00 2001 From: Kshitij Aranke Date: Tue, 13 Dec 2022 15:07:56 -0800 Subject: [PATCH 064/156] [CT-1284] Change Python model default materialization to table (#6432) --- .../unreleased/Fixes-20221213-112620.yaml | 6 + core/dbt/parser/base.py | 1 + test/unit/test_parser.py | 434 ++++++++++-------- 3 files changed, 237 insertions(+), 204 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221213-112620.yaml diff --git a/.changes/unreleased/Fixes-20221213-112620.yaml b/.changes/unreleased/Fixes-20221213-112620.yaml new file mode 100644 index 00000000000..a2220f9a920 --- /dev/null +++ b/.changes/unreleased/Fixes-20221213-112620.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: '[CT-1284] Change Python model default materialization to table' +time: 2022-12-13T11:26:20.550017-08:00 +custom: + Author: aranke + Issue: "6345" diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index bfcd3b20e14..21bc74fbfc5 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -191,6 +191,7 @@ def _create_parsetime_node( name = block.name if block.path.relative_path.endswith(".py"): language = ModelLanguage.python + config.add_config_call({"materialized": "table"}) else: # this is not ideal but we have a lot of tests to adjust if don't do it language = ModelLanguage.sql diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 316caffa870..7ca68f0e1fd 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -1,44 +1,39 @@ -import ipdb +import os import unittest +from copy import deepcopy from unittest import mock -import os import yaml -from copy import deepcopy import dbt.flags import dbt.parser from dbt import tracking from dbt.context.context_config import ContextConfig -from dbt.exceptions import CompilationException, ParsingException -from dbt.parser import ( - ModelParser, MacroParser, SingularTestParser, GenericTestParser, - SchemaParser, SnapshotParser, AnalysisParser -) -from dbt.parser.schemas import ( - TestablePatchParser, SourceParser, AnalysisPatchParser, MacroPatchParser -) -from dbt.parser.search import FileBlock -from dbt.parser.generic_test_builders import YamlBlock -from dbt.parser.sources import SourcePatcher - -from dbt.node_types import NodeType, ModelLanguage from dbt.contracts.files import SourceFile, FileHash, FilePath, SchemaSourceFile from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.model_config import ( NodeConfig, TestConfig, SnapshotConfig ) from dbt.contracts.graph.nodes import ( - ModelNode, Macro, DependsOn, ColumnInfo, - SingularTestNode, GenericTestNode, SnapshotNode, + ModelNode, Macro, DependsOn, SingularTestNode, SnapshotNode, AnalysisNode, UnpatchedSourceDefinition ) -from dbt.contracts.graph.unparsed import Docs +from dbt.exceptions import CompilationException, ParsingException +from dbt.node_types import NodeType +from dbt.parser import ( + ModelParser, MacroParser, SingularTestParser, 
GenericTestParser, + SchemaParser, SnapshotParser, AnalysisParser +) +from dbt.parser.generic_test_builders import YamlBlock from dbt.parser.models import ( _get_config_call_dict, _shift_sources, _get_exp_sample_result, _get_stable_sample_result, _get_sample_result ) -import itertools -from .utils import config_from_parts_or_dicts, normalize, generate_name_macros, MockNode, MockSource, MockDocumentation +from dbt.parser.schemas import ( + TestablePatchParser, SourceParser, AnalysisPatchParser, MacroPatchParser +) +from dbt.parser.search import FileBlock +from dbt.parser.sources import SourcePatcher +from .utils import config_from_parts_or_dicts, normalize, generate_name_macros, MockNode def get_abs_os_path(unix_path): @@ -161,7 +156,7 @@ def file_block_for(self, data: str, filename: str, searched: str): return FileBlock(file=source_file) def assert_has_manifest_lengths(self, manifest, macros=3, nodes=0, - sources=0, docs=0, disabled=0): + sources=0, docs=0, disabled=0): self.assertEqual(len(manifest.macros), macros) self.assertEqual(len(manifest.nodes), nodes) self.assertEqual(len(manifest.sources), sources) @@ -196,7 +191,6 @@ def assertEqualNodes(node_one, node_two): assert node_one_dict == node_two_dict - SINGLE_TABLE_SOURCE = ''' version: 2 sources: @@ -221,7 +215,6 @@ def assertEqualNodes(node_one, node_two): values: ['red', 'blue', 'green'] ''' - SINGLE_TABLE_MODEL_TESTS = ''' version: 2 models: @@ -239,7 +232,6 @@ def assertEqualNodes(node_one, node_two): arg: 100 ''' - SINGLE_TABLE_SOURCE_PATCH = ''' version: 2 sources: @@ -402,7 +394,7 @@ def setUp(self): patch_path=None, ) nodes = {my_model_node.unique_id: my_model_node} - macros={m.unique_id: m for m in generate_name_macros('root')} + macros = {m.unique_id: m for m in generate_name_macros('root')} self.manifest = Manifest(nodes=nodes, macros=macros) self.manifest.ref_lookup self.parser = SchemaParser( @@ -495,6 +487,136 @@ def test__parse_basic_model_tests(self): self.assertEqual(self.parser.manifest.files[file_id].node_patches, ['model.root.my_model']) +sql_model = """ +{{ config(materialized="table") }} +select 1 as id +""" + +sql_model_parse_error = "{{ SYNTAX ERROR }}" + +python_model = """ +import textblob +import text as a +from torch import b +import textblob.text +import sklearn + +def model(dbt, session): + dbt.config( + materialized='table', + packages=['sklearn==0.1.0'] + ) + df0 = dbt.ref("a_model").to_pandas() + df1 = dbt.ref("my_sql_model").task.limit(2) + df2 = dbt.ref("my_sql_model_1") + df3 = dbt.ref("my_sql_model_2") + df4 = dbt.source("test", 'table1').limit(max=[max(dbt.ref('something'))]) + df5 = [dbt.ref('test1')] + + a_dict = {'test2': dbt.ref('test2')} + df5 = {'test2': dbt.ref('test3')} + df6 = [dbt.ref("test4")] + + df = df0.limit(2) + return df +""" + +python_model_config = """ +def model(dbt, session): + dbt.config.get("param_1") + dbt.config.get("param_2") + return dbt.ref("some_model") +""" + +python_model_config_with_defaults = """ +def model(dbt, session): + dbt.config.get("param_None", None) + dbt.config.get("param_Str", "default") + dbt.config.get("param_List", [1, 2]) + return dbt.ref("some_model") +""" + +python_model_single_argument = """ +def model(dbt): + dbt.config(materialized="table") + return dbt.ref("some_model") +""" + +python_model_no_argument = """ +import pandas as pd + +def model(): + return pd.dataframe([1, 2]) +""" + +python_model_incorrect_argument_name = """ +def model(tbd, session): + tbd.config(materialized="table") + return tbd.ref("some_model") +""" + 
+python_model_multiple_models = """ +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") + +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") +""" + +python_model_incorrect_function_name = """ +def model1(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model") +""" + +python_model_multiple_returns = """ +def model(dbt, session): + dbt.config(materialized='table') + return dbt.ref("some_model"), dbt.ref("some_other_model") +""" + +python_model_no_return = """ +def model(dbt, session): + dbt.config(materialized='table') +""" + +python_model_single_return = """ +import pandas as pd + +def model(dbt, session): + dbt.config(materialized='table') + return pd.dataframe([1, 2]) +""" + +python_model_incorrect_ref = """ +def model(dbt, session): + model_names = ["orders", "customers"] + models = [] + + for model_name in model_names: + models.extend(dbt.ref(model_name)) + + return models[0] +""" + +python_model_default_materialization = """ +import pandas as pd + +def model(dbt, session): + return pd.dataframe([1, 2]) +""" + +python_model_custom_materialization = """ +import pandas as pd + +def model(dbt, session): + dbt.config(materialized="view") + return pd.dataframe([1, 2]) +""" + + class ModelParserTest(BaseParserTest): def setUp(self): super().setUp() @@ -508,8 +630,7 @@ def file_block_for(self, data, filename): return super().file_block_for(data, filename, 'models') def test_basic(self): - raw_code = '{{ config(materialized="table") }}select 1 as id' - block = self.file_block_for(raw_code, 'nested/model_1.sql') + block = self.file_block_for(sql_model, 'nested/model_1.sql') self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) @@ -527,7 +648,7 @@ def test_basic(self): config=NodeConfig(materialized='table'), path=normalize('nested/model_1.sql'), language='sql', - raw_code=raw_code, + raw_code=sql_model, checksum=block.file.checksum, unrendered_config={'materialized': 'table'}, config_call_dict={ @@ -538,34 +659,14 @@ def test_basic(self): file_id = 'snowplow://' + normalize('models/nested/model_1.sql') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['model.snowplow.model_1']) - - def test_parse_python_file(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - packages = ['sklearn==0.1.0'] - ) - import textblob - import text as a - from torch import b - import textblob.text - import sklearn - df0 = pandas(dbt.ref("a_model")) - df1 = dbt.ref("my_sql_model").task.limit(2) - df2 = dbt.ref("my_sql_model_1") - df3 = dbt.ref("my_sql_model_2") - df4 = dbt.source("test", 'table1').limit(max = [max(dbt.ref('something'))]) - df5 = [dbt.ref('test1')] - - a_dict = {'test2' : dbt.ref('test2')} - df5 = anotherfunction({'test2' : dbt.ref('test3')}) - df6 = [somethingelse.ref(dbt.ref("test4"))] - - df = df.limit(2) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_sql_model_parse_error(self): + block = self.file_block_for(sql_model_parse_error, 'nested/model_1.sql') + with self.assertRaises(CompilationException): + self.parser.parse_file(block) + + def test_python_model_parse(self): + block = self.file_block_for(python_model, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file self.parser.parse_file(block) 
self.assert_has_manifest_lengths(self.parser.manifest, nodes=1) @@ -586,181 +687,104 @@ def model(dbt, session): # config.packages = ['textblob'] path=normalize('nested/py_model.py'), language='python', - raw_code=py_code, + raw_code=python_model, checksum=block.file.checksum, - unrendered_config={'materialized': 'table', 'packages':python_packages}, - config_call_dict={'materialized': 'table', 'packages':python_packages}, - refs=[['a_model'], ['my_sql_model'], ['my_sql_model_1'], ['my_sql_model_2'], ['something'], ['test1'], ['test2'], ['test3'], ['test4']], - sources = [['test', 'table1']], + unrendered_config={'materialized': 'table', 'packages': python_packages}, + config_call_dict={'materialized': 'table', 'packages': python_packages}, + refs=[['a_model'], ['my_sql_model'], ['my_sql_model_1'], ['my_sql_model_2'], ['something'], ['test1'], + ['test2'], ['test3'], ['test4']], + sources=[['test', 'table1']], ) assertEqualNodes(node, expected) file_id = 'snowplow://' + normalize('models/nested/py_model.py') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['model.snowplow.py_model']) - def test_python_model_config_get(self): - py_code = """ -def model(dbt, session): - dbt.config.get("param_1") - dbt.config.get("param_2") - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_config(self): + block = self.file_block_for(python_model_config, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - + self.parser.parse_file(block) node = list(self.parser.manifest.nodes.values())[0] self.assertEqual(node.config.to_dict()["config_keys_used"], ["param_1", "param_2"]) - def test_python_model_config_default(self): - py_code = """ -def model(dbt, session): - dbt.config.get("param_None", None) - dbt.config.get("param_Str", "default") - dbt.config.get("param_List", [1, 2]) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') - self.parser.manifest.files[block.file.file_id] = block.file - - self.parser.parse_file(block) - node = list(self.parser.manifest.nodes.values())[0] - default_values = node.config.to_dict()["config_keys_defaults"] - self.assertIsNone(default_values[0]) - self.assertEqual(default_values[1], "default") - self.assertEqual(default_values[2], [1, 2]) + def test_python_model_config_with_defaults(self): + block = self.file_block_for(python_model_config_with_defaults, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + default_values = node.config.to_dict()["config_keys_defaults"] + self.assertIsNone(default_values[0]) + self.assertEqual(default_values[1], "default") + self.assertEqual(default_values[2], [1, 2]) - def test_wrong_python_model_def_miss_session(self): - py_code = """ -def model(dbt): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_single_argument(self): + block = self.file_block_for(python_model_single_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_wrong_python_model_def_miss_session(self): - py_code = """ -def model(): - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_no_argument(self): + block = 
self.file_block_for(python_model_no_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_wrong_python_model_def_wrong_arg(self): - """ First argument for python model should be dbt - """ - py_code = """ -def model(dat, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_incorrect_argument_name(self): + block = self.file_block_for(python_model_incorrect_argument_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_multipe_model(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_multiple_models(self): + block = self.file_block_for(python_model_multiple_models, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_no_model(self): - py_code = """ -def model1(dbt, session): - dbt.config( - materialized='table', - ) - return df - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_python_model_incorrect_function_name(self): + block = self.file_block_for(python_model_incorrect_function_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_mutiple_return(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - return df1, df2 - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_python_model_multiple_returns(self): + block = self.file_block_for(python_model_multiple_returns, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - - def test_wrong_python_model_def_no_return(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + + def test_python_model_no_return(self): + block = self.file_block_for(python_model_no_return, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_correct_python_model_def_return_function(self): - py_code = """ -def model(dbt, session): - dbt.config( - materialized='table', - ) - return pandas.dataframe([1,2]) - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_single_return(self): + block = self.file_block_for(python_model_single_return, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - self.parser.parse_file(block) + self.assertIsNone(self.parser.parse_file(block)) - def test_parse_error(self): - block = self.file_block_for('{{ SYNTAX ERROR }}', 'nested/model_1.sql') - with self.assertRaises(CompilationException): + def test_python_model_incorrect_ref(self): + block = self.file_block_for(python_model_incorrect_ref, 'nested/py_model.py') + 
self.parser.manifest.files[block.file.file_id] = block.file + with self.assertRaises(ParsingException): self.parser.parse_file(block) - def test_parse_ref_with_non_string(self): - py_code = """ -def model(dbt, session): - - model_names = ["orders", "customers"] - models = [] - - for model_name in model_names: - models.extend(dbt.ref(model_name)) - - return models[0] - """ - block = self.file_block_for(py_code, 'nested/py_model.py') + def test_python_model_default_materialization(self): + block = self.file_block_for(python_model_default_materialization, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): - self.parser.parse_file(block) - + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + self.assertEqual(node.get_materialization(), "table") + def test_python_model_custom_materialization(self): + block = self.file_block_for(python_model_custom_materialization, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + self.assertEqual(node.get_materialization(), "view") class StaticModelParserTest(BaseParserTest): def setUp(self): @@ -809,9 +833,10 @@ def test_built_in_macro_override_detection(self): unrendered_config={'materialized': 'table'}, ) - assert(self.parser._has_banned_macro(node)) + assert (self.parser._has_banned_macro(node)) + -# TODO +# TODO class StaticModelParserUnitTest(BaseParserTest): # _get_config_call_dict # _shift_sources @@ -986,7 +1011,8 @@ def file_block_for(self, data, filename): return super().file_block_for(data, filename, 'snapshots') def test_parse_error(self): - block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', 'nested/snap_1.sql') + block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', + 'nested/snap_1.sql') with self.assertRaises(CompilationException): self.parser.parse_file(block) @@ -1036,10 +1062,10 @@ def test_single_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) @@ -1104,10 +1130,10 @@ def test_multi_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) @@ -1141,10 +1167,10 @@ def test_multi_block(self): 'updated_at': 'last_update', }, config_call_dict={ - 'strategy': 'timestamp', - 'target_database': 'dbt', - 'target_schema': 'analytics', - 'unique_key': 'id', + 'strategy': 'timestamp', + 'target_database': 'dbt', + 'target_schema': 'analytics', + 'unique_key': 'id', 'updated_at': 'last_update', }, ) @@ -1269,7 +1295,7 @@ def test_basic(self): class GenericTestParserTest(BaseParserTest): -# generic tests in the test-paths directory currently leverage the macro parser + # generic tests in the test-paths directory currently leverage the macro parser def setUp(self): super().setUp() self.parser = GenericTestParser( @@ -1340,6 +1366,6 @@ def test_basic(self): relation_name=None, ) assertEqualNodes(node, expected) - file_id = 
'snowplow://' + normalize('analyses/nested/analysis_1.sql') + file_id = 'snowplow://' + normalize('analyses/nested/analysis_1.sql') self.assertIn(file_id, self.parser.manifest.files) self.assertEqual(self.parser.manifest.files[file_id].nodes, ['analysis.snowplow.analysis_1']) From c00052cbfb91621895b5fc94dae875ff24fb4243 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Tue, 13 Dec 2022 18:15:25 -0500 Subject: [PATCH 065/156] Add Optional back on "database" field of HasRelationMetadata (#6439) --- core/dbt/contracts/graph/nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/dbt/contracts/graph/nodes.py b/core/dbt/contracts/graph/nodes.py index bc955e9503e..033318a34c1 100644 --- a/core/dbt/contracts/graph/nodes.py +++ b/core/dbt/contracts/graph/nodes.py @@ -152,7 +152,7 @@ class ColumnInfo(AdditionalPropertiesMixin, ExtensibleDbtClassMixin, Replaceable # Metrics, exposures, @dataclass class HasRelationMetadata(dbtClassMixin, Replaceable): - database: str + database: Optional[str] schema: str # Can't set database to None like it ought to be From 05dc0212e7710f0889bd0f9a5fffcdc1e8cc66e7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 13 Dec 2022 20:18:11 -0500 Subject: [PATCH 066/156] Bumping version to 1.4.0b1 and generate changelog (#6440) * Bumping version to 1.4.0b1 and generate CHANGELOG * Updating date * Updating date Co-authored-by: Github Build Bot Co-authored-by: leahwicz <60146280+leahwicz@users.noreply.github.com> --- .bumpversion.cfg | 2 +- .changes/1.4.0-b1.md | 87 ++++++++++++++++++ .../Dependency-20220923-000646.yaml | 0 .../Dependency-20221007-000848.yaml | 0 .../Dependency-20221020-000753.yaml | 0 .../Dependency-20221026-000910.yaml | 0 .../Dependency-20221205-002118.yaml | 0 .../Docs-20220908-154157.yaml | 0 .../Docs-20221007-090656.yaml | 0 .../Docs-20221017-171411.yaml | 0 .../Docs-20221116-155743.yaml | 0 .../Docs-20221202-150523.yaml | 0 .../Features-20220408-165459.yaml | 0 .../Features-20220817-154857.yaml | 0 .../Features-20220912-125935.yaml | 0 .../Features-20220914-095625.yaml | 0 .../Features-20220925-211651.yaml | 0 .../Features-20221003-110705.yaml | 0 .../Features-20221102-150003.yaml | 0 .../Features-20221107-105018.yaml | 0 .../Features-20221114-185207.yaml | 0 .../Features-20221130-112913.yaml | 0 .../Features-20221206-150704.yaml | 0 .../Fixes-20220916-104854.yaml | 0 .../Fixes-20221010-113218.yaml | 0 .../Fixes-20221011-160715.yaml | 0 .../Fixes-20221016-173742.yaml | 0 .../Fixes-20221107-095314.yaml | 0 .../Fixes-20221115-081021.yaml | 0 .../Fixes-20221124-163419.yaml | 0 .../Fixes-20221202-164859.yaml | 0 .../Under the Hood-20220927-194259.yaml | 0 .../Under the Hood-20220929-134406.yaml | 0 .../Under the Hood-20221005-120310.yaml | 0 .../Under the Hood-20221007-094627.yaml | 0 .../Under the Hood-20221007-140044.yaml | 0 .../Under the Hood-20221013-181912.yaml | 0 .../Under the Hood-20221017-151511.yaml | 0 .../Under the Hood-20221017-155844.yaml | 0 .../Under the Hood-20221028-104837.yaml | 0 .../Under the Hood-20221028-110344.yaml | 0 .../Under the Hood-20221108-074550.yaml | 0 .../Under the Hood-20221108-115633.yaml | 0 .../Under the Hood-20221108-133104.yaml | 0 .../Under the Hood-20221116-130037.yaml | 0 .../Under the Hood-20221118-145717.yaml | 0 .../Under the Hood-20221205-164948.yaml | 0 .../Under the Hood-20221206-094015.yaml | 0 .../Under the Hood-20221206-113053.yaml | 0 .../Under the Hood-20221211-214240.yaml | 0 CHANGELOG.md | 88 
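To make the intent of the HasRelationMetadata patch above concrete, a simplified sketch (the mixin bases and the rest of the class are omitted, and the reading of the restored comment is an assumption):

from dataclasses import dataclass
from typing import Optional

@dataclass
class HasRelationMetadata:
    database: Optional[str]  # Optional again, so callers can pass None where no database applies
    schema: str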
+++++++++++++++++++ core/dbt/version.py | 2 +- core/setup.py | 2 +- docker/Dockerfile | 12 +-- .../dbt/adapters/postgres/__version__.py | 2 +- plugins/postgres/setup.py | 2 +- .../adapter/dbt/tests/adapter/__version__.py | 2 +- tests/adapter/setup.py | 2 +- 58 files changed, 188 insertions(+), 13 deletions(-) create mode 100644 .changes/1.4.0-b1.md rename .changes/{unreleased => 1.4.0}/Dependency-20220923-000646.yaml (100%) rename .changes/{unreleased => 1.4.0}/Dependency-20221007-000848.yaml (100%) rename .changes/{unreleased => 1.4.0}/Dependency-20221020-000753.yaml (100%) rename .changes/{unreleased => 1.4.0}/Dependency-20221026-000910.yaml (100%) rename .changes/{unreleased => 1.4.0}/Dependency-20221205-002118.yaml (100%) rename .changes/{unreleased => 1.4.0}/Docs-20220908-154157.yaml (100%) rename .changes/{unreleased => 1.4.0}/Docs-20221007-090656.yaml (100%) rename .changes/{unreleased => 1.4.0}/Docs-20221017-171411.yaml (100%) rename .changes/{unreleased => 1.4.0}/Docs-20221116-155743.yaml (100%) rename .changes/{unreleased => 1.4.0}/Docs-20221202-150523.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20220408-165459.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20220817-154857.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20220912-125935.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20220914-095625.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20220925-211651.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20221003-110705.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20221102-150003.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20221107-105018.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20221114-185207.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20221130-112913.yaml (100%) rename .changes/{unreleased => 1.4.0}/Features-20221206-150704.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20220916-104854.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221010-113218.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221011-160715.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221016-173742.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221107-095314.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221115-081021.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221124-163419.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221202-164859.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20220927-194259.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20220929-134406.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221005-120310.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221007-094627.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221007-140044.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221013-181912.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221017-151511.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221017-155844.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221028-104837.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221028-110344.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221108-074550.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221108-115633.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221108-133104.yaml (100%) rename 
.changes/{unreleased => 1.4.0}/Under the Hood-20221116-130037.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221118-145717.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221205-164948.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221206-094015.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221206-113053.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the Hood-20221211-214240.yaml (100%) diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 02ea0717225..3cdca1ad352 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.4.0a1 +current_version = 1.4.0b1 parse = (?P\d+) \.(?P\d+) \.(?P\d+) diff --git a/.changes/1.4.0-b1.md b/.changes/1.4.0-b1.md new file mode 100644 index 00000000000..747aba542dd --- /dev/null +++ b/.changes/1.4.0-b1.md @@ -0,0 +1,87 @@ +## dbt-core 1.4.0-b1 - December 14, 2022 + +### Features + +- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) +- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521)) +- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) +- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) + +### Fixes + +- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992)) +- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030)) +- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. 
([#5436](https://github.com/dbt-labs/dbt-core/issues/5436)) +- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625)) +- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) + +### Docs + +- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528)) +- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323)) +- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368)) + +### Under the Hood + +- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946)) +- Added flat_graph attribute the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809)) +- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983)) +- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023)) +- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229)) +- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- Fixed extra whitespace in strings introduced by black. ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171)) +- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173)) +- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) +- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770)) +- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771)) +- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) +- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. 
([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) +- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) +- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) +- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) + +### Dependencies + +- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) +- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) +- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) +- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) + +### Dependency + +- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) + +### Contributors +- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) diff --git a/.changes/unreleased/Dependency-20220923-000646.yaml b/.changes/1.4.0/Dependency-20220923-000646.yaml similarity index 100% rename from .changes/unreleased/Dependency-20220923-000646.yaml rename to 
.changes/1.4.0/Dependency-20220923-000646.yaml diff --git a/.changes/unreleased/Dependency-20221007-000848.yaml b/.changes/1.4.0/Dependency-20221007-000848.yaml similarity index 100% rename from .changes/unreleased/Dependency-20221007-000848.yaml rename to .changes/1.4.0/Dependency-20221007-000848.yaml diff --git a/.changes/unreleased/Dependency-20221020-000753.yaml b/.changes/1.4.0/Dependency-20221020-000753.yaml similarity index 100% rename from .changes/unreleased/Dependency-20221020-000753.yaml rename to .changes/1.4.0/Dependency-20221020-000753.yaml diff --git a/.changes/unreleased/Dependency-20221026-000910.yaml b/.changes/1.4.0/Dependency-20221026-000910.yaml similarity index 100% rename from .changes/unreleased/Dependency-20221026-000910.yaml rename to .changes/1.4.0/Dependency-20221026-000910.yaml diff --git a/.changes/unreleased/Dependency-20221205-002118.yaml b/.changes/1.4.0/Dependency-20221205-002118.yaml similarity index 100% rename from .changes/unreleased/Dependency-20221205-002118.yaml rename to .changes/1.4.0/Dependency-20221205-002118.yaml diff --git a/.changes/unreleased/Docs-20220908-154157.yaml b/.changes/1.4.0/Docs-20220908-154157.yaml similarity index 100% rename from .changes/unreleased/Docs-20220908-154157.yaml rename to .changes/1.4.0/Docs-20220908-154157.yaml diff --git a/.changes/unreleased/Docs-20221007-090656.yaml b/.changes/1.4.0/Docs-20221007-090656.yaml similarity index 100% rename from .changes/unreleased/Docs-20221007-090656.yaml rename to .changes/1.4.0/Docs-20221007-090656.yaml diff --git a/.changes/unreleased/Docs-20221017-171411.yaml b/.changes/1.4.0/Docs-20221017-171411.yaml similarity index 100% rename from .changes/unreleased/Docs-20221017-171411.yaml rename to .changes/1.4.0/Docs-20221017-171411.yaml diff --git a/.changes/unreleased/Docs-20221116-155743.yaml b/.changes/1.4.0/Docs-20221116-155743.yaml similarity index 100% rename from .changes/unreleased/Docs-20221116-155743.yaml rename to .changes/1.4.0/Docs-20221116-155743.yaml diff --git a/.changes/unreleased/Docs-20221202-150523.yaml b/.changes/1.4.0/Docs-20221202-150523.yaml similarity index 100% rename from .changes/unreleased/Docs-20221202-150523.yaml rename to .changes/1.4.0/Docs-20221202-150523.yaml diff --git a/.changes/unreleased/Features-20220408-165459.yaml b/.changes/1.4.0/Features-20220408-165459.yaml similarity index 100% rename from .changes/unreleased/Features-20220408-165459.yaml rename to .changes/1.4.0/Features-20220408-165459.yaml diff --git a/.changes/unreleased/Features-20220817-154857.yaml b/.changes/1.4.0/Features-20220817-154857.yaml similarity index 100% rename from .changes/unreleased/Features-20220817-154857.yaml rename to .changes/1.4.0/Features-20220817-154857.yaml diff --git a/.changes/unreleased/Features-20220912-125935.yaml b/.changes/1.4.0/Features-20220912-125935.yaml similarity index 100% rename from .changes/unreleased/Features-20220912-125935.yaml rename to .changes/1.4.0/Features-20220912-125935.yaml diff --git a/.changes/unreleased/Features-20220914-095625.yaml b/.changes/1.4.0/Features-20220914-095625.yaml similarity index 100% rename from .changes/unreleased/Features-20220914-095625.yaml rename to .changes/1.4.0/Features-20220914-095625.yaml diff --git a/.changes/unreleased/Features-20220925-211651.yaml b/.changes/1.4.0/Features-20220925-211651.yaml similarity index 100% rename from .changes/unreleased/Features-20220925-211651.yaml rename to .changes/1.4.0/Features-20220925-211651.yaml diff --git a/.changes/unreleased/Features-20221003-110705.yaml 
b/.changes/1.4.0/Features-20221003-110705.yaml similarity index 100% rename from .changes/unreleased/Features-20221003-110705.yaml rename to .changes/1.4.0/Features-20221003-110705.yaml diff --git a/.changes/unreleased/Features-20221102-150003.yaml b/.changes/1.4.0/Features-20221102-150003.yaml similarity index 100% rename from .changes/unreleased/Features-20221102-150003.yaml rename to .changes/1.4.0/Features-20221102-150003.yaml diff --git a/.changes/unreleased/Features-20221107-105018.yaml b/.changes/1.4.0/Features-20221107-105018.yaml similarity index 100% rename from .changes/unreleased/Features-20221107-105018.yaml rename to .changes/1.4.0/Features-20221107-105018.yaml diff --git a/.changes/unreleased/Features-20221114-185207.yaml b/.changes/1.4.0/Features-20221114-185207.yaml similarity index 100% rename from .changes/unreleased/Features-20221114-185207.yaml rename to .changes/1.4.0/Features-20221114-185207.yaml diff --git a/.changes/unreleased/Features-20221130-112913.yaml b/.changes/1.4.0/Features-20221130-112913.yaml similarity index 100% rename from .changes/unreleased/Features-20221130-112913.yaml rename to .changes/1.4.0/Features-20221130-112913.yaml diff --git a/.changes/unreleased/Features-20221206-150704.yaml b/.changes/1.4.0/Features-20221206-150704.yaml similarity index 100% rename from .changes/unreleased/Features-20221206-150704.yaml rename to .changes/1.4.0/Features-20221206-150704.yaml diff --git a/.changes/unreleased/Fixes-20220916-104854.yaml b/.changes/1.4.0/Fixes-20220916-104854.yaml similarity index 100% rename from .changes/unreleased/Fixes-20220916-104854.yaml rename to .changes/1.4.0/Fixes-20220916-104854.yaml diff --git a/.changes/unreleased/Fixes-20221010-113218.yaml b/.changes/1.4.0/Fixes-20221010-113218.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221010-113218.yaml rename to .changes/1.4.0/Fixes-20221010-113218.yaml diff --git a/.changes/unreleased/Fixes-20221011-160715.yaml b/.changes/1.4.0/Fixes-20221011-160715.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221011-160715.yaml rename to .changes/1.4.0/Fixes-20221011-160715.yaml diff --git a/.changes/unreleased/Fixes-20221016-173742.yaml b/.changes/1.4.0/Fixes-20221016-173742.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221016-173742.yaml rename to .changes/1.4.0/Fixes-20221016-173742.yaml diff --git a/.changes/unreleased/Fixes-20221107-095314.yaml b/.changes/1.4.0/Fixes-20221107-095314.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221107-095314.yaml rename to .changes/1.4.0/Fixes-20221107-095314.yaml diff --git a/.changes/unreleased/Fixes-20221115-081021.yaml b/.changes/1.4.0/Fixes-20221115-081021.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221115-081021.yaml rename to .changes/1.4.0/Fixes-20221115-081021.yaml diff --git a/.changes/unreleased/Fixes-20221124-163419.yaml b/.changes/1.4.0/Fixes-20221124-163419.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221124-163419.yaml rename to .changes/1.4.0/Fixes-20221124-163419.yaml diff --git a/.changes/unreleased/Fixes-20221202-164859.yaml b/.changes/1.4.0/Fixes-20221202-164859.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221202-164859.yaml rename to .changes/1.4.0/Fixes-20221202-164859.yaml diff --git a/.changes/unreleased/Under the Hood-20220927-194259.yaml b/.changes/1.4.0/Under the Hood-20220927-194259.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20220927-194259.yaml rename to 
.changes/1.4.0/Under the Hood-20220927-194259.yaml diff --git a/.changes/unreleased/Under the Hood-20220929-134406.yaml b/.changes/1.4.0/Under the Hood-20220929-134406.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20220929-134406.yaml rename to .changes/1.4.0/Under the Hood-20220929-134406.yaml diff --git a/.changes/unreleased/Under the Hood-20221005-120310.yaml b/.changes/1.4.0/Under the Hood-20221005-120310.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221005-120310.yaml rename to .changes/1.4.0/Under the Hood-20221005-120310.yaml diff --git a/.changes/unreleased/Under the Hood-20221007-094627.yaml b/.changes/1.4.0/Under the Hood-20221007-094627.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221007-094627.yaml rename to .changes/1.4.0/Under the Hood-20221007-094627.yaml diff --git a/.changes/unreleased/Under the Hood-20221007-140044.yaml b/.changes/1.4.0/Under the Hood-20221007-140044.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221007-140044.yaml rename to .changes/1.4.0/Under the Hood-20221007-140044.yaml diff --git a/.changes/unreleased/Under the Hood-20221013-181912.yaml b/.changes/1.4.0/Under the Hood-20221013-181912.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221013-181912.yaml rename to .changes/1.4.0/Under the Hood-20221013-181912.yaml diff --git a/.changes/unreleased/Under the Hood-20221017-151511.yaml b/.changes/1.4.0/Under the Hood-20221017-151511.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221017-151511.yaml rename to .changes/1.4.0/Under the Hood-20221017-151511.yaml diff --git a/.changes/unreleased/Under the Hood-20221017-155844.yaml b/.changes/1.4.0/Under the Hood-20221017-155844.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221017-155844.yaml rename to .changes/1.4.0/Under the Hood-20221017-155844.yaml diff --git a/.changes/unreleased/Under the Hood-20221028-104837.yaml b/.changes/1.4.0/Under the Hood-20221028-104837.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221028-104837.yaml rename to .changes/1.4.0/Under the Hood-20221028-104837.yaml diff --git a/.changes/unreleased/Under the Hood-20221028-110344.yaml b/.changes/1.4.0/Under the Hood-20221028-110344.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221028-110344.yaml rename to .changes/1.4.0/Under the Hood-20221028-110344.yaml diff --git a/.changes/unreleased/Under the Hood-20221108-074550.yaml b/.changes/1.4.0/Under the Hood-20221108-074550.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221108-074550.yaml rename to .changes/1.4.0/Under the Hood-20221108-074550.yaml diff --git a/.changes/unreleased/Under the Hood-20221108-115633.yaml b/.changes/1.4.0/Under the Hood-20221108-115633.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221108-115633.yaml rename to .changes/1.4.0/Under the Hood-20221108-115633.yaml diff --git a/.changes/unreleased/Under the Hood-20221108-133104.yaml b/.changes/1.4.0/Under the Hood-20221108-133104.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221108-133104.yaml rename to .changes/1.4.0/Under the Hood-20221108-133104.yaml diff --git a/.changes/unreleased/Under the Hood-20221116-130037.yaml b/.changes/1.4.0/Under the Hood-20221116-130037.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221116-130037.yaml rename to 
.changes/1.4.0/Under the Hood-20221116-130037.yaml diff --git a/.changes/unreleased/Under the Hood-20221118-145717.yaml b/.changes/1.4.0/Under the Hood-20221118-145717.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221118-145717.yaml rename to .changes/1.4.0/Under the Hood-20221118-145717.yaml diff --git a/.changes/unreleased/Under the Hood-20221205-164948.yaml b/.changes/1.4.0/Under the Hood-20221205-164948.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221205-164948.yaml rename to .changes/1.4.0/Under the Hood-20221205-164948.yaml diff --git a/.changes/unreleased/Under the Hood-20221206-094015.yaml b/.changes/1.4.0/Under the Hood-20221206-094015.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221206-094015.yaml rename to .changes/1.4.0/Under the Hood-20221206-094015.yaml diff --git a/.changes/unreleased/Under the Hood-20221206-113053.yaml b/.changes/1.4.0/Under the Hood-20221206-113053.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221206-113053.yaml rename to .changes/1.4.0/Under the Hood-20221206-113053.yaml diff --git a/.changes/unreleased/Under the Hood-20221211-214240.yaml b/.changes/1.4.0/Under the Hood-20221211-214240.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221211-214240.yaml rename to .changes/1.4.0/Under the Hood-20221211-214240.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 039de921800..edc845a9e55 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,94 @@ - "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version. - Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry) +## dbt-core 1.4.0-b1 - December 14, 2022 + +### Features + +- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) +- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521)) +- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. 
([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) +- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) + +### Fixes + +- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992)) +- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030)) +- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. ([#5436](https://github.com/dbt-labs/dbt-core/issues/5436)) +- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625)) +- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) + +### Docs + +- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528)) +- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323)) +- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368)) + +### Under the Hood + +- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946)) +- Added flat_graph attribute the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809)) +- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983)) +- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023)) +- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229)) +- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- Fixed extra whitespace in strings introduced by black. 
([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171)) +- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173)) +- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) +- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770)) +- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771)) +- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) +- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. ([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) +- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) +- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) +- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) + +### Dependencies + +- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) +- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) +- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) +- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) + +### Dependency + +- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) + +### Contributors +- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) +- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) +- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) +- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) +- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) +- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) +- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) +- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) +- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) +- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) +- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) +- [@max-sixty](https://github.com/max-sixty) 
([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) +- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) +- [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) +- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) +- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) + ## Previous Releases diff --git a/core/dbt/version.py b/core/dbt/version.py index 65b3a08c476..d668a902ae6 100644 --- a/core/dbt/version.py +++ b/core/dbt/version.py @@ -235,5 +235,5 @@ def _get_adapter_plugin_names() -> Iterator[str]: yield plugin_name -__version__ = "1.4.0a1" +__version__ = "1.4.0b1" installed = get_installed_version() diff --git a/core/setup.py b/core/setup.py index 719dd000329..241a70ab6bb 100644 --- a/core/setup.py +++ b/core/setup.py @@ -25,7 +25,7 @@ package_name = "dbt-core" -package_version = "1.4.0a1" +package_version = "1.4.0b1" description = """With dbt, data analysts and engineers can build analytics \ the way engineers build applications.""" diff --git a/docker/Dockerfile b/docker/Dockerfile index 8d3756ca786..72332c35de9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -14,12 +14,12 @@ FROM --platform=$build_for python:3.10.7-slim-bullseye as base # N.B. The refs updated automagically every release via bumpversion # N.B. dbt-postgres is currently found in the core codebase so a value of dbt-core@ is correct -ARG dbt_core_ref=dbt-core@v1.4.0a1 -ARG dbt_postgres_ref=dbt-core@v1.4.0a1 -ARG dbt_redshift_ref=dbt-redshift@v1.4.0a1 -ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0a1 -ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0a1 -ARG dbt_spark_ref=dbt-spark@v1.4.0a1 +ARG dbt_core_ref=dbt-core@v1.4.0b1 +ARG dbt_postgres_ref=dbt-core@v1.4.0b1 +ARG dbt_redshift_ref=dbt-redshift@v1.4.0b1 +ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0b1 +ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0b1 +ARG dbt_spark_ref=dbt-spark@v1.4.0b1 # special case args ARG dbt_spark_version=all ARG dbt_third_party diff --git a/plugins/postgres/dbt/adapters/postgres/__version__.py b/plugins/postgres/dbt/adapters/postgres/__version__.py index 70ba273f562..27cfeecd9e8 100644 --- a/plugins/postgres/dbt/adapters/postgres/__version__.py +++ b/plugins/postgres/dbt/adapters/postgres/__version__.py @@ -1 +1 @@ -version = "1.4.0a1" +version = "1.4.0b1" diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py index 3511c96f9fa..00a91759aec 100644 --- a/plugins/postgres/setup.py +++ b/plugins/postgres/setup.py @@ -41,7 +41,7 @@ def _dbt_psycopg2_name(): package_name = "dbt-postgres" -package_version = "1.4.0a1" +package_version = "1.4.0b1" description = """The postgres adapter plugin for dbt (data build tool)""" this_directory = os.path.abspath(os.path.dirname(__file__)) diff --git a/tests/adapter/dbt/tests/adapter/__version__.py b/tests/adapter/dbt/tests/adapter/__version__.py index 70ba273f562..27cfeecd9e8 100644 --- a/tests/adapter/dbt/tests/adapter/__version__.py +++ b/tests/adapter/dbt/tests/adapter/__version__.py @@ -1 +1 @@ -version = "1.4.0a1" +version = "1.4.0b1" diff --git a/tests/adapter/setup.py b/tests/adapter/setup.py index ddb664d6989..f9ac627e445 100644 --- a/tests/adapter/setup.py +++ b/tests/adapter/setup.py @@ -20,7 +20,7 @@ package_name = "dbt-tests-adapter" -package_version 
= "1.4.0a1" +package_version = "1.4.0b1" description = """The dbt adapter tests for adapter plugins""" this_directory = os.path.abspath(os.path.dirname(__file__)) From 5e4e917de5f4da7b24862a2d9b14a2709686e3ff Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Wed, 14 Dec 2022 11:13:34 -0500 Subject: [PATCH 067/156] CT-1685: Restore certain aspects of legacy logging behavior important… (#6443) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * CT-1685: Restore certain aspects of legacy logging behavior important to dbt-rpc * CT-1658: Add changelog entry --- .../Under the Hood-20221213-214106.yaml | 7 ++++ core/dbt/events/eventmgr.py | 32 +++++++++++++++++-- core/dbt/events/functions.py | 14 +++++--- 3 files changed, 46 insertions(+), 7 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221213-214106.yaml diff --git a/.changes/unreleased/Under the Hood-20221213-214106.yaml b/.changes/unreleased/Under the Hood-20221213-214106.yaml new file mode 100644 index 00000000000..708c84661d6 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221213-214106.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Restore important legacy logging behaviors, following refactor which removed + them +time: 2022-12-13T21:41:06.815133-05:00 +custom: + Author: peterallenwebb + Issue: "6437" diff --git a/core/dbt/events/eventmgr.py b/core/dbt/events/eventmgr.py index c2c922ab5a8..97a7d5d4360 100644 --- a/core/dbt/events/eventmgr.py +++ b/core/dbt/events/eventmgr.py @@ -47,6 +47,33 @@ class LineFormat(Enum): } +# We should consider fixing the problem, but log_level() can return a string for +# DynamicLevel events, even though it is supposed to return an EventLevel. This +# function gets a string for the level, no matter what. +def _get_level_str(e: BaseEvent) -> str: + return e.log_level().value if isinstance(e.log_level(), EventLevel) else str(e.log_level()) + + +# We need this function for now because the numeric log severity levels in +# Python do not match those for logbook, so we have to explicitly call the +# correct function by name. 
+def send_to_logger(l, level: str, log_line: str): + if level == "test": + l.debug(log_line) + elif level == "debug": + l.debug(log_line) + elif level == "info": + l.info(log_line) + elif level == "warn": + l.warning(log_line) + elif level == "error": + l.error(log_line) + else: + raise AssertionError( + f"While attempting to log {log_line}, encountered the unhandled level: {level}" + ) + + @dataclass class LoggerConfig: name: str @@ -93,7 +120,7 @@ def write_line(self, e: BaseEvent): line = self.create_line(e) python_level = _log_level_map[e.log_level()] if self._python_logger is not None: - self._python_logger.log(python_level, line) + send_to_logger(self._python_logger, _get_level_str(e), line) elif self._stream is not None and _log_level_map[self.level] <= python_level: self._stream.write(line + "\n") @@ -128,8 +155,7 @@ def create_debug_line(self, e: BaseEvent) -> str: log_line = f"\n\n{separator} {datetime.utcnow()} | {self.event_manager.invocation_id} {separator}\n" ts: str = datetime.utcnow().strftime("%H:%M:%S.%f") scrubbed_msg: str = self.scrubber(e.message()) # type: ignore - # log_level() for DynamicLevel events returns str instead of EventLevel - level = e.log_level().value if isinstance(e.log_level(), EventLevel) else e.log_level() + level = _get_level_str(e) log_line += ( f"{self._get_color_tag()}{ts} [{level:<5}]{self._get_thread_name()} {scrubbed_msg}" ) diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index c9d82b9036a..bfdf9cf2714 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -18,10 +18,6 @@ LOG_VERSION = 3 metadata_vars: Optional[Dict[str, str]] = None -# The default event manager will not log anything, but some tests run code that -# generates events, without configuring the event manager. -EVENT_MANAGER: EventManager = EventManager() - def setup_event_logger(log_path: str, level_override: Optional[EventLevel] = None): cleanup_event_logger() @@ -114,6 +110,16 @@ def cleanup_event_logger(): EVENT_MANAGER.callbacks.clear() +# The default event manager will not log anything, but some tests run code that +# generates events, without configuring the event manager, so we create an empty +# manager here until there is a better testing strategy in place. +EVENT_MANAGER: EventManager = EventManager() + +# Since dbt-rpc does not do its own log setup, we set up logbook if legacy +# logging is enabled. +if flags.ENABLE_LEGACY_LOGGER: + EVENT_MANAGER.add_logger(_get_logbook_log_config(None)) + # This global, and the following two functions for capturing stdout logs are # an unpleasant hack we intend to remove as part of API-ification. The GitHub # issue #6350 was opened for that work. 
From 7e90e067afab662842be8e200fff0f7753564681 Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Wed, 14 Dec 2022 17:04:50 -0500 Subject: [PATCH 068/156] Paw/ct 1652 restore default logging (#6447) * CT-1652: Restore stdout logging before logger is fully configured * CT-1652: Add changelog entry * CT-1652: formatting fix for black --- .changes/unreleased/Fixes-20221214-155307.yaml | 7 +++++++ core/dbt/events/functions.py | 17 ++++++++--------- 2 files changed, 15 insertions(+), 9 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221214-155307.yaml diff --git a/.changes/unreleased/Fixes-20221214-155307.yaml b/.changes/unreleased/Fixes-20221214-155307.yaml new file mode 100644 index 00000000000..cb37e0a809c --- /dev/null +++ b/.changes/unreleased/Fixes-20221214-155307.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Repair a regression which prevented basic logging before the logging subsystem + is completely configured. +time: 2022-12-14T15:53:07.396512-05:00 +custom: + Author: peterallenwebb + Issue: "6434" diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index bfdf9cf2714..36dd2e9ba79 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -38,7 +38,7 @@ def setup_event_logger(log_path: str, level_override: Optional[EventLevel] = Non EVENT_MANAGER.add_logger(_get_logfile_config(os.path.join(log_path, "dbt.log"))) -def _get_stdout_config(level: Optional[EventLevel]) -> LoggerConfig: +def _get_stdout_config(level: Optional[EventLevel] = None) -> LoggerConfig: fmt = LineFormat.PlainText if flags.LOG_FORMAT == "json": fmt = LineFormat.Json @@ -90,7 +90,7 @@ def _logfile_filter(log_cache_events: bool, evt: BaseEvent) -> bool: ) -def _get_logbook_log_config(level: Optional[EventLevel]) -> LoggerConfig: +def _get_logbook_log_config(level: Optional[EventLevel] = None) -> LoggerConfig: config = _get_stdout_config(level) config.name = "logbook_log" config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e, Cache) @@ -110,15 +110,14 @@ def cleanup_event_logger(): EVENT_MANAGER.callbacks.clear() -# The default event manager will not log anything, but some tests run code that -# generates events, without configuring the event manager, so we create an empty -# manager here until there is a better testing strategy in place. +# Since dbt-rpc does not do its own log setup, and since some events can +# currently fire before logs can be configured by setup_event_logger(), we +# create a default configuration with default settings and no file output. EVENT_MANAGER: EventManager = EventManager() +EVENT_MANAGER.add_logger( + _get_logbook_log_config() if flags.ENABLE_LEGACY_LOGGER else _get_stdout_config() +) -# Since dbt-rpc does not do its own log setup, we set up logbook if legacy -# logging is enabled. -if flags.ENABLE_LEGACY_LOGGER: - EVENT_MANAGER.add_logger(_get_logbook_log_config(None)) # This global, and the following two functions for capturing stdout logs are # an unpleasant hack we intend to remove as part of API-ification. 
The GitHub From e8da84fb9e177d9eee3d7722d3d6906bb283183d Mon Sep 17 00:00:00 2001 From: dave-connors-3 <73915542+dave-connors-3@users.noreply.github.com> Date: Wed, 14 Dec 2022 16:07:41 -0600 Subject: [PATCH 069/156] Feature/support incremental predicates (#5702) * pass predicated to merge strategy * postgres delete and insert * merge with predicates * update to use arbitrary list of predicates, not dictionaries, merge and delete * changie * add functional test to adapter zone * comma in test config * add test for incremental predicates delete and insert postgres * update test structure for inheritance * handle predicates config for backwards compatibility * test for predicates keyword * Add generated CLI API docs Co-authored-by: Colin Co-authored-by: Github Build Bot --- .../unreleased/Features-20220823-085727.yaml | 7 + .../docs/build/doctrees/environment.pickle | Bin 65160 -> 65160 bytes .../models/incremental/incremental.sql | 4 +- .../models/incremental/merge.sql | 30 ++-- .../models/incremental/strategies.sql | 6 +- .../test_incremental_predicates.py | 154 ++++++++++++++++++ 6 files changed, 186 insertions(+), 15 deletions(-) create mode 100644 .changes/unreleased/Features-20220823-085727.yaml create mode 100644 tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py diff --git a/.changes/unreleased/Features-20220823-085727.yaml b/.changes/unreleased/Features-20220823-085727.yaml new file mode 100644 index 00000000000..4d8daebbf5e --- /dev/null +++ b/.changes/unreleased/Features-20220823-085727.yaml @@ -0,0 +1,7 @@ +kind: Features +body: incremental predicates +time: 2022-08-23T08:57:27.640804-05:00 +custom: + Author: dave-connors-3 + Issue: "5680" + PR: "5702" diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle index 73d18c236adbabe5cec4cc557ff682bc63a5baf8..8aaad5e25b0b97cc741c122d6608193f2544081f 100644 GIT binary patch delta 21 dcmeD9%iQsodBZz-wr3rpJnrn9zsOIy2LNvp3F!a; delta 21 dcmeD9%iQsodBZz-w!7cjb+rmNf03VZ4*+?#3oZZv diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql index 602067616d2..e8ff5c1ea4f 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/incremental.sql @@ -50,9 +50,9 @@ {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#} {% set incremental_strategy = config.get('incremental_strategy') or 'default' %} - {% set incremental_predicates = config.get('incremental_predicates', none) %} + {% set incremental_predicates = config.get('predicates', none) or config.get('incremental_predicates', none) %} {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %} - {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'predicates': incremental_predicates }) %} + {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'incremental_predicates': incremental_predicates }) %} {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %} {% endif %} diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql 
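Before the merge.sql and strategies.sql hunks that follow, a sketch of how a project opts in to this feature, mirroring the functional test fixtures further below; note from the incremental.sql hunk above that the older `predicates` spelling is still honored for backwards compatibility:

# project-level config, expressed the way the functional tests below express it:
project_config = {
    "models": {
        "+incremental_strategy": "delete+insert",
        "+incremental_predicates": ["id != 2"],  # the legacy "+predicates" key also works
    }
}
# incremental.sql resolves the two spellings with:
#   config.get('predicates', none) or config.get('incremental_predicates', none)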
b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql index 836d768d01a..5033178be49 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql @@ -1,9 +1,9 @@ -{% macro get_merge_sql(target, source, unique_key, dest_columns, predicates=none) -%} - {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, predicates) }} +{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} + {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }} {%- endmacro %} -{% macro default__get_merge_sql(target, source, unique_key, dest_columns, predicates) -%} - {%- set predicates = [] if predicates is none else [] + predicates -%} +{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} + {%- set predicates = [] if incremental_predicates is none else [] + incremental_predicates -%} {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%} {%- set merge_update_columns = config.get('merge_update_columns') -%} {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%} @@ -32,7 +32,7 @@ merge into {{ target }} as DBT_INTERNAL_DEST using {{ source }} as DBT_INTERNAL_SOURCE - on {{ predicates | join(' and ') }} + on {{"(" ~ predicates | join(") and (") ~ ")"}} {% if unique_key %} when matched then update set @@ -50,11 +50,11 @@ {% endmacro %} -{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%} - {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns) }} +{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} + {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }} {%- endmacro %} -{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns) -%} +{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%} @@ -65,8 +65,13 @@ where ( {% for key in unique_key %} {{ source }}.{{ key }} = {{ target }}.{{ key }} - {{ "and " if not loop.last }} + {{ "and " if not loop.last}} {% endfor %} + {% if incremental_predicates %} + {% for predicate in incremental_predicates %} + and {{ predicate }} + {% endfor %} + {% endif %} ); {% else %} delete from {{ target }} @@ -74,7 +79,12 @@ {{ unique_key }}) in ( select ({{ unique_key }}) from {{ source }} - ); + ) + {%- if incremental_predicates %} + {% for predicate in incremental_predicates %} + and {{ predicate }} + {% endfor %} + {%- endif -%}; {% endif %} {% endif %} diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql index 5226d01de16..72082ccad32 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/strategies.sql @@ -21,7 +21,7 @@ {% macro default__get_incremental_delete_insert_sql(arg_dict) %} - {% do return(get_delete_insert_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], 
arg_dict["unique_key"], arg_dict["dest_columns"])) %} + {% do return(get_delete_insert_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %} {% endmacro %} @@ -35,7 +35,7 @@ {% macro default__get_incremental_merge_sql(arg_dict) %} - {% do return(get_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"])) %} + {% do return(get_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["unique_key"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %} {% endmacro %} @@ -48,7 +48,7 @@ {% macro default__get_incremental_insert_overwrite_sql(arg_dict) %} - {% do return(get_insert_overwrite_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["dest_columns"], arg_dict["predicates"])) %} + {% do return(get_insert_overwrite_merge_sql(arg_dict["target_relation"], arg_dict["temp_relation"], arg_dict["dest_columns"], arg_dict["incremental_predicates"])) %} {% endmacro %} diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py new file mode 100644 index 00000000000..11a4b6c0384 --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py @@ -0,0 +1,154 @@ +import pytest +from dbt.tests.util import run_dbt, check_relations_equal +from collections import namedtuple + + +models__delete_insert_incremental_predicates_sql = """ +{{ config( + materialized = 'incremental', + unique_key = 'id' +) }} + +{% if not is_incremental() %} + +select 1 as id, 'hello' as msg, 'blue' as color +union all +select 2 as id, 'goodbye' as msg, 'red' as color + +{% else %} + +-- delete will not happen on the above record where id = 2, so new record will be inserted instead +select 1 as id, 'hey' as msg, 'blue' as color +union all +select 2 as id, 'yo' as msg, 'green' as color +union all +select 3 as id, 'anyway' as msg, 'purple' as color + +{% endif %} +""" + +seeds__expected_delete_insert_incremental_predicates_csv = """id,msg,color +1,hey,blue +2,goodbye,red +2,yo,green +3,anyway,purple +""" + +ResultHolder = namedtuple( + "ResultHolder", + [ + "seed_count", + "model_count", + "seed_rows", + "inc_test_model_count", + "opt_model_count", + "relation", + ], +) + + +class BaseIncrementalPredicates: + @pytest.fixture(scope="class") + def models(self): + return { + "delete_insert_incremental_predicates.sql": models__delete_insert_incremental_predicates_sql + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "expected_delete_insert_incremental_predicates.csv": seeds__expected_delete_insert_incremental_predicates_csv + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "+incremental_predicates": [ + "id != 2" + ], + "+incremental_strategy": "delete+insert" + } + } + + def update_incremental_model(self, incremental_model): + """update incremental model after the seed table has been updated""" + model_result_set = run_dbt(["run", "--select", incremental_model]) + return len(model_result_set) + + def get_test_fields( + self, project, seed, incremental_model, update_sql_file, opt_model_count=None + ): + + seed_count = len(run_dbt(["seed", "--select", seed, "--full-refresh"])) + + model_count = len(run_dbt(["run", "--select", incremental_model, "--full-refresh"])) + # pass on kwarg + relation = incremental_model + # update 
seed in anticipation of incremental model update + row_count_query = "select * from {}.{}".format(project.test_schema, seed) + # project.run_sql_file(Path("seeds") / Path(update_sql_file + ".sql")) + seed_rows = len(project.run_sql(row_count_query, fetch="all")) + + # propagate seed state to incremental model according to unique keys + inc_test_model_count = self.update_incremental_model(incremental_model=incremental_model) + + return ResultHolder( + seed_count, model_count, seed_rows, inc_test_model_count, opt_model_count, relation + ) + + def check_scenario_correctness(self, expected_fields, test_case_fields, project): + """Invoke assertions to verify correct build functionality""" + # 1. test seed(s) should build afresh + assert expected_fields.seed_count == test_case_fields.seed_count + # 2. test model(s) should build afresh + assert expected_fields.model_count == test_case_fields.model_count + # 3. seeds should have intended row counts post update + assert expected_fields.seed_rows == test_case_fields.seed_rows + # 4. incremental test model(s) should be updated + assert expected_fields.inc_test_model_count == test_case_fields.inc_test_model_count + # 5. extra incremental model(s) should be built; optional since + # comparison may be between an incremental model and seed + if expected_fields.opt_model_count and test_case_fields.opt_model_count: + assert expected_fields.opt_model_count == test_case_fields.opt_model_count + # 6. result table should match intended result set (itself a relation) + check_relations_equal( + project.adapter, [expected_fields.relation, test_case_fields.relation] + ) + + def get_expected_fields(self, relation, seed_rows, opt_model_count=None): + return ResultHolder( + seed_count=1, + model_count=1, + inc_test_model_count=1, + seed_rows=seed_rows, + opt_model_count=opt_model_count, + relation=relation + ) + + # no unique_key test + def test__incremental_predicates(self, project): + """seed should match model after two incremental runs""" + + expected_fields = self.get_expected_fields(relation="expected_delete_insert_incremental_predicates", seed_rows=4) + test_case_fields = self.get_test_fields( + project, seed="expected_delete_insert_incremental_predicates", incremental_model="delete_insert_incremental_predicates", update_sql_file=None + ) + self.check_scenario_correctness(expected_fields, test_case_fields, project) + + +class TestIncrementalPredicatesDeleteInsert(BaseIncrementalPredicates): + pass + + +class TestPredicatesDeleteInsert(BaseIncrementalPredicates): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "+predicates": [ + "id != 2" + ], + "+incremental_strategy": "delete+insert" + } + } From df93858b4b1752950669f7e32cee82bccaf06263 Mon Sep 17 00:00:00 2001 From: leahwicz <60146280+leahwicz@users.noreply.github.com> Date: Thu, 15 Dec 2022 09:55:23 -0500 Subject: [PATCH 070/156] Updating 1.4.0b1 changelog (#6450) --- .changes/1.4.0-b1.md | 7 ++++++- .../{unreleased => 1.4.0}/Features-20220823-085727.yaml | 0 .changes/{unreleased => 1.4.0}/Fixes-20221213-112620.yaml | 0 .changes/{unreleased => 1.4.0}/Fixes-20221214-155307.yaml | 0 .../Under the Hood-20221213-214106.yaml | 0 CHANGELOG.md | 7 ++++++- 6 files changed, 12 insertions(+), 2 deletions(-) rename .changes/{unreleased => 1.4.0}/Features-20220823-085727.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221213-112620.yaml (100%) rename .changes/{unreleased => 1.4.0}/Fixes-20221214-155307.yaml (100%) rename .changes/{unreleased => 1.4.0}/Under the 
Hood-20221213-214106.yaml (100%) diff --git a/.changes/1.4.0-b1.md b/.changes/1.4.0-b1.md index 747aba542dd..d4005636890 100644 --- a/.changes/1.4.0-b1.md +++ b/.changes/1.4.0-b1.md @@ -1,4 +1,4 @@ -## dbt-core 1.4.0-b1 - December 14, 2022 +## dbt-core 1.4.0-b1 - December 15, 2022 ### Features @@ -13,6 +13,7 @@ - Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) - Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) - Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) ### Fixes @@ -24,6 +25,8 @@ - Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) - After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) - Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) +- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345)) +- Repair a regression which prevented basic logging before the logging subsystem is completely configured. ([#6434](https://github.com/dbt-labs/dbt-core/issues/6434)) ### Docs @@ -54,6 +57,7 @@ - Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) - Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) - Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) +- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) ### Dependencies @@ -85,3 +89,4 @@ - [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) - [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) - [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) diff --git a/.changes/unreleased/Features-20220823-085727.yaml b/.changes/1.4.0/Features-20220823-085727.yaml similarity index 100% rename from .changes/unreleased/Features-20220823-085727.yaml rename to .changes/1.4.0/Features-20220823-085727.yaml diff --git a/.changes/unreleased/Fixes-20221213-112620.yaml b/.changes/1.4.0/Fixes-20221213-112620.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221213-112620.yaml rename to .changes/1.4.0/Fixes-20221213-112620.yaml diff --git a/.changes/unreleased/Fixes-20221214-155307.yaml b/.changes/1.4.0/Fixes-20221214-155307.yaml similarity index 100% rename from .changes/unreleased/Fixes-20221214-155307.yaml rename to .changes/1.4.0/Fixes-20221214-155307.yaml diff --git a/.changes/unreleased/Under the Hood-20221213-214106.yaml b/.changes/1.4.0/Under the Hood-20221213-214106.yaml similarity index 100% rename from .changes/unreleased/Under the Hood-20221213-214106.yaml rename to .changes/1.4.0/Under the Hood-20221213-214106.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index edc845a9e55..338814ed03a 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ - "Breaking changes" listed under a version may require action from end 
users or external maintainers when upgrading to that version. - Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry) -## dbt-core 1.4.0-b1 - December 14, 2022 +## dbt-core 1.4.0-b1 - December 15, 2022 ### Features @@ -20,6 +20,7 @@ - Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) - Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) - Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) +- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) ### Fixes @@ -31,6 +32,8 @@ - Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) - After this, will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) - Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) +- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345)) +- Repair a regression which prevented basic logging before the logging subsystem is completely configured. ([#6434](https://github.com/dbt-labs/dbt-core/issues/6434)) ### Docs @@ -61,6 +64,7 @@ - Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) - Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) - Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) +- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) ### Dependencies @@ -92,6 +96,7 @@ - [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) - [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) - [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) +- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) ## Previous Releases From b9bdb775aba3a9b658aa0086e40e299d126fe04e Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Mon, 19 Dec 2022 13:13:38 -0600 Subject: [PATCH 071/156] Fix dependency changelog kind (#6452) --- .changes/1.4.0-b1.md | 3 --- .changes/1.4.0/Dependency-20221205-002118.yaml | 2 +- CHANGELOG.md | 3 --- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/.changes/1.4.0-b1.md b/.changes/1.4.0-b1.md index d4005636890..b2a0e96827c 100644 --- a/.changes/1.4.0-b1.md +++ b/.changes/1.4.0-b1.md @@ -65,9 +65,6 @@ - Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) - Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) - Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) - -### Dependency - - Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) ### Contributors diff --git 
a/.changes/1.4.0/Dependency-20221205-002118.yaml b/.changes/1.4.0/Dependency-20221205-002118.yaml index b1e1ae1a6cd..f4203a5285c 100644 --- a/.changes/1.4.0/Dependency-20221205-002118.yaml +++ b/.changes/1.4.0/Dependency-20221205-002118.yaml @@ -1,4 +1,4 @@ -kind: "Dependency" +kind: "Dependencies" body: "Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core" time: 2022-12-05T00:21:18.00000Z custom: diff --git a/CHANGELOG.md b/CHANGELOG.md index 338814ed03a..4a91696f68b 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,9 +72,6 @@ - Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) - Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) - Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) - -### Dependency - - Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) ### Contributors From 304797b099d3c7150ddd8ecc425ac5e872e0410b Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Mon, 19 Dec 2022 19:36:24 -0600 Subject: [PATCH 072/156] exception cleanup (#6347) * starting to move jinja exceptions * convert some exceptions * add back old functions for backward compatibility * organize * more conversions * more conversions * add changelog * split out CacheInconsistency * more conversions * convert even more * convert parsingexceptions * fix tests * more conversions * more conversions * finish converting exception functions * convert more tests * standardize to msg * remove some TODOs * fix test param and check the rest * add comment, move exceptions * add types * fix type errors * fix type for adapter_response * remove 0.13 version from message --- .../Breaking Changes-20221205-141937.yaml | 9 + core/dbt/adapters/base/impl.py | 70 +- core/dbt/adapters/base/relation.py | 6 +- core/dbt/adapters/cache.py | 46 +- core/dbt/adapters/sql/impl.py | 7 +- core/dbt/clients/_jinja_blocks.py | 55 +- core/dbt/clients/git.py | 25 +- core/dbt/clients/jinja.py | 24 +- core/dbt/clients/jinja_static.py | 12 +- core/dbt/clients/system.py | 3 +- core/dbt/compilation.py | 4 +- core/dbt/config/profile.py | 16 +- core/dbt/config/project.py | 22 +- core/dbt/config/runtime.py | 24 +- core/dbt/config/utils.py | 8 +- core/dbt/context/base.py | 24 +- core/dbt/context/configured.py | 7 +- core/dbt/context/docs.py | 10 +- core/dbt/context/exceptions_jinja.py | 142 + core/dbt/context/macro_resolver.py | 6 +- core/dbt/context/macros.py | 6 +- core/dbt/context/providers.py | 120 +- core/dbt/context/secret.py | 5 +- core/dbt/contracts/graph/manifest.py | 36 +- core/dbt/contracts/relation.py | 6 +- core/dbt/deps/git.py | 7 +- core/dbt/deps/registry.py | 10 +- core/dbt/deps/resolver.py | 24 +- core/dbt/events/functions.py | 5 +- core/dbt/exceptions.py | 2410 +++++++++++++---- core/dbt/parser/base.py | 8 +- core/dbt/parser/generic_test_builders.py | 71 +- core/dbt/parser/manifest.py | 10 +- core/dbt/parser/models.py | 21 +- core/dbt/parser/schemas.py | 91 +- core/dbt/parser/snapshots.py | 4 +- core/dbt/task/generate.py | 4 +- core/dbt/task/run.py | 6 +- core/dbt/task/runnable.py | 2 +- core/dbt/task/test.py | 10 +- core/dbt/utils.py | 4 +- .../postgres/dbt/adapters/postgres/impl.py | 31 +- .../duplicates/test_duplicate_model.py | 4 +- .../functional/exit_codes/test_exit_codes.py | 2 +- .../schema_tests/test_schema_v2_tests.py | 6 +- 45 files changed, 2349 insertions(+), 1074 deletions(-) create mode 
100644 .changes/unreleased/Breaking Changes-20221205-141937.yaml create mode 100644 core/dbt/context/exceptions_jinja.py diff --git a/.changes/unreleased/Breaking Changes-20221205-141937.yaml b/.changes/unreleased/Breaking Changes-20221205-141937.yaml new file mode 100644 index 00000000000..be840b20a99 --- /dev/null +++ b/.changes/unreleased/Breaking Changes-20221205-141937.yaml @@ -0,0 +1,9 @@ +kind: Breaking Changes +body: Cleaned up exceptions to directly raise in code. Removed use of all exception + functions in the code base and marked them all as deprecated to be removed next + minor release. +time: 2022-12-05T14:19:37.863032-06:00 +custom: + Author: emmyoop + Issue: "6339" + PR: "6347" diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index bbac18cb16b..64ebbeac5dd 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -22,13 +22,20 @@ import pytz from dbt.exceptions import ( - raise_database_error, - raise_compiler_error, - invalid_type_error, - get_relation_returned_multiple_results, InternalException, + InvalidMacroArgType, + InvalidMacroResult, + InvalidQuoteConfigType, NotImplementedException, + NullRelationCacheAttempted, + NullRelationDropAttempted, + RelationReturnedMultipleResults, + RenameToNoneAttempted, RuntimeException, + SnapshotTargetIncomplete, + SnapshotTargetNotSnapshotTable, + UnexpectedNull, + UnexpectedNonTimestamp, ) from dbt.adapters.protocol import ( @@ -97,18 +104,10 @@ def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datet assume the datetime is already for UTC and add the timezone. """ if dt is None: - raise raise_database_error( - "Expected a non-null value when querying field '{}' of table " - " {} but received value 'null' instead".format(field_name, source) - ) + raise UnexpectedNull(field_name, source) elif not hasattr(dt, "tzinfo"): - raise raise_database_error( - "Expected a timestamp value when querying field '{}' of table " - "{} but received value of type '{}' instead".format( - field_name, source, type(dt).__name__ - ) - ) + raise UnexpectedNonTimestamp(field_name, source, dt) elif dt.tzinfo: return dt.astimezone(pytz.UTC) @@ -434,7 +433,7 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str: """Cache a new relation in dbt. It will show up in `list relations`.""" if relation is None: name = self.nice_connection_name() - raise_compiler_error("Attempted to cache a null relation for {}".format(name)) + raise NullRelationCacheAttempted(name) self.cache.add(relation) # so jinja doesn't render things return "" @@ -446,7 +445,7 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str: """ if relation is None: name = self.nice_connection_name() - raise_compiler_error("Attempted to drop a null relation for {}".format(name)) + raise NullRelationDropAttempted(name) self.cache.drop(relation) return "" @@ -463,9 +462,7 @@ def cache_renamed( name = self.nice_connection_name() src_name = _relation_name(from_relation) dst_name = _relation_name(to_relation) - raise_compiler_error( - "Attempted to rename {} to {} for {}".format(src_name, dst_name, name) - ) + raise RenameToNoneAttempted(src_name, dst_name, name) self.cache.rename(from_relation, to_relation) return "" @@ -615,7 +612,7 @@ def get_missing_columns( to_relation. 
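        A hedged usage sketch (macro context and relation names assumed, not
        part of the original docstring's wording): in a macro,
        {% set missing = adapter.get_missing_columns(tmp_relation, target_relation) %}
        returns the columns to add; per this hunk, passing anything that is
        not a Relation (for example a plain string) now raises the typed
        InvalidMacroArgType error instead of going through the old
        invalid_type_error() helper.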
""" if not isinstance(from_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="get_missing_columns", arg_name="from_relation", got_value=from_relation, @@ -623,7 +620,7 @@ def get_missing_columns( ) if not isinstance(to_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="get_missing_columns", arg_name="to_relation", got_value=to_relation, @@ -648,7 +645,7 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: incorrect. """ if not isinstance(relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="valid_snapshot_target", arg_name="relation", got_value=relation, @@ -669,24 +666,16 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: if missing: if extra: - msg = ( - 'Snapshot target has ("{}") but not ("{}") - is it an ' - "unmigrated previous version archive?".format( - '", "'.join(extra), '", "'.join(missing) - ) - ) + raise SnapshotTargetIncomplete(extra, missing) else: - msg = 'Snapshot target is not a snapshot table (missing "{}")'.format( - '", "'.join(missing) - ) - raise_compiler_error(msg) + raise SnapshotTargetNotSnapshotTable(missing) @available.parse_none def expand_target_column_types( self, from_relation: BaseRelation, to_relation: BaseRelation ) -> None: if not isinstance(from_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="expand_target_column_types", arg_name="from_relation", got_value=from_relation, @@ -694,7 +683,7 @@ def expand_target_column_types( ) if not isinstance(to_relation, self.Relation): - invalid_type_error( + raise InvalidMacroArgType( method_name="expand_target_column_types", arg_name="to_relation", got_value=to_relation, @@ -776,7 +765,7 @@ def get_relation(self, database: str, schema: str, identifier: str) -> Optional[ "schema": schema, "database": database, } - get_relation_returned_multiple_results(kwargs, matches) + raise RelationReturnedMultipleResults(kwargs, matches) elif matches: return matches[0] @@ -840,10 +829,7 @@ def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str: elif quote_config is None: pass else: - raise_compiler_error( - f'The seed configuration value of "quote_columns" has an ' - f"invalid type {type(quote_config)}" - ) + raise InvalidQuoteConfigType(quote_config) if quote_columns: return self.quote(column) @@ -1093,11 +1079,7 @@ def calculate_freshness( # now we have a 1-row table of the maximum `loaded_at_field` value and # the current time according to the db. if len(table) != 1 or len(table[0]) != 2: - raise_compiler_error( - 'Got an invalid result from "{}" macro: {}'.format( - FRESHNESS_MACRO_NAME, [tuple(r) for r in table] - ) - ) + raise InvalidMacroResult(FRESHNESS_MACRO_NAME, table) if table[0][0] is None: # no records in the table, so really the max_loaded_at was # infinitely long ago. 
Just call it 0:00 January 1 year UTC diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index 0461990c92d..5bc0c56b264 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -11,7 +11,7 @@ Policy, Path, ) -from dbt.exceptions import InternalException +from dbt.exceptions import ApproximateMatch, InternalException, MultipleDatabasesNotAllowed from dbt.node_types import NodeType from dbt.utils import filter_null_values, deep_merge, classproperty @@ -100,7 +100,7 @@ def matches( if approximate_match and not exact_match: target = self.create(database=database, schema=schema, identifier=identifier) - dbt.exceptions.approximate_relation_match(target, self) + raise ApproximateMatch(target, self) return exact_match @@ -438,7 +438,7 @@ def flatten(self, allow_multiple_databases: bool = False): if not allow_multiple_databases: seen = {r.database.lower() for r in self if r.database} if len(seen) > 1: - dbt.exceptions.raise_compiler_error(str(seen)) + raise MultipleDatabasesNotAllowed(seen) for information_schema_name, schema in self.search(): path = {"database": information_schema_name.database, "schema": schema} diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py index 6c60039f262..90c4cab27fb 100644 --- a/core/dbt/adapters/cache.py +++ b/core/dbt/adapters/cache.py @@ -1,4 +1,3 @@ -import re import threading from copy import deepcopy from typing import Any, Dict, Iterable, List, Optional, Set, Tuple @@ -9,7 +8,13 @@ _make_msg_from_ref_key, _ReferenceKey, ) -import dbt.exceptions +from dbt.exceptions import ( + DependentLinkNotCached, + NewNameAlreadyInCache, + NoneRelationFound, + ReferencedLinkNotCached, + TruncatedModelNameCausedCollision, +) from dbt.events.functions import fire_event, fire_event_if from dbt.events.types import ( AddLink, @@ -150,11 +155,7 @@ def rename_key(self, old_key, new_key): :raises InternalError: If the new key already exists. """ if new_key in self.referenced_by: - dbt.exceptions.raise_cache_inconsistent( - 'in rename of "{}" -> "{}", new name is in the cache already'.format( - old_key, new_key - ) - ) + raise NewNameAlreadyInCache(old_key, new_key) if old_key not in self.referenced_by: return @@ -270,15 +271,11 @@ def _add_link(self, referenced_key, dependent_key): if referenced is None: return if referenced is None: - dbt.exceptions.raise_cache_inconsistent( - "in add_link, referenced link key {} not in cache!".format(referenced_key) - ) + raise ReferencedLinkNotCached(referenced_key) dependent = self.relations.get(dependent_key) if dependent is None: - dbt.exceptions.raise_cache_inconsistent( - "in add_link, dependent link key {} not in cache!".format(dependent_key) - ) + raise DependentLinkNotCached(dependent_key) assert dependent is not None # we just raised! @@ -430,24 +427,7 @@ def _check_rename_constraints(self, old_key, new_key): if new_key in self.relations: # Tell user when collision caused by model names truncated during # materialization. - match = re.search("__dbt_backup|__dbt_tmp$", new_key.identifier) - if match: - truncated_model_name_prefix = new_key.identifier[: match.start()] - message_addendum = ( - "\n\nName collisions can occur when the length of two " - "models' names approach your database's builtin limit. " - "Try restructuring your project such that no two models " - "share the prefix '{}'.".format(truncated_model_name_prefix) - + " Then, clean your warehouse of any removed models." 
- ) - else: - message_addendum = "" - - dbt.exceptions.raise_cache_inconsistent( - "in rename, new key {} already in cache: {}{}".format( - new_key, list(self.relations.keys()), message_addendum - ) - ) + raise TruncatedModelNameCausedCollision(new_key, self.relations) if old_key not in self.relations: fire_event(TemporaryRelation(key=_make_msg_from_ref_key(old_key))) @@ -505,9 +485,7 @@ def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[ ] if None in results: - dbt.exceptions.raise_cache_inconsistent( - "in get_relations, a None relation was found in the cache!" - ) + raise NoneRelationFound() return results def clear(self): diff --git a/core/dbt/adapters/sql/impl.py b/core/dbt/adapters/sql/impl.py index 20241d9e53d..4606b046f54 100644 --- a/core/dbt/adapters/sql/impl.py +++ b/core/dbt/adapters/sql/impl.py @@ -1,9 +1,8 @@ import agate from typing import Any, Optional, Tuple, Type, List -import dbt.clients.agate_helper from dbt.contracts.connection import Connection -import dbt.exceptions +from dbt.exceptions import RelationTypeNull from dbt.adapters.base import BaseAdapter, available from dbt.adapters.cache import _make_ref_key_msg from dbt.adapters.sql import SQLConnectionManager @@ -132,9 +131,7 @@ def alter_column_type(self, relation, column_name, new_column_type) -> None: def drop_relation(self, relation): if relation.type is None: - dbt.exceptions.raise_compiler_error( - "Tried to drop relation {}, but its type is null.".format(relation) - ) + raise RelationTypeNull(relation) self.cache_dropped(relation) self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation}) diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py index c1ef31acf44..fa74a317649 100644 --- a/core/dbt/clients/_jinja_blocks.py +++ b/core/dbt/clients/_jinja_blocks.py @@ -1,7 +1,15 @@ import re from collections import namedtuple -import dbt.exceptions +from dbt.exceptions import ( + BlockDefinitionNotAtTop, + InternalException, + MissingCloseTag, + MissingControlFlowStartTag, + NestedTags, + UnexpectedControlFlowEndTag, + UnexpectedMacroEOF, +) def regex(pat): @@ -139,10 +147,7 @@ def _first_match(self, *patterns, **kwargs): def _expect_match(self, expected_name, *patterns, **kwargs): match = self._first_match(*patterns, **kwargs) if match is None: - msg = 'unexpected EOF, expected {}, got "{}"'.format( - expected_name, self.data[self.pos :] - ) - dbt.exceptions.raise_compiler_error(msg) + raise UnexpectedMacroEOF(expected_name, self.data[self.pos :]) return match def handle_expr(self, match): @@ -256,7 +261,7 @@ def find_tags(self): elif block_type_name is not None: yield self.handle_tag(match) else: - raise dbt.exceptions.InternalException( + raise InternalException( "Invalid regex match in next_block, expected block start, " "expr start, or comment start" ) @@ -265,13 +270,6 @@ def __iter__(self): return self.find_tags() -duplicate_tags = ( - "Got nested tags: {outer.block_type_name} (started at {outer.start}) did " - "not have a matching {{% end{outer.block_type_name} %}} before a " - "subsequent {inner.block_type_name} was found (started at {inner.start})" -) - - _CONTROL_FLOW_TAGS = { "if": "endif", "for": "endfor", @@ -319,33 +317,16 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): found = self.stack.pop() else: expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name] - dbt.exceptions.raise_compiler_error( - ( - "Got an unexpected control flow end tag, got {} but " - "never saw a preceeding {} (@ {})" - 
).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start)) - ) + raise UnexpectedControlFlowEndTag(tag, expected, self.tag_parser) expected = _CONTROL_FLOW_TAGS[found] if expected != tag.block_type_name: - dbt.exceptions.raise_compiler_error( - ( - "Got an unexpected control flow end tag, got {} but " - "expected {} next (@ {})" - ).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start)) - ) + raise MissingControlFlowStartTag(tag, expected, self.tag_parser) if tag.block_type_name in allowed_blocks: if self.stack: - dbt.exceptions.raise_compiler_error( - ( - "Got a block definition inside control flow at {}. " - "All dbt block definitions must be at the top level" - ).format(self.tag_parser.linepos(tag.start)) - ) + raise BlockDefinitionNotAtTop(self.tag_parser, tag.start) if self.current is not None: - dbt.exceptions.raise_compiler_error( - duplicate_tags.format(outer=self.current, inner=tag) - ) + raise NestedTags(outer=self.current, inner=tag) if collect_raw_data: raw_data = self.data[self.last_position : tag.start] self.last_position = tag.start @@ -366,11 +347,7 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): if self.current: linecount = self.data[: self.current.end].count("\n") + 1 - dbt.exceptions.raise_compiler_error( - ("Reached EOF without finding a close tag for {} (searched from line {})").format( - self.current.block_type_name, linecount - ) - ) + raise MissingCloseTag(self.current.block_type_name, linecount) if collect_raw_data: raw_data = self.data[self.last_position :] diff --git a/core/dbt/clients/git.py b/core/dbt/clients/git.py index 9eaa93203e0..4ddbb1969ee 100644 --- a/core/dbt/clients/git.py +++ b/core/dbt/clients/git.py @@ -14,10 +14,10 @@ ) from dbt.exceptions import ( CommandResultError, + GitCheckoutError, + GitCloningError, + GitCloningProblem, RuntimeException, - bad_package_spec, - raise_git_cloning_error, - raise_git_cloning_problem, ) from packaging import version @@ -27,16 +27,6 @@ def _is_commit(revision: str) -> bool: return bool(re.match(r"\b[0-9a-f]{40}\b", revision)) -def _raise_git_cloning_error(repo, revision, error): - stderr = error.stderr.strip() - if "usage: git" in stderr: - stderr = stderr.split("\nusage: git")[0] - if re.match("fatal: destination path '(.+)' already exists", stderr): - raise_git_cloning_error(error) - - bad_package_spec(repo, revision, stderr) - - def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirectory=None): has_revision = revision is not None is_commit = _is_commit(revision or "") @@ -64,7 +54,7 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec try: result = run_cmd(cwd, clone_cmd, env={"LC_ALL": "C"}) except CommandResultError as exc: - _raise_git_cloning_error(repo, revision, exc) + raise GitCloningError(repo, revision, exc) if subdirectory: cwd_subdir = os.path.join(cwd, dirname or "") @@ -72,7 +62,7 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec try: run_cmd(cwd_subdir, clone_cmd_subdir) except CommandResultError as exc: - _raise_git_cloning_error(repo, revision, exc) + raise GitCloningError(repo, revision, exc) if remove_git_dir: rmdir(os.path.join(dirname, ".git")) @@ -115,8 +105,7 @@ def checkout(cwd, repo, revision=None): try: return _checkout(cwd, repo, revision) except CommandResultError as exc: - stderr = exc.stderr.strip() - bad_package_spec(repo, revision, stderr) + raise GitCheckoutError(repo=repo, revision=revision, error=exc) def get_current_sha(cwd): @@ 
-145,7 +134,7 @@ def clone_and_checkout( err = exc.stderr exists = re.match("fatal: destination path '(.+)' already exists", err) if not exists: - raise_git_cloning_problem(repo) + raise GitCloningProblem(repo) directory = None start_sha = None diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py index ac04bb86cb4..c1b8865e33e 100644 --- a/core/dbt/clients/jinja.py +++ b/core/dbt/clients/jinja.py @@ -28,12 +28,16 @@ from dbt.contracts.graph.nodes import GenericTestNode from dbt.exceptions import ( - InternalException, - raise_compiler_error, + CaughtMacroException, + CaughtMacroExceptionWithNode, CompilationException, - invalid_materialization_argument, - MacroReturn, + InternalException, + InvalidMaterializationArg, JinjaRenderingException, + MacroReturn, + MaterializtionMacroNotUsed, + NoSupportedLanguagesFound, + UndefinedCompilation, UndefinedMacroException, ) from dbt import flags @@ -237,7 +241,7 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise_compiler_error(str(e)) + raise CaughtMacroException(e) def call_macro(self, *args, **kwargs): # called from __call__ methods @@ -296,7 +300,7 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise_compiler_error(str(e), self.macro) + raise CaughtMacroExceptionWithNode(exc=e, node=self.macro) except CompilationException as e: e.stack.append(self.macro) raise e @@ -376,7 +380,7 @@ def parse(self, parser): node.defaults.append(languages) else: - invalid_materialization_argument(materialization_name, target.name) + raise InvalidMaterializationArg(materialization_name, target.name) if SUPPORTED_LANG_ARG not in node.args: node.args.append(SUPPORTED_LANG_ARG) @@ -451,7 +455,7 @@ def __call__(self, *args, **kwargs): return self def __reduce__(self): - raise_compiler_error(f"{self.name} is undefined", node=node) + raise UndefinedCompilation(name=self.name, node=node) return Undefined @@ -651,13 +655,13 @@ def _convert_function(value: Any, keypath: Tuple[Union[str, int], ...]) -> Any: def get_supported_languages(node: jinja2.nodes.Macro) -> List[ModelLanguage]: if "materialization" not in node.name: - raise_compiler_error("Only materialization macros can be used with this function") + raise MaterializtionMacroNotUsed(node=node) no_kwargs = not node.defaults no_langs_found = SUPPORTED_LANG_ARG not in node.args if no_kwargs or no_langs_found: - raise_compiler_error(f"No supported_languages found in materialization macro {node.name}") + raise NoSupportedLanguagesFound(node=node) lang_idx = node.args.index(SUPPORTED_LANG_ARG) # indexing defaults from the end diff --git a/core/dbt/clients/jinja_static.py b/core/dbt/clients/jinja_static.py index 337a25eadda..d71211cea6e 100644 --- a/core/dbt/clients/jinja_static.py +++ b/core/dbt/clients/jinja_static.py @@ -1,6 +1,6 @@ import jinja2 from dbt.clients.jinja import get_environment -from dbt.exceptions import raise_compiler_error +from dbt.exceptions import MacroNamespaceNotString, MacroNameNotString def statically_extract_macro_calls(string, ctx, db_wrapper=None): @@ -117,20 +117,14 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper): func_name = kwarg.value.value possible_macro_calls.append(func_name) else: - raise_compiler_error( - f"The macro_name parameter ({kwarg.value.value}) " - "to adapter.dispatch was not a string" - ) + raise MacroNameNotString(kwarg_value=kwarg.value.value) elif kwarg.key == "macro_namespace": 
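            # A hedged illustration (template snippet assumed): static parsing can
            # resolve constant kwargs such as
            #   {{ adapter.dispatch(macro_name="current_timestamp", macro_namespace="dbt")() }}
            # but per this hunk a macro_name that is not a plain string constant
            # (e.g. macro_name=some_var) now raises the typed MacroNameNotString
            # error rather than a bare raise_compiler_error call.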
# This will remain to enable static resolution kwarg_type = type(kwarg.value).__name__ if kwarg_type == "Const": macro_namespace = kwarg.value.value else: - raise_compiler_error( - "The macro_namespace parameter to adapter.dispatch " - f"is a {kwarg_type}, not a string" - ) + raise MacroNamespaceNotString(kwarg_type) # positional arguments if packages_arg: diff --git a/core/dbt/clients/system.py b/core/dbt/clients/system.py index b1cd1b5a074..b776e91b1d0 100644 --- a/core/dbt/clients/system.py +++ b/core/dbt/clients/system.py @@ -144,7 +144,8 @@ def make_symlink(source: str, link_path: str) -> None: Create a symlink at `link_path` referring to `source`. """ if not supports_symlinks(): - dbt.exceptions.system_error("create a symbolic link") + # TODO: why not import these at top? + raise dbt.exceptions.SymbolicLinkError() os.symlink(source, link_path) diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index fcf98b4e914..4ae78fd3485 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -21,7 +21,7 @@ SeedNode, ) from dbt.exceptions import ( - dependency_not_found, + GraphDependencyNotFound, InternalException, RuntimeException, ) @@ -399,7 +399,7 @@ def link_node(self, linker: Linker, node: GraphMemberNode, manifest: Manifest): elif dependency in manifest.metrics: linker.dependency(node.unique_id, (manifest.metrics[dependency].unique_id)) else: - dependency_not_found(node, dependency) + raise GraphDependencyNotFound(node, dependency) def link_graph(self, linker: Linker, manifest: Manifest, add_test_edges: bool = False): for source in manifest.sources.values(): diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py index 39679baa109..e8bf85dbd27 100644 --- a/core/dbt/config/profile.py +++ b/core/dbt/config/profile.py @@ -9,12 +9,14 @@ from dbt.clients.yaml_helper import load_yaml_text from dbt.contracts.connection import Credentials, HasCredentials from dbt.contracts.project import ProfileConfig, UserConfig -from dbt.exceptions import CompilationException -from dbt.exceptions import DbtProfileError -from dbt.exceptions import DbtProjectError -from dbt.exceptions import ValidationException -from dbt.exceptions import RuntimeException -from dbt.exceptions import validator_error_message +from dbt.exceptions import ( + CompilationException, + DbtProfileError, + DbtProjectError, + ValidationException, + RuntimeException, + ProfileConfigInvalid, +) from dbt.events.types import MissingProfileTarget from dbt.events.functions import fire_event from dbt.utils import coerce_dict_str @@ -156,7 +158,7 @@ def validate(self): dct = self.to_profile_info(serialize_credentials=True) ProfileConfig.validate(dct) except ValidationError as exc: - raise DbtProfileError(validator_error_message(exc)) from exc + raise ProfileConfigInvalid(exc) from exc @staticmethod def _credentials_from_profile( diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 9521dd29882..69c6b79866c 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -16,19 +16,19 @@ import os from dbt import flags, deprecations -from dbt.clients.system import resolve_path_from_base -from dbt.clients.system import path_exists -from dbt.clients.system import load_file_contents +from dbt.clients.system import path_exists, resolve_path_from_base, load_file_contents from dbt.clients.yaml_helper import load_yaml_text from dbt.contracts.connection import QueryComment -from dbt.exceptions import DbtProjectError -from dbt.exceptions import SemverException -from dbt.exceptions import 
validator_error_message -from dbt.exceptions import RuntimeException +from dbt.exceptions import ( + DbtProjectError, + SemverException, + ProjectContractBroken, + ProjectContractInvalid, + RuntimeException, +) from dbt.graph import SelectionSpec from dbt.helper_types import NoValue -from dbt.semver import VersionSpecifier -from dbt.semver import versions_compatible +from dbt.semver import VersionSpecifier, versions_compatible from dbt.version import get_installed_version from dbt.utils import MultiDict from dbt.node_types import NodeType @@ -325,7 +325,7 @@ def create_project(self, rendered: RenderComponents) -> "Project": ProjectContract.validate(rendered.project_dict) cfg = ProjectContract.from_dict(rendered.project_dict) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ProjectContractInvalid(e) from e # name/version are required in the Project definition, so we can assume # they are present name = cfg.name @@ -642,7 +642,7 @@ def validate(self): try: ProjectContract.validate(self.to_project_config()) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ProjectContractBroken(e) from e @classmethod def partial_load(cls, project_root: str, *, verify_version: bool = False) -> PartialProject: diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index 236baf497a6..8b1b30f383b 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -25,10 +25,11 @@ from dbt.contracts.relation import ComponentName from dbt.dataclass_schema import ValidationError from dbt.exceptions import ( + ConfigContractBroken, DbtProjectError, + NonUniquePackageName, RuntimeException, - raise_compiler_error, - validator_error_message, + UninstalledPackagesFound, ) from dbt.events.functions import warn_or_error from dbt.events.types import UnusedResourceConfigPath @@ -186,7 +187,7 @@ def validate(self): try: Configuration.validate(self.serialize()) except ValidationError as e: - raise DbtProjectError(validator_error_message(e)) from e + raise ConfigContractBroken(e) from e @classmethod def _get_rendered_profile( @@ -352,22 +353,15 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: count_packages_specified = len(self.packages.packages) # type: ignore count_packages_installed = len(tuple(self._get_project_directories())) if count_packages_specified > count_packages_installed: - raise_compiler_error( - f"dbt found {count_packages_specified} package(s) " - f"specified in packages.yml, but only " - f"{count_packages_installed} package(s) installed " - f'in {self.packages_install_path}. Run "dbt deps" to ' - f"install package dependencies." + raise UninstalledPackagesFound( + count_packages_specified, + count_packages_installed, + self.packages_install_path, ) project_paths = itertools.chain(internal_packages, self._get_project_directories()) for project_name, project in self.load_projects(project_paths): if project_name in all_projects: - raise_compiler_error( - f"dbt found more than one package with the name " - f'"{project_name}" included in this project. Package ' - f"names must be unique in a project. Please rename " - f"one of these packages." 
- ) + raise NonUniquePackageName(project_name) all_projects[project_name] = project self.dependencies = all_projects return self.dependencies diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py index 728e558ebbd..921626ba088 100644 --- a/core/dbt/config/utils.py +++ b/core/dbt/config/utils.py @@ -9,7 +9,7 @@ from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.events.functions import fire_event from dbt.events.types import InvalidVarsYAML -from dbt.exceptions import ValidationException, raise_compiler_error +from dbt.exceptions import ValidationException, VarsArgNotYamlDict def parse_cli_vars(var_string: str) -> Dict[str, Any]: @@ -19,11 +19,7 @@ def parse_cli_vars(var_string: str) -> Dict[str, Any]: if var_type is dict: return cli_vars else: - type_name = var_type.__name__ - raise_compiler_error( - "The --vars argument must be a YAML dictionary, but was " - "of type '{}'".format(type_name) - ) + raise VarsArgNotYamlDict(var_type) except ValidationException: fire_event(InvalidVarsYAML()) raise diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index e57c3edac56..59984cb96ab 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -10,11 +10,12 @@ from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER from dbt.contracts.graph.nodes import Resource from dbt.exceptions import ( - CompilationException, + DisallowSecretEnvVar, + EnvVarMissing, MacroReturn, - raise_compiler_error, - raise_parsing_error, - disallow_secret_env_var, + RequiredVarNotFound, + SetStrictWrongType, + ZipStrictWrongType, ) from dbt.events.functions import fire_event, get_invocation_id from dbt.events.types import JinjaLogInfo, JinjaLogDebug @@ -128,7 +129,6 @@ def __new__(mcls, name, bases, dct): class Var: - UndefinedVarError = "Required var '{}' not found in config:\nVars supplied to {} = {}" _VAR_NOTSET = object() def __init__( @@ -153,10 +153,7 @@ def node_name(self): return "" def get_missing_var(self, var_name): - dct = {k: self._merged[k] for k in self._merged} - pretty_vars = json.dumps(dct, sort_keys=True, indent=4) - msg = self.UndefinedVarError.format(var_name, self.node_name, pretty_vars) - raise_compiler_error(msg, self._node) + raise RequiredVarNotFound(var_name, self._merged, self._node) def has_var(self, var_name: str): return var_name in self._merged @@ -300,7 +297,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -315,8 +312,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) if os.environ.get("DBT_MACRO_DEBUGGING"): @@ -497,7 +493,7 @@ def set_strict(value: Iterable[Any]) -> Set[Any]: try: return set(value) except TypeError as e: - raise CompilationException(e) + raise SetStrictWrongType(e) @contextmember("zip") @staticmethod @@ -541,7 +537,7 @@ def zip_strict(*args: Iterable[Any]) -> Iterable[Any]: try: return zip(*args) except TypeError as e: - raise CompilationException(e) + raise ZipStrictWrongType(e) @contextmember @staticmethod diff --git a/core/dbt/context/configured.py b/core/dbt/context/configured.py index ae2ee10baec..ca1de35423b 100644 --- a/core/dbt/context/configured.py +++ b/core/dbt/context/configured.py @@ -8,7 +8,7 @@ from 
dbt.context.base import contextproperty, contextmember, Var from dbt.context.target import TargetContext -from dbt.exceptions import raise_parsing_error, disallow_secret_env_var +from dbt.exceptions import EnvVarMissing, DisallowSecretEnvVar class ConfiguredContext(TargetContext): @@ -86,7 +86,7 @@ def var(self) -> ConfiguredVar: def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -104,8 +104,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) class MacroResolvingContext(ConfiguredContext): diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py index 4908829d414..89a652736dd 100644 --- a/core/dbt/context/docs.py +++ b/core/dbt/context/docs.py @@ -1,8 +1,8 @@ from typing import Any, Dict, Union from dbt.exceptions import ( - doc_invalid_args, - doc_target_not_found, + DocTargetNotFound, + InvalidDocArgs, ) from dbt.config.runtime import RuntimeConfig from dbt.contracts.graph.manifest import Manifest @@ -52,7 +52,7 @@ def doc(self, *args: str) -> str: elif len(args) == 2: doc_package_name, doc_name = args else: - doc_invalid_args(self.node, args) + raise InvalidDocArgs(self.node, args) # Documentation target_doc = self.manifest.resolve_doc( @@ -68,7 +68,9 @@ def doc(self, *args: str) -> str: # TODO CT-211 source_file.add_node(self.node.unique_id) # type: ignore[union-attr] else: - doc_target_not_found(self.node, doc_name, doc_package_name) + raise DocTargetNotFound( + node=self.node, target_doc_name=doc_name, target_doc_package=doc_package_name + ) return target_doc.block_contents diff --git a/core/dbt/context/exceptions_jinja.py b/core/dbt/context/exceptions_jinja.py new file mode 100644 index 00000000000..5663b4701e0 --- /dev/null +++ b/core/dbt/context/exceptions_jinja.py @@ -0,0 +1,142 @@ +import functools +from typing import NoReturn + +from dbt.events.functions import warn_or_error +from dbt.events.helpers import env_secrets, scrub_secrets +from dbt.events.types import JinjaLogWarning + +from dbt.exceptions import ( + RuntimeException, + MissingConfig, + MissingMaterialization, + MissingRelation, + AmbiguousAlias, + AmbiguousCatalogMatch, + CacheInconsistency, + DataclassNotDict, + CompilationException, + DatabaseException, + DependencyNotFound, + DependencyException, + DuplicatePatchPath, + DuplicateResourceName, + InvalidPropertyYML, + NotImplementedException, + RelationWrongType, +) + + +def warn(msg, node=None): + warn_or_error(JinjaLogWarning(msg=msg), node=node) + return "" + + +def missing_config(model, name) -> NoReturn: + raise MissingConfig(unique_id=model.unique_id, name=name) + + +def missing_materialization(model, adapter_type) -> NoReturn: + raise MissingMaterialization(model=model, adapter_type=adapter_type) + + +def missing_relation(relation, model=None) -> NoReturn: + raise MissingRelation(relation, model) + + +def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: + raise AmbiguousAlias(node_1, node_2, duped_name) + + +def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: + raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + + +def raise_cache_inconsistent(message) -> NoReturn: + raise CacheInconsistency(message) + + +def 
raise_dataclass_not_dict(obj) -> NoReturn: + raise DataclassNotDict(obj) + + +def raise_compiler_error(msg, node=None) -> NoReturn: + raise CompilationException(msg, node) + + +def raise_database_error(msg, node=None) -> NoReturn: + raise DatabaseException(msg, node) + + +def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: + raise DependencyNotFound(node, node_description, required_pkg) + + +def raise_dependency_error(msg) -> NoReturn: + raise DependencyException(scrub_secrets(msg, env_secrets())) + + +def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: + raise DuplicatePatchPath(patch_1, existing_patch_path) + + +def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: + raise DuplicateResourceName(node_1, node_2) + + +def raise_invalid_property_yml_version(path, issue) -> NoReturn: + raise InvalidPropertyYML(path, issue) + + +def raise_not_implemented(msg) -> NoReturn: + raise NotImplementedException(msg) + + +def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: + raise RelationWrongType(relation, expected_type, model) + + +# Update this when a new function should be added to the +# dbt context's `exceptions` key! +CONTEXT_EXPORTS = { + fn.__name__: fn + for fn in [ + warn, + missing_config, + missing_materialization, + missing_relation, + raise_ambiguous_alias, + raise_ambiguous_catalog_match, + raise_cache_inconsistent, + raise_dataclass_not_dict, + raise_compiler_error, + raise_database_error, + raise_dep_not_found, + raise_dependency_error, + raise_duplicate_patch_name, + raise_duplicate_resource_name, + raise_invalid_property_yml_version, + raise_not_implemented, + relation_wrong_type, + ] +} + + +# wraps context based exceptions in node info +def wrapper(model): + def wrap(func): + @functools.wraps(func) + def inner(*args, **kwargs): + try: + return func(*args, **kwargs) + except RuntimeException as exc: + exc.add_node(model) + raise exc + + return inner + + return wrap + + +def wrapped_exports(model): + wrap = wrapper(model) + return {name: wrap(export) for name, export in CONTEXT_EXPORTS.items()} diff --git a/core/dbt/context/macro_resolver.py b/core/dbt/context/macro_resolver.py index a108a1889b9..6e70bafd05e 100644 --- a/core/dbt/context/macro_resolver.py +++ b/core/dbt/context/macro_resolver.py @@ -1,6 +1,6 @@ from typing import Dict, MutableMapping, Optional from dbt.contracts.graph.nodes import Macro -from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error +from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME from dbt.clients.jinja import MacroGenerator @@ -86,7 +86,7 @@ def _add_macro_to( package_namespaces[macro.package_name] = namespace if macro.name in namespace: - raise_duplicate_macro_name(macro, macro, macro.package_name) + raise DuplicateMacroName(macro, macro, macro.package_name) package_namespaces[macro.package_name][macro.name] = macro def add_macro(self, macro: Macro): @@ -187,7 +187,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.macro_resolver.packages: macro = self.macro_resolver.packages[package_name].get(name) else: - raise_compiler_error(f"Could not find package '{package_name}'") + raise PackageNotFoundForMacro(package_name) if not macro: return None macro_func = MacroGenerator(macro, self.ctx, self.node, self.thread_ctx) diff --git a/core/dbt/context/macros.py b/core/dbt/context/macros.py index 
700109b8081..921480ec05a 100644 --- a/core/dbt/context/macros.py +++ b/core/dbt/context/macros.py @@ -3,7 +3,7 @@ from dbt.clients.jinja import MacroGenerator, MacroStack from dbt.contracts.graph.nodes import Macro from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME -from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error +from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro FlatNamespace = Dict[str, MacroGenerator] @@ -75,7 +75,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.packages: return self.packages[package_name].get(name) else: - raise_compiler_error(f"Could not find package '{package_name}'") + raise PackageNotFoundForMacro(package_name) # This class builds the MacroNamespace by adding macros to @@ -122,7 +122,7 @@ def _add_macro_to( hierarchy[macro.package_name] = namespace if macro.name in namespace: - raise_duplicate_macro_name(macro_func.macro, macro, macro.package_name) + raise DuplicateMacroName(macro_func.macro, macro, macro.package_name) hierarchy[macro.package_name][macro.name] = macro_func def add_macro(self, macro: Macro, ctx: Dict[str, Any]): diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 06642810730..2e7af0a79f2 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -19,13 +19,14 @@ from dbt.clients import agate_helper from dbt.clients.jinja import get_rendered, MacroGenerator, MacroStack from dbt.config import RuntimeConfig, Project -from .base import contextmember, contextproperty, Var -from .configured import FQNLookup -from .context_config import ContextConfig from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER +from dbt.context.base import contextmember, contextproperty, Var +from dbt.context.configured import FQNLookup +from dbt.context.context_config import ContextConfig +from dbt.context.exceptions_jinja import wrapped_exports from dbt.context.macro_resolver import MacroResolver, TestMacroNamespace -from .macros import MacroNamespaceBuilder, MacroNamespace -from .manifest import ManifestContext +from dbt.context.macros import MacroNamespaceBuilder, MacroNamespace +from dbt.context.manifest import ManifestContext from dbt.contracts.connection import AdapterResponse from dbt.contracts.graph.manifest import Manifest, Disabled from dbt.contracts.graph.nodes import ( @@ -41,20 +42,27 @@ from dbt.events.functions import get_metadata_vars from dbt.exceptions import ( CompilationException, - ParsingException, + ConflictingConfigKeys, + DisallowSecretEnvVar, + EnvVarMissing, InternalException, - ValidationException, + InvalidInlineModelConfig, + InvalidNumberSourceArgs, + InvalidPersistDocsValueType, + LoadAgateTableNotSeed, + LoadAgateTableValueError, + MacroInvalidDispatchArg, + MacrosSourcesUnWriteable, + MetricInvalidArgs, + MissingConfig, + OperationsCannotRefEphemeralNodes, + PackageNotInDeps, + ParsingException, + RefBadContext, + RefInvalidArgs, RuntimeException, - macro_invalid_dispatch_arg, - missing_config, - raise_compiler_error, - ref_invalid_args, - metric_invalid_args, - target_not_found, - ref_bad_context, - wrapped_exports, - raise_parsing_error, - disallow_secret_env_var, + TargetNotFound, + ValidationException, ) from dbt.config import IsFQNResource from dbt.node_types import NodeType, ModelLanguage @@ -139,7 +147,7 @@ def dispatch( raise CompilationException(msg) if packages is not None: - raise macro_invalid_dispatch_arg(macro_name) + raise 
MacroInvalidDispatchArg(macro_name) namespace = macro_namespace @@ -233,7 +241,7 @@ def __call__(self, *args: str) -> RelationProxy: elif len(args) == 2: package, name = args else: - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -257,9 +265,7 @@ def validate_args(self, source_name: str, table_name: str): def __call__(self, *args: str) -> RelationProxy: if len(args) != 2: - raise_compiler_error( - f"source() takes exactly two arguments ({len(args)} given)", self.model - ) + raise InvalidNumberSourceArgs(args, node=self.model) self.validate_args(args[0], args[1]) return self.resolve(args[0], args[1]) @@ -294,7 +300,7 @@ def __call__(self, *args: str) -> MetricReference: elif len(args) == 2: package, name = args else: - metric_invalid_args(self.model, args) + raise MetricInvalidArgs(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -315,12 +321,7 @@ def _transform_config(self, config): if oldkey in config: newkey = oldkey.replace("_", "-") if newkey in config: - raise_compiler_error( - 'Invalid config, has conflicting keys "{}" and "{}"'.format( - oldkey, newkey - ), - self.model, - ) + raise ConflictingConfigKeys(oldkey, newkey, node=self.model) config[newkey] = config.pop(oldkey) return config @@ -330,7 +331,7 @@ def __call__(self, *args, **kwargs): elif len(args) == 0 and len(kwargs) > 0: opts = kwargs else: - raise_compiler_error("Invalid inline model config", self.model) + raise InvalidInlineModelConfig(node=self.model) opts = self._transform_config(opts) @@ -378,7 +379,7 @@ def _lookup(self, name, default=_MISSING): else: result = self.model.config.get(name, default) if result is _MISSING: - missing_config(self.model, name) + raise MissingConfig(unique_id=self.model.unique_id, name=name) return result def require(self, name, validator=None): @@ -400,20 +401,14 @@ def get(self, name, default=None, validator=None): def persist_relation_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise_compiler_error( - f"Invalid value provided for 'persist_docs'. Expected dict " - f"but received {type(persist_docs)}" - ) + raise InvalidPersistDocsValueType(persist_docs) return persist_docs.get("relation", False) def persist_column_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise_compiler_error( - f"Invalid value provided for 'persist_docs'. 
Expected dict " - f"but received {type(persist_docs)}" - ) + raise InvalidPersistDocsValueType(persist_docs) return persist_docs.get("columns", False) @@ -472,7 +467,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Rel ) if target_model is None or isinstance(target_model, Disabled): - target_not_found( + raise TargetNotFound( node=self.model, target_name=target_name, target_kind="node", @@ -494,7 +489,7 @@ def validate( ) -> None: if resolved.unique_id not in self.model.depends_on.nodes: args = self._repack_args(target_name, target_package) - ref_bad_context(self.model, args) + raise RefBadContext(node=self.model, args=args) class OperationRefResolver(RuntimeRefResolver): @@ -510,12 +505,7 @@ def create_relation(self, target_model: ManifestNode, name: str) -> RelationProx if target_model.is_ephemeral_model: # In operations, we can't ref() ephemeral nodes, because # Macros do not support set_cte - raise_compiler_error( - "Operations can not ref() ephemeral nodes, but {} is ephemeral".format( - target_model.name - ), - self.model, - ) + raise OperationsCannotRefEphemeralNodes(target_model.name, node=self.model) else: return super().create_relation(target_model, name) @@ -538,7 +528,7 @@ def resolve(self, source_name: str, table_name: str): ) if target_source is None or isinstance(target_source, Disabled): - target_not_found( + raise TargetNotFound( node=self.model, target_name=f"{source_name}.{table_name}", target_kind="source", @@ -565,7 +555,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Met ) if target_metric is None or isinstance(target_metric, Disabled): - target_not_found( + raise TargetNotFound( node=self.model, target_name=target_name, target_kind="metric", @@ -594,7 +584,7 @@ def packages_for_node(self) -> Iterable[Project]: if package_name != self._config.project_name: if package_name not in dependencies: # I don't think this is actually reachable - raise_compiler_error(f"Node package named {package_name} not found!", self._node) + raise PackageNotInDeps(package_name, node=self._node) yield dependencies[package_name] yield self._config @@ -777,7 +767,7 @@ def inner(value: T) -> None: def write(self, payload: str) -> str: # macros/source defs aren't 'writeable'. 
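        # (write() is the method behind the Jinja write() context member: for real
        # nodes it persists `payload` under the configured target path via
        # write_node() below and records the file on self.model.build_path, so only
        # node types that produce a build artifact can support it.)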
if isinstance(self.model, (Macro, SourceDefinition)): - raise_compiler_error('cannot "write" macros or sources') + raise MacrosSourcesUnWriteable(node=self.model) self.model.build_path = self.model.write_node(self.config.target_path, "run", payload) return "" @@ -792,21 +782,19 @@ def try_or_compiler_error( try: return func(*args, **kwargs) except Exception: - raise_compiler_error(message_if_exception, self.model) + raise CompilationException(message_if_exception, self.model) @contextmember def load_agate_table(self) -> agate.Table: if not isinstance(self.model, SeedNode): - raise_compiler_error( - "can only load_agate_table for seeds (got a {})".format(self.model.resource_type) - ) + raise LoadAgateTableNotSeed(self.model.resource_type, node=self.model) assert self.model.root_path path = os.path.join(self.model.root_path, self.model.original_file_path) column_types = self.model.config.column_types try: table = agate_helper.from_csv(path, text_columns=column_types) except ValueError as e: - raise_compiler_error(str(e)) + raise LoadAgateTableValueError(e, node=self.model) table.original_abspath = os.path.abspath(path) return table @@ -1208,7 +1196,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1241,8 +1229,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.env_vars.append(var) # type: ignore[union-attr] return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) @contextproperty def selected_resources(self) -> List[str]: @@ -1423,7 +1410,7 @@ def generate_runtime_macro_context( class ExposureRefResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) not in (1, 2): - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.model.refs.append(list(args)) return "" @@ -1431,9 +1418,7 @@ def __call__(self, *args) -> str: class ExposureSourceResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) != 2: - raise_compiler_error( - f"source() takes exactly two arguments ({len(args)} given)", self.model - ) + raise InvalidNumberSourceArgs(args, node=self.model) self.model.sources.append(list(args)) return "" @@ -1441,7 +1426,7 @@ def __call__(self, *args) -> str: class ExposureMetricResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) not in (1, 2): - metric_invalid_args(self.model, args) + raise MetricInvalidArgs(node=self.model, args=args) self.model.metrics.append(list(args)) return "" @@ -1483,7 +1468,7 @@ def __call__(self, *args) -> str: elif len(args) == 2: package, name = args else: - ref_invalid_args(self.model, args) + raise RefInvalidArgs(node=self.model, args=args) self.validate_args(name, package) self.model.refs.append(list(args)) return "" @@ -1573,7 +1558,7 @@ def _build_test_namespace(self): def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - disallow_secret_env_var(var) + raise DisallowSecretEnvVar(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1599,8 +1584,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.add_env_var(var, yaml_key, name) # type: ignore[union-attr] return return_value else: - msg = f"Env var required but 
not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) def generate_test_context( diff --git a/core/dbt/context/secret.py b/core/dbt/context/secret.py index 11a6dc54f07..da13509ef50 100644 --- a/core/dbt/context/secret.py +++ b/core/dbt/context/secret.py @@ -4,7 +4,7 @@ from .base import BaseContext, contextmember from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER -from dbt.exceptions import raise_parsing_error +from dbt.exceptions import EnvVarMissing SECRET_PLACEHOLDER = "$$$DBT_SECRET_START$$${}$$$DBT_SECRET_END$$$" @@ -50,8 +50,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: self.env_vars[var] = return_value if var in os.environ else DEFAULT_ENV_PLACEHOLDER return return_value else: - msg = f"Env var required but not provided: '{var}'" - raise_parsing_error(msg) + raise EnvVarMissing(var) def generate_secret_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]: diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index cd1eb561fcc..c43012ec521 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -41,14 +41,14 @@ from dbt.dataclass_schema import dbtClassMixin from dbt.exceptions import ( CompilationException, - raise_duplicate_resource_name, - raise_compiler_error, + DuplicateResourceName, + DuplicateMacroInPackage, + DuplicateMaterializationName, ) from dbt.helper_types import PathSet from dbt.events.functions import fire_event from dbt.events.types import MergedFromState from dbt.node_types import NodeType -from dbt.ui import line_wrap_message from dbt import flags from dbt import tracking import dbt.utils @@ -398,12 +398,7 @@ def __eq__(self, other: object) -> bool: return NotImplemented equal = self.specificity == other.specificity and self.locality == other.locality if equal: - raise_compiler_error( - "Found two materializations with the name {} (packages {} and " - "{}). dbt cannot resolve this ambiguity".format( - self.macro.name, self.macro.package_name, other.macro.package_name - ) - ) + raise DuplicateMaterializationName(self.macro, other) return equal @@ -1040,26 +1035,7 @@ def merge_from_artifact( def add_macro(self, source_file: SourceFile, macro: Macro): if macro.unique_id in self.macros: # detect that the macro exists and emit an error - other_path = self.macros[macro.unique_id].original_file_path - # subtract 2 for the "Compilation Error" indent - # note that the line wrap eats newlines, so if you want newlines, - # this is the result :( - msg = line_wrap_message( - f"""\ - dbt found two macros named "{macro.name}" in the project - "{macro.package_name}". 
- - - To fix this error, rename or remove one of the following - macros: - - - {macro.original_file_path} - - - {other_path} - """, - subtract=2, - ) - raise_compiler_error(msg) + raise DuplicateMacroInPackage(macro=macro, macro_mapping=self.macros) self.macros[macro.unique_id] = macro source_file.macros.append(macro.unique_id) @@ -1237,7 +1213,7 @@ def __post_serialize__(self, dct): def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]): if value.unique_id in src: - raise_duplicate_resource_name(value, src[value.unique_id]) + raise DuplicateResourceName(value, src[value.unique_id]) K_T = TypeVar("K_T") diff --git a/core/dbt/contracts/relation.py b/core/dbt/contracts/relation.py index fbe18146bb4..e8cba2ad155 100644 --- a/core/dbt/contracts/relation.py +++ b/core/dbt/contracts/relation.py @@ -9,7 +9,7 @@ from dbt.dataclass_schema import dbtClassMixin, StrEnum from dbt.contracts.util import Replaceable -from dbt.exceptions import raise_dataclass_not_dict, CompilationException +from dbt.exceptions import CompilationException, DataclassNotDict from dbt.utils import deep_merge @@ -43,10 +43,10 @@ def __getitem__(self, key): raise KeyError(key) from None def __iter__(self): - raise_dataclass_not_dict(self) + raise DataclassNotDict(self) def __len__(self): - raise_dataclass_not_dict(self) + raise DataclassNotDict(self) def incorporate(self, **kwargs): value = self.to_dict(omit_none=True) diff --git a/core/dbt/deps/git.py b/core/dbt/deps/git.py index e6dcc479a80..5d7a1331c58 100644 --- a/core/dbt/deps/git.py +++ b/core/dbt/deps/git.py @@ -9,7 +9,7 @@ GitPackage, ) from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path -from dbt.exceptions import ExecutableError, raise_dependency_error +from dbt.exceptions import ExecutableError, MultipleVersionGitDeps from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import EnsureGitInstalled, DepsUnpinned @@ -143,10 +143,7 @@ def resolved(self) -> GitPinnedPackage: if len(requested) == 0: requested = {"HEAD"} elif len(requested) > 1: - raise_dependency_error( - "git dependencies should contain exactly one version. 
" - "{} contains: {}".format(self.git, requested) - ) + raise MultipleVersionGitDeps(self.git, requested) return GitPinnedPackage( git=self.git, diff --git a/core/dbt/deps/registry.py b/core/dbt/deps/registry.py index 9f163d89758..f3398f4b16f 100644 --- a/core/dbt/deps/registry.py +++ b/core/dbt/deps/registry.py @@ -10,10 +10,10 @@ ) from dbt.deps.base import PinnedPackage, UnpinnedPackage from dbt.exceptions import ( - package_version_not_found, - VersionsNotCompatibleException, DependencyException, - package_not_found, + PackageNotFound, + PackageVersionNotFound, + VersionsNotCompatibleException, ) @@ -71,7 +71,7 @@ def __init__( def _check_in_index(self): index = registry.index_cached() if self.package not in index: - package_not_found(self.package) + raise PackageNotFound(self.package) @classmethod def from_contract(cls, contract: RegistryPackage) -> "RegistryUnpinnedPackage": @@ -118,7 +118,7 @@ def resolved(self) -> RegistryPinnedPackage: target = None if not target: # raise an exception if no installable target version is found - package_version_not_found(self.package, range_, installable, should_version_check) + raise PackageVersionNotFound(self.package, range_, installable, should_version_check) latest_compatible = installable[-1] return RegistryPinnedPackage( package=self.package, version=target, version_latest=latest_compatible diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py index e4c1992894c..323e2f562c1 100644 --- a/core/dbt/deps/resolver.py +++ b/core/dbt/deps/resolver.py @@ -1,7 +1,12 @@ from dataclasses import dataclass, field from typing import Dict, List, NoReturn, Union, Type, Iterator, Set -from dbt.exceptions import raise_dependency_error, InternalException +from dbt.exceptions import ( + DuplicateDependencyToRoot, + DuplicateProjectDependency, + MismatchedDependencyTypes, + InternalException, +) from dbt.config import Project, RuntimeConfig from dbt.config.renderer import DbtProjectYamlRenderer @@ -51,10 +56,7 @@ def __setitem__(self, key: BasePackage, value): self.packages[key_str] = value def _mismatched_types(self, old: UnpinnedPackage, new: UnpinnedPackage) -> NoReturn: - raise_dependency_error( - f"Cannot incorporate {new} ({new.__class__.__name__}) in {old} " - f"({old.__class__.__name__}): mismatched types" - ) + raise MismatchedDependencyTypes(new, old) def incorporate(self, package: UnpinnedPackage): key: str = self._pick_key(package) @@ -105,17 +107,9 @@ def _check_for_duplicate_project_names( for package in final_deps: project_name = package.get_project_name(config, renderer) if project_name in seen: - raise_dependency_error( - f'Found duplicate project "{project_name}". This occurs when ' - "a dependency has the same project name as some other " - "dependency." - ) + raise DuplicateProjectDependency(project_name) elif project_name == config.project_name: - raise_dependency_error( - "Found a dependency with the same name as the root project " - f'"{project_name}". Package names must be unique in a project.' - " Please rename one of these packages." 
- ) + raise DuplicateDependencyToRoot(project_name) seen.add(project_name) diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index 36dd2e9ba79..f061606632e 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -159,9 +159,10 @@ def event_to_dict(event: BaseEvent) -> dict: def warn_or_error(event, node=None): if flags.WARN_ERROR: - from dbt.exceptions import raise_compiler_error + # TODO: resolve this circular import when at top + from dbt.exceptions import EventCompilationException - raise_compiler_error(scrub_secrets(event.info.msg, env_secrets()), node) + raise EventCompilationException(event.info.msg, node) else: fire_event(event) diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index 32aa8b477a9..2db130bb44e 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -1,23 +1,29 @@ import builtins -import functools -from typing import NoReturn, Optional, Mapping, Any +import json +import re +from typing import Any, Dict, List, Mapping, NoReturn, Optional, Union +# from dbt.contracts.graph import ManifestNode # or ParsedNode? +from dbt.dataclass_schema import ValidationError +from dbt.events.functions import warn_or_error from dbt.events.helpers import env_secrets, scrub_secrets from dbt.events.types import JinjaLogWarning from dbt.events.contextvars import get_node_info from dbt.node_types import NodeType +from dbt.ui import line_wrap_message import dbt.dataclass_schema -def validator_error_message(exc): - """Given a dbt.dataclass_schema.ValidationError (which is basically a - jsonschema.ValidationError), return the relevant parts as a string +class MacroReturn(builtins.BaseException): + """ + Hack of all hacks + This is not actually an exception. + It's how we return a value from a macro. 
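+    A macro's return(value) raises MacroReturn(value); the caller that
+    invoked the macro catches it and uses .value as the macro's result.
+    Deriving from BaseException (not Exception) keeps ordinary
+    `except Exception` handlers from swallowing it on the way out.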
""" - if not isinstance(exc, dbt.dataclass_schema.ValidationError): - return str(exc) - path = "[%s]" % "][".join(map(repr, exc.relative_path)) - return "at path {}: {}".format(path, exc.message) + + def __init__(self, value): + self.value = value class Exception(builtins.Exception): @@ -32,25 +38,53 @@ def data(self): } -class MacroReturn(builtins.BaseException): - """ - Hack of all hacks - """ +class InternalException(Exception): + def __init__(self, msg: str): + self.stack: List = [] + self.msg = scrub_secrets(msg, env_secrets()) - def __init__(self, value): - self.value = value + @property + def type(self): + return "Internal" + def process_stack(self): + lines = [] + stack = self.stack + first = True -class InternalException(Exception): - pass + if len(stack) > 1: + lines.append("") + + for item in stack: + msg = "called by" + + if first: + msg = "in" + first = False + + lines.append(f"> {msg}") + + return lines + + def __str__(self): + if hasattr(self.msg, "split"): + split_msg = self.msg.split("\n") + else: + split_msg = str(self.msg).split("\n") + + lines = ["{}".format(self.type + " Error")] + split_msg + + lines += self.process_stack() + + return lines[0] + "\n" + "\n".join([" " + line for line in lines[1:]]) class RuntimeException(RuntimeError, Exception): CODE = 10001 MESSAGE = "Runtime error" - def __init__(self, msg, node=None): - self.stack = [] + def __init__(self, msg: str, node=None): + self.stack: List = [] self.node = node self.msg = scrub_secrets(msg, env_secrets()) @@ -69,14 +103,14 @@ def node_to_string(self, node): return "" if not hasattr(node, "name"): # we probably failed to parse a block, so we can't know the name - return "{} ({})".format(node.resource_type, node.original_file_path) + return f"{node.resource_type} ({node.original_file_path})" if hasattr(node, "contents"): # handle FileBlocks. They aren't really nodes but we want to render # out the path we know at least. This indicates an error during # block parsing. - return "{}".format(node.path.original_file_path) - return "{} {} ({})".format(node.resource_type, node.name, node.original_file_path) + return f"{node.path.original_file_path}" + return f"{node.resource_type} {node.name} ({node.original_file_path})" def process_stack(self): lines = [] @@ -93,15 +127,24 @@ def process_stack(self): msg = "in" first = False - lines.append("> {} {}".format(msg, self.node_to_string(item))) + lines.append(f"> {msg} {self.node_to_string(item)}") return lines - def __str__(self, prefix="! "): + def validator_error_message(self, exc: builtins.Exception): + """Given a dbt.dataclass_schema.ValidationError (which is basically a + jsonschema.ValidationError), return the relevant parts as a string + """ + if not isinstance(exc, dbt.dataclass_schema.ValidationError): + return str(exc) + path = "[%s]" % "][".join(map(repr, exc.relative_path)) + return f"at path {path}: {exc.message}" + + def __str__(self, prefix: str = "! 
"): node_string = "" if self.node is not None: - node_string = " in {}".format(self.node_to_string(self.node)) + node_string = f" in {self.node_to_string(self.node)}" if hasattr(self.msg, "split"): split_msg = self.msg.split("\n") @@ -138,7 +181,7 @@ class RPCTimeoutException(RuntimeException): CODE = 10008 MESSAGE = "RPC timeout error" - def __init__(self, timeout): + def __init__(self, timeout: Optional[float]): super().__init__(self.MESSAGE) self.timeout = timeout @@ -147,7 +190,7 @@ def data(self): result.update( { "timeout": self.timeout, - "message": "RPC timed out after {}s".format(self.timeout), + "message": f"RPC timed out after {self.timeout}s", } ) return result @@ -157,15 +200,15 @@ class RPCKilledException(RuntimeException): CODE = 10009 MESSAGE = "RPC process killed" - def __init__(self, signum): + def __init__(self, signum: int): self.signum = signum - self.message = "RPC process killed by signal {}".format(self.signum) - super().__init__(self.message) + self.msg = f"RPC process killed by signal {self.signum}" + super().__init__(self.msg) def data(self): return { "signum": self.signum, - "message": self.message, + "message": self.msg, } @@ -173,7 +216,7 @@ class RPCCompiling(RuntimeException): CODE = 10010 MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status" - def __init__(self, msg=None, node=None): + def __init__(self, msg: str = None, node=None): if msg is None: msg = "compile in progress" super().__init__(msg, node) @@ -185,13 +228,13 @@ class RPCLoadException(RuntimeException): 'RPC server failed to compile project, call the "status" method for' " compile status" ) - def __init__(self, cause): + def __init__(self, cause: Dict[str, Any]): self.cause = cause - self.message = "{}: {}".format(self.MESSAGE, self.cause["message"]) - super().__init__(self.message) + self.msg = f'{self.MESSAGE}: {self.cause["message"]}' + super().__init__(self.msg) def data(self): - return {"cause": self.cause, "message": self.message} + return {"cause": self.cause, "message": self.msg} class DatabaseException(RuntimeException): @@ -202,7 +245,7 @@ def process_stack(self): lines = [] if hasattr(self.node, "build_path") and self.node.build_path: - lines.append("compiled Code at {}".format(self.node.build_path)) + lines.append(f"compiled Code at {self.node.build_path}") return lines + RuntimeException.process_stack(self) @@ -219,6 +262,17 @@ class CompilationException(RuntimeException): def type(self): return "Compilation" + def _fix_dupe_msg(self, path_1: str, path_2: str, name: str, type_name: str) -> str: + if path_1 == path_2: + return ( + f"remove one of the {type_name} entries for {name} in this file:\n - {path_1!s}\n" + ) + else: + return ( + f"remove the {type_name} entry for {name} in one of these files:\n" + f" - {path_1!s}\n{path_2!s}" + ) + class RecursionException(RuntimeException): pass @@ -238,14 +292,13 @@ def type(self): return "Parsing" +# TODO: this isn't raised in the core codebase. Is it raised elsewhere? 
class JSONValidationException(ValidationException): def __init__(self, typename, errors): self.typename = typename self.errors = errors self.errors_message = ", ".join(errors) - msg = 'Invalid arguments passed to "{}" instance: {}'.format( - self.typename, self.errors_message - ) + msg = f'Invalid arguments passed to "{self.typename}" instance: {self.errors_message}' super().__init__(msg) def __reduce__(self): @@ -259,7 +312,7 @@ def __init__(self, expected: str, found: Optional[str]): self.found = found self.filename = "input file" - super().__init__(self.get_message()) + super().__init__(msg=self.get_message()) def add_filename(self, filename: str): self.filename = filename @@ -286,7 +339,7 @@ class JinjaRenderingException(CompilationException): class UndefinedMacroException(CompilationException): - def __str__(self, prefix="! ") -> str: + def __str__(self, prefix: str = "! ") -> str: msg = super().__str__(prefix) return ( f"{msg}. This can happen when calling a macro that does " @@ -303,7 +356,7 @@ def __init__(self, task_id): self.task_id = task_id def __str__(self): - return "{}: {}".format(self.MESSAGE, self.task_id) + return f"{self.MESSAGE}: {self.task_id}" class AliasException(ValidationException): @@ -320,9 +373,9 @@ class DbtConfigError(RuntimeException): CODE = 10007 MESSAGE = "DBT Configuration Error" - def __init__(self, message, project=None, result_type="invalid_project", path=None): + def __init__(self, msg: str, project=None, result_type="invalid_project", path=None): self.project = project - super().__init__(message) + super().__init__(msg) self.result_type = result_type self.path = path @@ -338,8 +391,8 @@ class FailFastException(RuntimeException): CODE = 10013 MESSAGE = "FailFast Error" - def __init__(self, message, result=None, node=None): - super().__init__(msg=message, node=node) + def __init__(self, msg: str, result=None, node=None): + super().__init__(msg=msg, node=node) self.result = result @property @@ -360,7 +413,7 @@ class DbtProfileError(DbtConfigError): class SemverException(Exception): - def __init__(self, msg=None): + def __init__(self, msg: str = None): self.msg = msg if msg is not None: super().__init__(msg) @@ -373,7 +426,10 @@ class VersionsNotCompatibleException(SemverException): class NotImplementedException(Exception): - pass + def __init__(self, msg: str): + self.msg = msg + self.formatted_msg = f"ERROR: {self.msg}" + super().__init__(self.formatted_msg) class FailedToConnectException(DatabaseException): @@ -381,52 +437,58 @@ class FailedToConnectException(DatabaseException): class CommandError(RuntimeException): - def __init__(self, cwd, cmd, message="Error running command"): + def __init__(self, cwd: str, cmd: List[str], msg: str = "Error running command"): cmd_scrubbed = list(scrub_secrets(cmd_txt, env_secrets()) for cmd_txt in cmd) - super().__init__(message) + super().__init__(msg) self.cwd = cwd self.cmd = cmd_scrubbed - self.args = (cwd, cmd_scrubbed, message) + self.args = (cwd, cmd_scrubbed, msg) def __str__(self): if len(self.cmd) == 0: - return "{}: No arguments given".format(self.msg) - return '{}: "{}"'.format(self.msg, self.cmd[0]) + return f"{self.msg}: No arguments given" + return f'{self.msg}: "{self.cmd[0]}"' class ExecutableError(CommandError): - def __init__(self, cwd, cmd, message): - super().__init__(cwd, cmd, message) + def __init__(self, cwd: str, cmd: List[str], msg: str): + super().__init__(cwd, cmd, msg) class WorkingDirectoryError(CommandError): - def __init__(self, cwd, cmd, message): - super().__init__(cwd, cmd, 
message)
+    def __init__(self, cwd: str, cmd: List[str], msg: str):
+        super().__init__(cwd, cmd, msg)
 
     def __str__(self):
-        return '{}: "{}"'.format(self.msg, self.cwd)
+        return f'{self.msg}: "{self.cwd}"'
 
 
 class CommandResultError(CommandError):
-    def __init__(self, cwd, cmd, returncode, stdout, stderr, message="Got a non-zero returncode"):
-        super().__init__(cwd, cmd, message)
+    def __init__(
+        self,
+        cwd: str,
+        cmd: List[str],
+        returncode: Union[int, Any],
+        stdout: bytes,
+        stderr: bytes,
+        msg: str = "Got a non-zero returncode",
+    ):
+        super().__init__(cwd, cmd, msg)
         self.returncode = returncode
         self.stdout = scrub_secrets(stdout.decode("utf-8"), env_secrets())
         self.stderr = scrub_secrets(stderr.decode("utf-8"), env_secrets())
-        self.args = (cwd, self.cmd, returncode, self.stdout, self.stderr, message)
+        self.args = (cwd, self.cmd, returncode, self.stdout, self.stderr, msg)
 
     def __str__(self):
-        return "{} running: {}".format(self.msg, self.cmd)
+        return f"{self.msg} running: {self.cmd}"
 
 
 class InvalidConnectionException(RuntimeException):
-    def __init__(self, thread_id, known, node=None):
+    def __init__(self, thread_id, known: List):
         self.thread_id = thread_id
         self.known = known
         super().__init__(
-            msg="connection never acquired for thread {}, have {}".format(
-                self.thread_id, self.known
-            )
+            msg=f"connection never acquired for thread {self.thread_id}, have {self.known}"
         )
 
 
@@ -440,611 +502,1863 @@ class DuplicateYamlKeyException(CompilationException):
     pass
 
 
-def raise_compiler_error(msg, node=None) -> NoReturn:
-    raise CompilationException(msg, node)
+class ConnectionException(Exception):
+    """
+    There was a problem with the connection that returned a bad response,
+    timed out, or resulted in a file that is corrupt.
+    """
 
+    pass
 
-def raise_parsing_error(msg, node=None) -> NoReturn:
-    raise ParsingException(msg, node)
 
+# event level exception
+class EventCompilationException(CompilationException):
+    def __init__(self, msg: str, node):
+        self.msg = scrub_secrets(msg, env_secrets())
+        self.node = node
+        super().__init__(msg=self.msg)
 
-def raise_database_error(msg, node=None) -> NoReturn:
-    raise DatabaseException(msg, node)
 
+# compilation level exceptions
+class GraphDependencyNotFound(CompilationException):
+    def __init__(self, node, dependency: str):
+        self.node = node
+        self.dependency = dependency
+        super().__init__(msg=self.get_message())
 
-def raise_dependency_error(msg) -> NoReturn:
-    raise DependencyException(scrub_secrets(msg, env_secrets()))
 
+    def get_message(self) -> str:
+        msg = f"'{self.node.unique_id}' depends on '{self.dependency}' which is not in the graph!"
+        return msg
 
-def raise_git_cloning_error(error: CommandResultError) -> NoReturn:
-    error.cmd = scrub_secrets(str(error.cmd), env_secrets())
-    raise error
 
+# client level exceptions
 
-def raise_git_cloning_problem(repo) -> NoReturn:
-    repo = scrub_secrets(repo, env_secrets())
-    msg = """\
-    Something went wrong while cloning {}
-    Check the debug logs for more information
-    """
-    raise RuntimeException(msg.format(repo))
 
+class NoSupportedLanguagesFound(CompilationException):
+    def __init__(self, node):
+        self.node = node
+        self.msg = f"No supported_languages found in materialization macro {self.node.name}"
+        super().__init__(msg=self.msg)
 
 
-def disallow_secret_env_var(env_var_name) -> NoReturn:
-    """Raise an error when a secret env var is referenced outside allowed
-    rendering contexts"""
-    msg = (
-        "Secret env vars are allowed only in profiles.yml or packages.yml. "
-        "Found '{env_var_name}' referenced elsewhere."
- ) - raise_parsing_error(msg.format(env_var_name=env_var_name)) +class MaterializtionMacroNotUsed(CompilationException): + def __init__(self, node): + self.node = node + self.msg = "Only materialization macros can be used with this function" + super().__init__(msg=self.msg) -def invalid_type_error( - method_name, arg_name, got_value, expected_type, version="0.13.0" -) -> NoReturn: - """Raise a CompilationException when an adapter method available to macros - has changed. - """ - got_type = type(got_value) - msg = ( - "As of {version}, 'adapter.{method_name}' expects argument " - "'{arg_name}' to be of type '{expected_type}', instead got " - "{got_value} ({got_type})" - ) - raise_compiler_error( - msg.format( - version=version, - method_name=method_name, - arg_name=arg_name, - expected_type=expected_type, - got_value=got_value, - got_type=got_type, - ) - ) +class UndefinedCompilation(CompilationException): + def __init__(self, name: str, node): + self.name = name + self.node = node + self.msg = f"{self.name} is undefined" + super().__init__(msg=self.msg) -def invalid_bool_error(got_value, macro_name) -> NoReturn: - """Raise a CompilationException when a macro expects a boolean but gets some - other value. - """ - msg = ( - "Macro '{macro_name}' returns '{got_value}'. It is not type 'bool' " - "and cannot not be converted reliably to a bool." - ) - raise_compiler_error(msg.format(macro_name=macro_name, got_value=got_value)) +class CaughtMacroExceptionWithNode(CompilationException): + def __init__(self, exc, node): + self.exc = exc + self.node = node + super().__init__(msg=str(exc)) -def ref_invalid_args(model, args) -> NoReturn: - raise_compiler_error("ref() takes at most two arguments ({} given)".format(len(args)), model) +class CaughtMacroException(CompilationException): + def __init__(self, exc): + self.exc = exc + super().__init__(msg=str(exc)) -def metric_invalid_args(model, args) -> NoReturn: - raise_compiler_error( - "metric() takes at most two arguments ({} given)".format(len(args)), model - ) +class MacroNameNotString(CompilationException): + def __init__(self, kwarg_value): + self.kwarg_value = kwarg_value + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + msg = ( + f"The macro_name parameter ({self.kwarg_value}) " + "to adapter.dispatch was not a string" + ) + return msg -def ref_bad_context(model, args) -> NoReturn: - ref_args = ", ".join("'{}'".format(a) for a in args) - ref_string = "{{{{ ref({}) }}}}".format(ref_args) - base_error_msg = """dbt was unable to infer all dependencies for the model "{model_name}". -This typically happens when ref() is placed within a conditional block. +class MissingControlFlowStartTag(CompilationException): + def __init__(self, tag, expected_tag: str, tag_parser): + self.tag = tag + self.expected_tag = expected_tag + self.tag_parser = tag_parser + super().__init__(msg=self.get_message()) -To fix this, add the following hint to the top of the model "{model_name}": + def get_message(self) -> str: + linepos = self.tag_parser.linepos(self.tag.start) + msg = ( + f"Got an unexpected control flow end tag, got {self.tag.block_type_name} but " + f"expected {self.expected_tag} next (@ {linepos})" + ) + return msg --- depends_on: {ref_string}""" - # This explicitly references model['name'], instead of model['alias'], for - # better error messages. Ex. If models foo_users and bar_users are aliased - # to 'users', in their respective schemas, then you would want to see - # 'bar_users' in your error messge instead of just 'users'. 
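# (Illustration only -- a minimal sketch of a model that hits this error and the
# hint the message above prescribes; the model and ref names are hypothetical.)
#
#   -- depends_on: {{ ref('upstream_table') }}
#   {% if var('full_load', false) %}
#   select * from {{ ref('upstream_table') }}
#   {% else %}
#   select 1 as id
#   {% endif %}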
-    if isinstance(model, dict):  # TODO: remove this path
-        model_name = model["name"]
-        model_path = model["path"]
-    else:
-        model_name = model.name
-        model_path = model.path
-    error_msg = base_error_msg.format(
-        model_name=model_name, model_path=model_path, ref_string=ref_string
-    )
-    raise_compiler_error(error_msg, model)
 
 
+class UnexpectedControlFlowEndTag(CompilationException):
+    def __init__(self, tag, expected_tag: str, tag_parser):
+        self.tag = tag
+        self.expected_tag = expected_tag
+        self.tag_parser = tag_parser
+        super().__init__(msg=self.get_message())
 
-def doc_invalid_args(model, args) -> NoReturn:
-    raise_compiler_error("doc() takes at most two arguments ({} given)".format(len(args)), model)
 
+    def get_message(self) -> str:
+        linepos = self.tag_parser.linepos(self.tag.start)
+        msg = (
+            f"Got an unexpected control flow end tag, got {self.tag.block_type_name} but "
+            f"never saw a preceding {self.expected_tag} (@ {linepos})"
+        )
+        return msg
 
-def doc_target_not_found(
-    model, target_doc_name: str, target_doc_package: Optional[str]
-) -> NoReturn:
-    target_package_string = ""
 
+class UnexpectedMacroEOF(CompilationException):
+    def __init__(self, expected_name: str, actual_name: str):
+        self.expected_name = expected_name
+        self.actual_name = actual_name
+        super().__init__(msg=self.get_message())
 
-    if target_doc_package is not None:
-        target_package_string = "in package '{}' ".format(target_doc_package)
 
+    def get_message(self) -> str:
+        msg = f'unexpected EOF, expected {self.expected_name}, got "{self.actual_name}"'
+        return msg
 
-    msg = ("Documentation for '{}' depends on doc '{}' {} which was not found").format(
-        model.unique_id, target_doc_name, target_package_string
-    )
-    raise_compiler_error(msg, model)
 
+class MacroNamespaceNotString(CompilationException):
+    def __init__(self, kwarg_type: Any):
+        self.kwarg_type = kwarg_type
+        super().__init__(msg=self.get_message())
 
-def get_not_found_or_disabled_msg(
-    original_file_path,
-    unique_id,
-    resource_type_title,
-    target_name: str,
-    target_kind: str,
-    target_package: Optional[str] = None,
-    disabled: Optional[bool] = None,
-) -> str:
-    if disabled is None:
-        reason = "was not found or is disabled"
-    elif disabled is True:
-        reason = "is disabled"
-    else:
-        reason = "was not found"
-
-    target_package_string = ""
-    if target_package is not None:
-        target_package_string = "in package '{}' ".format(target_package)
-
-    return "{} '{}' ({}) depends on a {} named '{}' {}which {}".format(
-        resource_type_title,
-        unique_id,
-        original_file_path,
-        target_kind,
-        target_name,
-        target_package_string,
-        reason,
-    )
 
+    def get_message(self) -> str:
+        msg = (
+            "The macro_namespace parameter to adapter.dispatch "
+            f"is a {self.kwarg_type}, not a string"
+        )
+        return msg
 
-def target_not_found(
-    node,
-    target_name: str,
-    target_kind: str,
-    target_package: Optional[str] = None,
-    disabled: Optional[bool] = None,
-) -> NoReturn:
-    msg = get_not_found_or_disabled_msg(
-        original_file_path=node.original_file_path,
-        unique_id=node.unique_id,
-        resource_type_title=node.resource_type.title(),
-        target_name=target_name,
-        target_kind=target_kind,
-        target_package=target_package,
-        disabled=disabled,
-    )
 
+class NestedTags(CompilationException):
+    def __init__(self, outer, inner):
+        self.outer = outer
+        self.inner = inner
+        super().__init__(msg=self.get_message())
 
-    raise_compiler_error(msg, node)
 
+    def get_message(self) -> str:
+        msg = (
+            f"Got nested tags: {self.outer.block_type_name} (started at {self.outer.start}) did "
+            f"not have a matching 
{{{{% end{self.outer.block_type_name} %}}}} before a " + f"subsequent {self.inner.block_type_name} was found (started at {self.inner.start})" + ) + return msg -def dependency_not_found(model, target_model_name): - raise_compiler_error( - "'{}' depends on '{}' which is not in the graph!".format( - model.unique_id, target_model_name - ), - model, - ) +class BlockDefinitionNotAtTop(CompilationException): + def __init__(self, tag_parser, tag_start): + self.tag_parser = tag_parser + self.tag_start = tag_start + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + position = self.tag_parser.linepos(self.tag_start) + msg = ( + f"Got a block definition inside control flow at {position}. " + "All dbt block definitions must be at the top level" + ) + return msg -def macro_not_found(model, target_macro_id): - raise_compiler_error( - model, - "'{}' references macro '{}' which is not defined!".format( - model.unique_id, target_macro_id - ), - ) +class MissingCloseTag(CompilationException): + def __init__(self, block_type_name: str, linecount: int): + self.block_type_name = block_type_name + self.linecount = linecount + super().__init__(msg=self.get_message()) -def macro_invalid_dispatch_arg(macro_name) -> NoReturn: - msg = """\ - The "packages" argument of adapter.dispatch() has been deprecated. - Use the "macro_namespace" argument instead. + def get_message(self) -> str: + msg = f"Reached EOF without finding a close tag for {self.block_type_name} (searched from line {self.linecount})" + return msg - Raised during dispatch for: {} - For more information, see: +class GitCloningProblem(RuntimeException): + def __init__(self, repo: str): + self.repo = scrub_secrets(repo, env_secrets()) + super().__init__(msg=self.get_message()) - https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch - """ - raise_compiler_error(msg.format(macro_name)) + def get_message(self) -> str: + msg = f"""\ + Something went wrong while cloning {self.repo} + Check the debug logs for more information + """ + return msg -def materialization_not_available(model, adapter_type): - materialization = model.get_materialization() +class GitCloningError(InternalException): + def __init__(self, repo: str, revision: str, error: CommandResultError): + self.repo = repo + self.revision = revision + self.error = error + super().__init__(msg=self.get_message()) - raise_compiler_error( - "Materialization '{}' is not available for {}!".format(materialization, adapter_type), - model, - ) + def get_message(self) -> str: + stderr = self.error.stderr.strip() + if "usage: git" in stderr: + stderr = stderr.split("\nusage: git")[0] + if re.match("fatal: destination path '(.+)' already exists", stderr): + self.error.cmd = list(scrub_secrets(str(self.error.cmd), env_secrets())) + raise self.error + msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{stderr}" + return scrub_secrets(msg, env_secrets()) -def missing_materialization(model, adapter_type): - materialization = model.get_materialization() - valid_types = "'default'" +class GitCheckoutError(InternalException): + def __init__(self, repo: str, revision: str, error: CommandResultError): + self.repo = repo + self.revision = revision + self.stderr = error.stderr.strip() + super().__init__(msg=self.get_message()) - if adapter_type != "default": - valid_types = "'default' and '{}'".format(adapter_type) + def get_message(self) -> str: + msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}" + return scrub_secrets(msg, 
env_secrets()) - raise_compiler_error( - "No materialization '{}' was found for adapter {}! (searched types {})".format( - materialization, adapter_type, valid_types - ), - model, - ) +class InvalidMaterializationArg(CompilationException): + def __init__(self, name: str, argument: str): + self.name = name + self.argument = argument + super().__init__(msg=self.get_message()) -def bad_package_spec(repo, spec, error_message): - msg = "Error checking out spec='{}' for repo {}\n{}".format(spec, repo, error_message) - raise InternalException(scrub_secrets(msg, env_secrets())) + def get_message(self) -> str: + msg = f"materialization '{self.name}' received unknown argument '{self.argument}'." + return msg -def raise_cache_inconsistent(message): - raise InternalException("Cache inconsistency detected: {}".format(message)) +class SymbolicLinkError(CompilationException): + def __init__(self): + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + msg = ( + "dbt encountered an error when attempting to create a symbolic link. " + "If this error persists, please create an issue at: \n\n" + "https://github.com/dbt-labs/dbt-core" + ) -def missing_config(model, name): - raise_compiler_error( - "Model '{}' does not define a required config parameter '{}'.".format( - model.unique_id, name - ), - model, - ) + return msg -def missing_relation(relation, model=None): - raise_compiler_error("Relation {} not found!".format(relation), model) +# context level exceptions -def raise_dataclass_not_dict(obj): - msg = ( - 'The object ("{obj}") was used as a dictionary. This ' - "capability has been removed from objects of this type." - ) - raise_compiler_error(msg) +class ZipStrictWrongType(CompilationException): + def __init__(self, exc): + self.exc = exc + msg = str(self.exc) + super().__init__(msg=msg) -def relation_wrong_type(relation, expected_type, model=None): - raise_compiler_error( - ( - "Trying to create {expected_type} {relation}, " - "but it currently exists as a {current_type}. Either " - "drop {relation} manually, or run dbt with " - "`--full-refresh` and dbt will drop it for you." 
- ).format(relation=relation, current_type=relation.type, expected_type=expected_type), - model, - ) +class SetStrictWrongType(CompilationException): + def __init__(self, exc): + self.exc = exc + msg = str(self.exc) + super().__init__(msg=msg) -def package_not_found(package_name): - raise_dependency_error("Package {} was not found in the package index".format(package_name)) +class LoadAgateTableValueError(CompilationException): + def __init__(self, exc: ValueError, node): + self.exc = exc + self.node = node + msg = str(self.exc) + super().__init__(msg=msg) -def package_version_not_found( - package_name, version_range, available_versions, should_version_check -): - base_msg = ( - "Could not find a matching compatible version for package {}\n" - " Requested range: {}\n" - " Compatible versions: {}\n" - ) - addendum = ( - ( - "\n" - " Not shown: package versions incompatible with installed version of dbt-core\n" - " To include them, run 'dbt --no-version-check deps'" - ) - if should_version_check - else "" - ) - msg = base_msg.format(package_name, version_range, available_versions) + addendum - raise_dependency_error(msg) +class LoadAgateTableNotSeed(CompilationException): + def __init__(self, resource_type, node): + self.resource_type = resource_type + self.node = node + msg = f"can only load_agate_table for seeds (got a {self.resource_type})" + super().__init__(msg=msg) -def invalid_materialization_argument(name, argument): - raise_compiler_error( - "materialization '{}' received unknown argument '{}'.".format(name, argument) - ) +class MacrosSourcesUnWriteable(CompilationException): + def __init__(self, node): + self.node = node + msg = 'cannot "write" macros or sources' + super().__init__(msg=msg) -def system_error(operation_name): - raise_compiler_error( - "dbt encountered an error when attempting to {}. " - "If this error persists, please create an issue at: \n\n" - "https://github.com/dbt-labs/dbt-core".format(operation_name) - ) +class PackageNotInDeps(CompilationException): + def __init__(self, package_name: str, node): + self.package_name = package_name + self.node = node + msg = f"Node package named {self.package_name} not found!" + super().__init__(msg=msg) -class ConnectionException(Exception): - """ - There was a problem with the connection that returned a bad response, - timed out, or resulted in a file that is corrupt. - """ +class OperationsCannotRefEphemeralNodes(CompilationException): + def __init__(self, target_name: str, node): + self.target_name = target_name + self.node = node + msg = f"Operations can not ref() ephemeral nodes, but {target_name} is ephemeral" + super().__init__(msg=msg) - pass +class InvalidPersistDocsValueType(CompilationException): + def __init__(self, persist_docs: Any): + self.persist_docs = persist_docs + msg = ( + "Invalid value provided for 'persist_docs'. Expected dict " + f"but received {type(self.persist_docs)}" + ) + super().__init__(msg=msg) -def raise_dep_not_found(node, node_description, required_pkg): - raise_compiler_error( - 'Error while parsing {}.\nThe required package "{}" was not found. ' - "Is the package installed?\nHint: You may need to run " - "`dbt deps`.".format(node_description, required_pkg), - node=node, - ) +class InvalidInlineModelConfig(CompilationException): + def __init__(self, node): + self.node = node + msg = "Invalid inline model config" + super().__init__(msg=msg) -def multiple_matching_relations(kwargs, matches): - raise_compiler_error( - "get_relation returned more than one relation with the given args. 
" - "Please specify a database or schema to narrow down the result set." - "\n{}\n\n{}".format(kwargs, matches) - ) +class ConflictingConfigKeys(CompilationException): + def __init__(self, oldkey: str, newkey: str, node): + self.oldkey = oldkey + self.newkey = newkey + self.node = node + msg = f'Invalid config, has conflicting keys "{self.oldkey}" and "{self.newkey}"' + super().__init__(msg=msg) -def get_relation_returned_multiple_results(kwargs, matches): - multiple_matching_relations(kwargs, matches) +class InvalidNumberSourceArgs(CompilationException): + def __init__(self, args, node): + self.args = args + self.node = node + msg = f"source() takes exactly two arguments ({len(self.args)} given)" + super().__init__(msg=msg) -def approximate_relation_match(target, relation): - raise_compiler_error( - "When searching for a relation, dbt found an approximate match. " - "Instead of guessing \nwhich relation to use, dbt will move on. " - "Please delete {relation}, or rename it to be less ambiguous." - "\nSearched for: {target}\nFound: {relation}".format(target=target, relation=relation) - ) +class RequiredVarNotFound(CompilationException): + def __init__(self, var_name: str, merged: Dict, node): + self.var_name = var_name + self.merged = merged + self.node = node + super().__init__(msg=self.get_message()) -def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: - duped_name = node_1.name - if node_1.package_name != node_2.package_name: - extra = ' ("{}" and "{}" are both in the "{}" namespace)'.format( - node_1.package_name, node_2.package_name, namespace - ) - else: - extra = "" - - raise_compiler_error( - 'dbt found two macros with the name "{}" in the namespace "{}"{}. ' - "Since these macros have the same name and exist in the same " - "namespace, dbt will be unable to decide which to call. To fix this, " - "change the name of one of these macros:\n- {} ({})\n- {} ({})".format( - duped_name, - namespace, - extra, - node_1.unique_id, - node_1.original_file_path, - node_2.unique_id, - node_2.original_file_path, - ) - ) + def get_message(self) -> str: + if self.node is not None: + node_name = self.node.name + else: + node_name = "" + dct = {k: self.merged[k] for k in self.merged} + pretty_vars = json.dumps(dct, sort_keys=True, indent=4) -def raise_duplicate_resource_name(node_1, node_2): - duped_name = node_1.name - node_type = NodeType(node_1.resource_type) - pluralized = ( - node_type.pluralize() - if node_1.resource_type == node_2.resource_type - else "resources" # still raise if ref() collision, e.g. 
model + seed - ) + msg = f"Required var '{self.var_name}' not found in config:\nVars supplied to {node_name} = {pretty_vars}" + return msg - action = "looking for" - # duplicate 'ref' targets - if node_type in NodeType.refable(): - formatted_name = f'ref("{duped_name}")' - # duplicate sources - elif node_type == NodeType.Source: - duped_name = node_1.get_full_source_name() - formatted_name = node_1.get_source_representation() - # duplicate docs blocks - elif node_type == NodeType.Documentation: - formatted_name = f'doc("{duped_name}")' - # duplicate generic tests - elif node_type == NodeType.Test and hasattr(node_1, "test_metadata"): - column_name = f'column "{node_1.column_name}" in ' if node_1.column_name else "" - model_name = node_1.file_key_name - duped_name = f'{node_1.name}" defined on {column_name}"{model_name}' - action = "running" - formatted_name = "tests" - # all other resource types - else: - formatted_name = duped_name - - # should this be raise_parsing_error instead? - raise_compiler_error( - f""" -dbt found two {pluralized} with the name "{duped_name}". -Since these resources have the same name, dbt will be unable to find the correct resource -when {action} {formatted_name}. +class PackageNotFoundForMacro(CompilationException): + def __init__(self, package_name: str): + self.package_name = package_name + msg = f"Could not find package '{self.package_name}'" + super().__init__(msg=msg) -To fix this, change the name of one of these resources: -- {node_1.unique_id} ({node_1.original_file_path}) -- {node_2.unique_id} ({node_2.original_file_path}) - """.strip() - ) +class DisallowSecretEnvVar(ParsingException): + def __init__(self, env_var_name: str): + self.env_var_name = env_var_name + super().__init__(msg=self.get_message()) -def raise_ambiguous_alias(node_1, node_2, duped_name=None): - if duped_name is None: - duped_name = f"{node_1.database}.{node_1.schema}.{node_1.alias}" - - raise_compiler_error( - 'dbt found two resources with the database representation "{}".\ndbt ' - "cannot create two resources with identical database representations. " - "To fix this,\nchange the configuration of one of these resources:" - "\n- {} ({})\n- {} ({})".format( - duped_name, - node_1.unique_id, - node_1.original_file_path, - node_2.unique_id, - node_2.original_file_path, + def get_message(self) -> str: + msg = ( + "Secret env vars are allowed only in profiles.yml or packages.yml. " + f"Found '{self.env_var_name}' referenced elsewhere." ) - ) + return msg -def raise_ambiguous_catalog_match(unique_id, match_1, match_2): - def get_match_string(match): - return "{}.{}".format( - match.get("metadata", {}).get("schema"), - match.get("metadata", {}).get("name"), - ) +class InvalidMacroArgType(CompilationException): + def __init__(self, method_name: str, arg_name: str, got_value: Any, expected_type): + self.method_name = method_name + self.arg_name = arg_name + self.got_value = got_value + self.expected_type = expected_type + super().__init__(msg=self.get_message()) - raise_compiler_error( - "dbt found two relations in your warehouse with similar database " - "identifiers. 
dbt\nis unable to determine which of these relations " - 'was created by the model "{unique_id}".\nIn order for dbt to ' - "correctly generate the catalog, one of the following relations must " - "be deleted or renamed:\n\n - {match_1_s}\n - {match_2_s}".format( - unique_id=unique_id, - match_1_s=get_match_string(match_1), - match_2_s=get_match_string(match_2), + def get_message(self) -> str: + got_type = type(self.got_value) + msg = ( + f"'adapter.{self.method_name}' expects argument " + f"'{self.arg_name}' to be of type '{self.expected_type}', instead got " + f"{self.got_value} ({got_type})" ) - ) - + return msg -def raise_patch_targets_not_found(patches): - patch_list = "\n\t".join( - "model {} (referenced in path {})".format(p.name, p.original_file_path) - for p in patches.values() - ) - raise_compiler_error( - "dbt could not find models for the following patches:\n\t{}".format(patch_list) - ) +class InvalidBoolean(CompilationException): + def __init__(self, return_value: Any, macro_name: str): + self.return_value = return_value + self.macro_name = macro_name + super().__init__(msg=self.get_message()) -def _fix_dupe_msg(path_1: str, path_2: str, name: str, type_name: str) -> str: - if path_1 == path_2: - return f"remove one of the {type_name} entries for {name} in this file:\n - {path_1!s}\n" - else: - return ( - f"remove the {type_name} entry for {name} in one of these files:\n" - f" - {path_1!s}\n{path_2!s}" + def get_message(self) -> str: + msg = ( + f"Macro '{self.macro_name}' returns '{self.return_value}'. It is not type 'bool' " + "and cannot not be converted reliably to a bool." ) + return msg -def raise_duplicate_patch_name(patch_1, existing_patch_path): - name = patch_1.name - fix = _fix_dupe_msg( - patch_1.original_file_path, - existing_patch_path, - name, - "resource", - ) - raise_compiler_error( - f"dbt found two schema.yml entries for the same resource named " - f"{name}. Resources and their associated columns may only be " - f"described a single time. To fix this, {fix}" - ) +class RefInvalidArgs(CompilationException): + def __init__(self, node, args): + self.node = node + self.args = args + super().__init__(msg=self.get_message()) + def get_message(self) -> str: + msg = f"ref() takes at most two arguments ({len(self.args)} given)" + return msg + + +class MetricInvalidArgs(CompilationException): + def __init__(self, node, args): + self.node = node + self.args = args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"metric() takes at most two arguments ({len(self.args)} given)" + return msg + + +class RefBadContext(CompilationException): + def __init__(self, node, args): + self.node = node + self.args = args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + # This explicitly references model['name'], instead of model['alias'], for + # better error messages. Ex. If models foo_users and bar_users are aliased + # to 'users', in their respective schemas, then you would want to see + # 'bar_users' in your error messge instead of just 'users'. + if isinstance(self.node, dict): + model_name = self.node["name"] + else: + model_name = self.node.name + + ref_args = ", ".join("'{}'".format(a) for a in self.args) + ref_string = f"{{{{ ref({ref_args}) }}}}" + + msg = f"""dbt was unable to infer all dependencies for the model "{model_name}". +This typically happens when ref() is placed within a conditional block. 
+ +To fix this, add the following hint to the top of the model "{model_name}": + +-- depends_on: {ref_string}""" + + return msg + + +class InvalidDocArgs(CompilationException): + def __init__(self, node, args): + self.node = node + self.args = args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"doc() takes at most two arguments ({len(self.args)} given)" + return msg + + +class DocTargetNotFound(CompilationException): + def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str]): + self.node = node + self.target_doc_name = target_doc_name + self.target_doc_package = target_doc_package + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + target_package_string = "" + if self.target_doc_package is not None: + target_package_string = f"in package '{self. target_doc_package}' " + msg = f"Documentation for '{self.node.unique_id}' depends on doc '{self.target_doc_name}' {target_package_string} which was not found" + return msg + + +class MacroInvalidDispatchArg(CompilationException): + def __init__(self, macro_name: str): + self.macro_name = macro_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"""\ + The "packages" argument of adapter.dispatch() has been deprecated. + Use the "macro_namespace" argument instead. + + Raised during dispatch for: {self.macro_name} + + For more information, see: + + https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch + """ + return msg + + +class DuplicateMacroName(CompilationException): + def __init__(self, node_1, node_2, namespace: str): + self.node_1 = node_1 + self.node_2 = node_2 + self.namespace = namespace + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + duped_name = self.node_1.name + if self.node_1.package_name != self.node_2.package_name: + extra = f' ("{self.node_1.package_name}" and "{self.node_2.package_name}" are both in the "{self.namespace}" namespace)' + else: + extra = "" + + msg = ( + f'dbt found two macros with the name "{duped_name}" in the namespace "{self.namespace}"{extra}. ' + "Since these macros have the same name and exist in the same " + "namespace, dbt will be unable to decide which to call. 
To fix this, " + f"change the name of one of these macros:\n- {self.node_1.unique_id} " + f"({self.node_1.original_file_path})\n- {self.node_2.unique_id} ({self.node_2.original_file_path})" + ) + + return msg + + +# parser level exceptions +class InvalidDictParse(ParsingException): + def __init__(self, exc: ValidationError, node): + self.exc = exc + self.node = node + msg = self.validator_error_message(exc) + super().__init__(msg=msg) + + +class InvalidConfigUpdate(ParsingException): + def __init__(self, exc: ValidationError, node): + self.exc = exc + self.node = node + msg = self.validator_error_message(exc) + super().__init__(msg=msg) + + +class PythonParsingException(ParsingException): + def __init__(self, exc: SyntaxError, node): + self.exc = exc + self.node = node + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + validated_exc = self.validator_error_message(self.exc) + msg = f"{validated_exc}\n{self.exc.text}" + return msg + + +class PythonLiteralEval(ParsingException): + def __init__(self, exc: Exception, node): + self.exc = exc + self.node = node + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{self.exc}\n" + "https://docs.python.org/3/library/ast.html#ast.literal_eval\n" + "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures" + ) + + return msg + + +class InvalidModelConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class YamlParseListFailure(ParsingException): + def __init__( + self, + path: str, + key: str, + yaml_data: List, + cause, + ): + self.path = path + self.key = key + self.yaml_data = yaml_data + self.cause = cause + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + if isinstance(self.cause, str): + reason = self.cause + elif isinstance(self.cause, ValidationError): + reason = self.validator_error_message(self.cause) + else: + reason = self.cause.msg + msg = f"Invalid {self.key} config given in {self.path} @ {self.key}: {self.yaml_data} - {reason}" + return msg + + +class YamlParseDictFailure(ParsingException): + def __init__( + self, + path: str, + key: str, + yaml_data: Dict[str, Any], + cause, + ): + self.path = path + self.key = key + self.yaml_data = yaml_data + self.cause = cause + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + if isinstance(self.cause, str): + reason = self.cause + elif isinstance(self.cause, ValidationError): + reason = self.validator_error_message(self.cause) + else: + reason = self.cause.msg + msg = f"Invalid {self.key} config given in {self.path} @ {self.key}: {self.yaml_data} - {reason}" + return msg + + +class YamlLoadFailure(ParsingException): + def __init__(self, project_name: Optional[str], path: str, exc: ValidationException): + self.project_name = project_name + self.path = path + self.exc = exc + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + reason = self.validator_error_message(self.exc) + + msg = f"Error reading {self.project_name}: {self.path} - {reason}" + + return msg + + +class InvalidTestConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + 
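# (Illustrative sketch only, not one of the patch's classes: every exception
# added in this refactor follows the same shape -- keep the structured fields on
# the instance, render the message once in get_message(), and hand it to the
# base class. With a hypothetical class name:)
class InvalidWidgetConfig(ParsingException):
    def __init__(self, widget_name: str, node):
        self.widget_name = widget_name  # structured data callers can inspect
        self.node = node
        super().__init__(msg=self.get_message())

    def get_message(self) -> str:
        return f"Invalid config for widget '{self.widget_name}'"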
+class InvalidSchemaConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class InvalidSnapshopConfig(ParsingException): + def __init__(self, exc: ValidationError, node): + self.msg = self.validator_error_message(exc) + self.node = node + super().__init__(msg=self.msg) + + +class SameKeyNested(CompilationException): + def __init__(self): + msg = "Test cannot have the same key at the top-level and in config" + super().__init__(msg=msg) + + +class TestArgIncludesModel(CompilationException): + def __init__(self): + msg = 'Test arguments include "model", which is a reserved argument' + super().__init__(msg=msg) + + +class UnexpectedTestNamePattern(CompilationException): + def __init__(self, test_name: str): + self.test_name = test_name + msg = f"Test name string did not match expected pattern: {self.test_name}" + super().__init__(msg=msg) + + +class CustomMacroPopulatingConfigValues(CompilationException): + def __init__( + self, target_name: str, column_name: Optional[str], name: str, key: str, err_msg: str + ): + self.target_name = target_name + self.column_name = column_name + self.name = name + self.key = key + self.err_msg = err_msg + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + # Generic tests do not include custom macros in the Jinja + # rendering context, so this will almost always fail. As it + # currently stands, the error message is inscrutable, which + # has caused issues for some projects migrating from + # pre-0.20.0 to post-0.20.0. + # See https://github.com/dbt-labs/dbt-core/issues/4103 + # and https://github.com/dbt-labs/dbt-core/issues/5294 + + msg = ( + f"The {self.target_name}.{self.column_name} column's " + f'"{self.name}" test references an undefined ' + f"macro in its {self.key} configuration argument. 
" + f"The macro {self.err_msg}.\n" + "Please note that the generic test configuration parser " + "currently does not support using custom macros to " + "populate configuration values" + ) + return msg + + +class TagsNotListOfStrings(CompilationException): + def __init__(self, tags: Any): + self.tags = tags + msg = f"got {self.tags} ({type(self.tags)}) for tags, expected a list of strings" + super().__init__(msg=msg) + + +class TagNotString(CompilationException): + def __init__(self, tag: Any): + self.tag = tag + msg = f"got {self.tag} ({type(self.tag)}) for tag, expected a str" + super().__init__(msg=msg) + + +class TestNameNotString(ParsingException): + def __init__(self, test_name: Any): + self.test_name = test_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = f"test name must be a str, got {type(self.test_name)} (value {self.test_name})" + return msg + + +class TestArgsNotDict(ParsingException): + def __init__(self, test_args: Any): + self.test_args = test_args + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = f"test arguments must be a dict, got {type(self.test_args)} (value {self.test_args})" + return msg + + +class TestDefinitionDictLength(ParsingException): + def __init__(self, test): + self.test = test + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = ( + "test definition dictionary must have exactly one key, got" + f" {self.test} instead ({len(self.test)} keys)" + ) + return msg + + +class TestInvalidType(ParsingException): + def __init__(self, test: Any): + self.test = test + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"test must be dict or str, got {type(self.test)} (value {self.test})" + return msg + + +# This is triggered across multiple files +class EnvVarMissing(ParsingException): + def __init__(self, var: str): + self.var = var + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Env var required but not provided: '{self.var}'" + return msg + + +class TargetNotFound(CompilationException): + def __init__( + self, + node, + target_name: str, + target_kind: str, + target_package: Optional[str] = None, + disabled: Optional[bool] = None, + ): + self.node = node + self.target_name = target_name + self.target_kind = target_kind + self.target_package = target_package + self.disabled = disabled + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + original_file_path = self.node.original_file_path + unique_id = self.node.unique_id + resource_type_title = self.node.resource_type.title() + + if self.disabled is None: + reason = "was not found or is disabled" + elif self.disabled is True: + reason = "is disabled" + else: + reason = "was not found" + + target_package_string = "" + if self.target_package is not None: + target_package_string = f"in package '{self.target_package}' " + + msg = ( + f"{resource_type_title} '{unique_id}' ({original_file_path}) depends on a " + f"{self.target_kind} named '{self.target_name}' {target_package_string}which {reason}" + ) + return msg + + +class DuplicateSourcePatchName(CompilationException): + def __init__(self, patch_1, patch_2): + self.patch_1 = patch_1 + self.patch_2 = patch_2 + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + name = f"{self.patch_1.overrides}.{self.patch_1.name}" + fix = self._fix_dupe_msg( + self.patch_1.path, + self.patch_2.path, + name, + "sources", + ) + msg = ( + f"dbt found two 
schema.yml entries for the same source named " + f"{self.patch_1.name} in package {self.patch_1.overrides}. Sources may only be " + f"overridden a single time. To fix this, {fix}" + ) + return msg + + +class DuplicateMacroPatchName(CompilationException): + def __init__(self, patch_1, existing_patch_path): + self.patch_1 = patch_1 + self.existing_patch_path = existing_patch_path + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + package_name = self.patch_1.package_name + name = self.patch_1.name + fix = self._fix_dupe_msg( + self.patch_1.original_file_path, self.existing_patch_path, name, "macros" + ) + msg = ( + f"dbt found two schema.yml entries for the same macro in package " + f"{package_name} named {name}. Macros may only be described a single " + f"time. To fix this, {fix}" + ) + return msg + + +# core level exceptions +class DuplicateAlias(AliasException): + def __init__(self, kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str): + self.kwargs = kwargs + self.aliases = aliases + self.canonical_key = canonical_key + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + # dupe found: go through the dict so we can have a nice-ish error + key_names = ", ".join( + "{}".format(k) for k in self.kwargs if self.aliases.get(k) == self.canonical_key + ) + msg = f'Got duplicate keys: ({key_names}) all map to "{self.canonical_key}"' + return msg + + +# Postgres Exceptions + + +class UnexpectedDbReference(NotImplementedException): + def __init__(self, adapter, database, expected): + self.adapter = adapter + self.database = database + self.expected = expected + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Cross-db references not allowed in {self.adapter} ({self.database} vs {self.expected})" + return msg + + +class CrossDbReferenceProhibited(CompilationException): + def __init__(self, adapter, exc_msg: str): + self.adapter = adapter + self.exc_msg = exc_msg + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Cross-db references not allowed in adapter {self.adapter}: Got {self.exc_msg}" + return msg + + +class IndexConfigNotDict(CompilationException): + def __init__(self, raw_index: Any): + self.raw_index = raw_index + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Invalid index config:\n" + f" Got: {self.raw_index}\n" + f' Expected a dictionary with at minimum a "columns" key' + ) + return msg + + +class InvalidIndexConfig(CompilationException): + def __init__(self, exc: TypeError): + self.exc = exc + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + validator_msg = self.validator_error_message(self.exc) + msg = f"Could not parse index config: {validator_msg}" + return msg + + +# adapters exceptions +class InvalidMacroResult(CompilationException): + def __init__(self, freshness_macro_name: str, table): + self.freshness_macro_name = freshness_macro_name + self.table = table + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f'Got an invalid result from "{self.freshness_macro_name}" macro: {[tuple(r) for r in self.table]}' + + return msg + + +class SnapshotTargetNotSnapshotTable(CompilationException): + def __init__(self, missing: List): + self.missing = missing + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = 'Snapshot target is not a snapshot table (missing "{}")'.format( + '", "'.join(self.missing) + ) + return msg 
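+
+
+# An illustrative sketch of how this check is expected to fire (assumes a
+# `target_columns` iterable provided by the adapter; the names are dbt's
+# standard snapshot metadata columns):
+#
+#     expected = {"dbt_scd_id", "dbt_updated_at", "dbt_valid_from", "dbt_valid_to"}
+#     missing = sorted(expected - set(target_columns))
+#     if missing:
+#         raise SnapshotTargetNotSnapshotTable(missing)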
+ + +class SnapshotTargetIncomplete(CompilationException): + def __init__(self, extra: List, missing: List): + self.extra = extra + self.missing = missing + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + 'Snapshot target has ("{}") but not ("{}") - is it an ' + "unmigrated previous version archive?".format( + '", "'.join(self.extra), '", "'.join(self.missing) + ) + ) + return msg + + +class RenameToNoneAttempted(CompilationException): + def __init__(self, src_name: str, dst_name: str, name: str): + self.src_name = src_name + self.dst_name = dst_name + self.name = name + self.msg = f"Attempted to rename {self.src_name} to {self.dst_name} for {self.name}" + super().__init__(msg=self.msg) + + +class NullRelationDropAttempted(CompilationException): + def __init__(self, name: str): + self.name = name + self.msg = f"Attempted to drop a null relation for {self.name}" + super().__init__(msg=self.msg) + + +class NullRelationCacheAttempted(CompilationException): + def __init__(self, name: str): + self.name = name + self.msg = f"Attempted to cache a null relation for {self.name}" + super().__init__(msg=self.msg) + + +class InvalidQuoteConfigType(CompilationException): + def __init__(self, quote_config: Any): + self.quote_config = quote_config + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + 'The seed configuration value of "quote_columns" has an ' + f"invalid type {type(self.quote_config)}" + ) + return msg + + +class MultipleDatabasesNotAllowed(CompilationException): + def __init__(self, databases): + self.databases = databases + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = str(self.databases) + return msg + + +class RelationTypeNull(CompilationException): + def __init__(self, relation): + self.relation = relation + self.msg = f"Tried to drop relation {self.relation}, but its type is null." + super().__init__(msg=self.msg) + + +class MaterializationNotAvailable(CompilationException): + def __init__(self, model, adapter_type: str): + self.model = model + self.adapter_type = adapter_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + materialization = self.model.get_materialization() + msg = f"Materialization '{materialization}' is not available for {self.adapter_type}!" + return msg + + +class RelationReturnedMultipleResults(CompilationException): + def __init__(self, kwargs: Mapping[str, Any], matches: List): + self.kwargs = kwargs + self.matches = matches + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + "get_relation returned more than one relation with the given args. " + "Please specify a database or schema to narrow down the result set." + f"\n{self.kwargs}\n\n{self.matches}" + ) + return msg + + +class ApproximateMatch(CompilationException): + def __init__(self, target, relation): + self.target = target + self.relation = relation + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + + msg = ( + "When searching for a relation, dbt found an approximate match. " + "Instead of guessing \nwhich relation to use, dbt will move on. " + f"Please delete {self.relation}, or rename it to be less ambiguous." 
+            f"\nSearched for: {self.target}\nFound: {self.relation}"
+        )
+
+        return msg
+
+
+# adapters exceptions
+class UnexpectedNull(DatabaseException):
+    def __init__(self, field_name: str, source):
+        self.field_name = field_name
+        self.source = source
+        msg = (
+            f"Expected a non-null value when querying field '{self.field_name}' of table "
+            f"{self.source} but received value 'null' instead"
+        )
+        super().__init__(msg)
+
+
+class UnexpectedNonTimestamp(DatabaseException):
+    def __init__(self, field_name: str, source, dt: Any):
+        self.field_name = field_name
+        self.source = source
+        self.type_name = type(dt).__name__
+        msg = (
+            f"Expected a timestamp value when querying field '{self.field_name}' of table "
+            f"{self.source} but received value of type '{self.type_name}' instead"
+        )
+        super().__init__(msg)
+
+
+# deps exceptions
+class MultipleVersionGitDeps(DependencyException):
+    def __init__(self, git: str, requested):
+        self.git = git
+        self.requested = requested
+        msg = (
+            "git dependencies should contain exactly one version. "
+            f"{self.git} contains: {self.requested}"
+        )
+        super().__init__(msg)
+
+
+class DuplicateProjectDependency(DependencyException):
+    def __init__(self, project_name: str):
+        self.project_name = project_name
+        msg = (
+            f'Found duplicate project "{self.project_name}". This occurs when '
+            "a dependency has the same project name as some other dependency."
+        )
+        super().__init__(msg)
+
+
+class DuplicateDependencyToRoot(DependencyException):
+    def __init__(self, project_name: str):
+        self.project_name = project_name
+        msg = (
+            "Found a dependency with the same name as the root project "
+            f'"{self.project_name}". Package names must be unique in a project.'
+            " Please rename one of these packages."
+        )
+        super().__init__(msg)
+
+
+class MismatchedDependencyTypes(DependencyException):
+    def __init__(self, new, old):
+        self.new = new
+        self.old = old
+        msg = (
+            f"Cannot incorporate {self.new} ({self.new.__class__.__name__}) in {self.old} "
+            f"({self.old.__class__.__name__}): mismatched types"
+        )
+        super().__init__(msg)
+
+
+class PackageVersionNotFound(DependencyException):
+    def __init__(
+        self,
+        package_name: str,
+        version_range,
+        available_versions: List[str],
+        should_version_check: bool,
+    ):
+        self.package_name = package_name
+        self.version_range = version_range
+        self.available_versions = available_versions
+        self.should_version_check = should_version_check
+        super().__init__(self.get_message())
+
+    def get_message(self) -> str:
+        base_msg = (
+            "Could not find a matching compatible version for package {}\n"
+            "  Requested range: {}\n"
+            "  Compatible versions: {}\n"
+        )
+        addendum = (
+            (
+                "\n"
+                "  Not shown: package versions incompatible with installed version of dbt-core\n"
+                "  To include them, run 'dbt --no-version-check deps'"
+            )
+            if self.should_version_check
+            else ""
+        )
+        msg = (
+            base_msg.format(self.package_name, self.version_range, self.available_versions)
+            + addendum
+        )
+        return msg
+
+
+class PackageNotFound(DependencyException):
+    def __init__(self, package_name: str):
+        self.package_name = package_name
+        msg = f"Package {self.package_name} was not found in the package index"
+        super().__init__(msg)
+
+
+# config level exceptions
+
+
+class ProfileConfigInvalid(DbtProfileError):
+    def __init__(self, exc: ValidationError):
+        self.exc = exc
+        msg = self.validator_error_message(self.exc)
+        super().__init__(msg=msg)
+
+
+class ProjectContractInvalid(DbtProjectError):
+    def __init__(self, exc: ValidationError):
+        self.exc = exc
+        msg = 
self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class ProjectContractBroken(DbtProjectError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class ConfigContractBroken(DbtProjectError): + def __init__(self, exc: ValidationError): + self.exc = exc + msg = self.validator_error_message(self.exc) + super().__init__(msg=msg) + + +class NonUniquePackageName(CompilationException): + def __init__(self, project_name: str): + self.project_name = project_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + "dbt found more than one package with the name " + f'"{self.project_name}" included in this project. Package ' + "names must be unique in a project. Please rename " + "one of these packages." + ) + return msg + + +class UninstalledPackagesFound(CompilationException): + def __init__( + self, + count_packages_specified: int, + count_packages_installed: int, + packages_install_path: str, + ): + self.count_packages_specified = count_packages_specified + self.count_packages_installed = count_packages_installed + self.packages_install_path = packages_install_path + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"dbt found {self.count_packages_specified} package(s) " + "specified in packages.yml, but only " + f"{self.count_packages_installed} package(s) installed " + f'in {self.packages_install_path}. Run "dbt deps" to ' + "install package dependencies." + ) + return msg + + +class VarsArgNotYamlDict(CompilationException): + def __init__(self, var_type): + self.var_type = var_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + type_name = self.var_type.__name__ + + msg = f"The --vars argument must be a YAML dictionary, but was of type '{type_name}'" + return msg + + +# contracts level + + +class DuplicateMacroInPackage(CompilationException): + def __init__(self, macro, macro_mapping: Mapping): + self.macro = macro + self.macro_mapping = macro_mapping + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + other_path = self.macro_mapping[self.macro.unique_id].original_file_path + # subtract 2 for the "Compilation Error" indent + # note that the line wrap eats newlines, so if you want newlines, + # this is the result :( + msg = line_wrap_message( + f"""\ + dbt found two macros named "{self.macro.name}" in the project + "{self.macro.package_name}". + + + To fix this error, rename or remove one of the following + macros: + + - {self.macro.original_file_path} + + - {other_path} + """, + subtract=2, + ) + return msg + + +class DuplicateMaterializationName(CompilationException): + def __init__(self, macro, other_macro): + self.macro = macro + self.other_macro = other_macro + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + macro_name = self.macro.name + macro_package_name = self.macro.package_name + other_package_name = self.other_macro.macro.package_name + + msg = ( + f"Found two materializations with the name {macro_name} (packages " + f"{macro_package_name} and {other_package_name}). dbt cannot resolve " + "this ambiguity" + ) + return msg + + +# jinja exceptions +class MissingConfig(CompilationException): + def __init__(self, unique_id: str, name: str): + self.unique_id = unique_id + self.name = name + msg = ( + f"Model '{self.unique_id}' does not define a required config parameter '{self.name}'." 
+ ) + super().__init__(msg=msg) + + +class MissingMaterialization(CompilationException): + def __init__(self, model, adapter_type): + self.model = model + self.adapter_type = adapter_type + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + materialization = self.model.get_materialization() + + valid_types = "'default'" + + if self.adapter_type != "default": + valid_types = f"'default' and '{self.adapter_type}'" + + msg = f"No materialization '{materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})" + return msg + + +class MissingRelation(CompilationException): + def __init__(self, relation, model=None): + self.relation = relation + self.model = model + msg = f"Relation {self.relation} not found!" + super().__init__(msg=msg) -def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): - package_name = patch_1.package_name - name = patch_1.name - fix = _fix_dupe_msg(patch_1.original_file_path, existing_patch_path, name, "macros") - raise_compiler_error( - f"dbt found two schema.yml entries for the same macro in package " - f"{package_name} named {name}. Macros may only be described a single " - f"time. To fix this, {fix}" - ) +class AmbiguousAlias(CompilationException): + def __init__(self, node_1, node_2, duped_name=None): + self.node_1 = node_1 + self.node_2 = node_2 + if duped_name is None: + self.duped_name = f"{self.node_1.database}.{self.node_1.schema}.{self.node_1.alias}" + else: + self.duped_name = duped_name + super().__init__(msg=self.get_message()) -def raise_duplicate_source_patch_name(patch_1, patch_2): - name = f"{patch_1.overrides}.{patch_1.name}" - fix = _fix_dupe_msg( - patch_1.path, - patch_2.path, - name, - "sources", - ) - raise_compiler_error( - f"dbt found two schema.yml entries for the same source named " - f"{patch_1.name} in package {patch_1.overrides}. Sources may only be " - f"overridden a single time. To fix this, {fix}" - ) + def get_message(self) -> str: + + msg = ( + f'dbt found two resources with the database representation "{self.duped_name}".\ndbt ' + "cannot create two resources with identical database representations. " + "To fix this,\nchange the configuration of one of these resources:" + f"\n- {self.node_1.unique_id} ({self.node_1.original_file_path})\n- {self.node_2.unique_id} ({self.node_2.original_file_path})" + ) + return msg -def raise_invalid_property_yml_version(path, issue): - raise_compiler_error( - "The yml property file at {} is invalid because {}. Please consult the " - "documentation for more information on yml property file syntax:\n\n" - "https://docs.getdbt.com/reference/configs-and-properties".format(path, issue) - ) +class AmbiguousCatalogMatch(CompilationException): + def __init__(self, unique_id: str, match_1, match_2): + self.unique_id = unique_id + self.match_1 = match_1 + self.match_2 = match_2 + super().__init__(msg=self.get_message()) + def get_match_string(self, match): + match_schema = match.get("metadata", {}).get("schema") + match_name = match.get("metadata", {}).get("name") + return f"{match_schema}.{match_name}" -def raise_unrecognized_credentials_type(typename, supported_types): - raise_compiler_error( - 'Unrecognized credentials type "{}" - supported types are ({})'.format( - typename, ", ".join('"{}"'.format(t) for t in supported_types) + def get_message(self) -> str: + msg = ( + "dbt found two relations in your warehouse with similar database identifiers. 
" + "dbt\nis unable to determine which of these relations was created by the model " + f'"{self.unique_id}".\nIn order for dbt to correctly generate the catalog, one ' + "of the following relations must be deleted or renamed:\n\n - " + f"{self.get_match_string(self.match_1)}\n - {self.get_match_string(self.match_2)}" ) - ) + return msg + + +class CacheInconsistency(InternalException): + def __init__(self, msg: str): + self.msg = msg + formatted_msg = f"Cache inconsistency detected: {self.msg}" + super().__init__(msg=formatted_msg) + + +class NewNameAlreadyInCache(CacheInconsistency): + def __init__(self, old_key: str, new_key: str): + self.old_key = old_key + self.new_key = new_key + msg = ( + f'in rename of "{self.old_key}" -> "{self.new_key}", new name is in the cache already' + ) + super().__init__(msg) + + +class ReferencedLinkNotCached(CacheInconsistency): + def __init__(self, referenced_key: str): + self.referenced_key = referenced_key + msg = f"in add_link, referenced link key {self.referenced_key} not in cache!" + super().__init__(msg) + + +class DependentLinkNotCached(CacheInconsistency): + def __init__(self, dependent_key: str): + self.dependent_key = dependent_key + msg = f"in add_link, dependent link key {self.dependent_key} not in cache!" + super().__init__(msg) + + +class TruncatedModelNameCausedCollision(CacheInconsistency): + def __init__(self, new_key, relations: Dict): + self.new_key = new_key + self.relations = relations + super().__init__(self.get_message()) + + def get_message(self) -> str: + # Tell user when collision caused by model names truncated during + # materialization. + match = re.search("__dbt_backup|__dbt_tmp$", self.new_key.identifier) + if match: + truncated_model_name_prefix = self.new_key.identifier[: match.start()] + message_addendum = ( + "\n\nName collisions can occur when the length of two " + "models' names approach your database's builtin limit. " + "Try restructuring your project such that no two models " + f"share the prefix '{truncated_model_name_prefix}'. " + "Then, clean your warehouse of any removed models." + ) + else: + message_addendum = "" + + msg = f"in rename, new key {self.new_key} already in cache: {list(self.relations.keys())}{message_addendum}" + + return msg + + +class NoneRelationFound(CacheInconsistency): + def __init__(self): + msg = "in get_relations, a None relation was found in the cache!" + super().__init__(msg) + + +# this is part of the context and also raised in dbt.contracts.relation.py +class DataclassNotDict(CompilationException): + def __init__(self, obj: Any): + self.obj = obj + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f'The object ("{self.obj}") was used as a dictionary. This ' + "capability has been removed from objects of this type." + ) + + return msg + + +class DependencyNotFound(CompilationException): + def __init__(self, node, node_description, required_pkg): + self.node = node + self.node_description = node_description + self.required_pkg = required_pkg + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Error while parsing {self.node_description}.\nThe required package " + f'"{self.required_pkg}" was not found. Is the package installed?\n' + "Hint: You may need to run `dbt deps`." 
+ ) + + return msg + + +class DuplicatePatchPath(CompilationException): + def __init__(self, patch_1, existing_patch_path): + self.patch_1 = patch_1 + self.existing_patch_path = existing_patch_path + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + name = self.patch_1.name + fix = self._fix_dupe_msg( + self.patch_1.original_file_path, + self.existing_patch_path, + name, + "resource", + ) + msg = ( + f"dbt found two schema.yml entries for the same resource named " + f"{name}. Resources and their associated columns may only be " + f"described a single time. To fix this, {fix}" + ) + return msg + + +# should this inherit ParsingException instead? +class DuplicateResourceName(CompilationException): + def __init__(self, node_1, node_2): + self.node_1 = node_1 + self.node_2 = node_2 + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + duped_name = self.node_1.name + node_type = NodeType(self.node_1.resource_type) + pluralized = ( + node_type.pluralize() + if self.node_1.resource_type == self.node_2.resource_type + else "resources" # still raise if ref() collision, e.g. model + seed + ) + + action = "looking for" + # duplicate 'ref' targets + if node_type in NodeType.refable(): + formatted_name = f'ref("{duped_name}")' + # duplicate sources + elif node_type == NodeType.Source: + duped_name = self.node_1.get_full_source_name() + formatted_name = self.node_1.get_source_representation() + # duplicate docs blocks + elif node_type == NodeType.Documentation: + formatted_name = f'doc("{duped_name}")' + # duplicate generic tests + elif node_type == NodeType.Test and hasattr(self.node_1, "test_metadata"): + column_name = ( + f'column "{self.node_1.column_name}" in ' if self.node_1.column_name else "" + ) + model_name = self.node_1.file_key_name + duped_name = f'{self.node_1.name}" defined on {column_name}"{model_name}' + action = "running" + formatted_name = "tests" + # all other resource types + else: + formatted_name = duped_name + + msg = f""" +dbt found two {pluralized} with the name "{duped_name}". + +Since these resources have the same name, dbt will be unable to find the correct resource +when {action} {formatted_name}. + +To fix this, change the name of one of these resources: +- {self.node_1.unique_id} ({self.node_1.original_file_path}) +- {self.node_2.unique_id} ({self.node_2.original_file_path}) + """.strip() + return msg + + +class InvalidPropertyYML(CompilationException): + def __init__(self, path: str, issue: str): + self.path = path + self.issue = issue + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"The yml property file at {self.path} is invalid because {self.issue}. " + "Please consult the documentation for more information on yml property file " + "syntax:\n\nhttps://docs.getdbt.com/reference/configs-and-properties" + ) + return msg + + +class PropertyYMLMissingVersion(InvalidPropertyYML): + def __init__(self, path: str): + self.path = path + self.issue = f"the yml property file {self.path} is missing a version tag" + super().__init__(self.path, self.issue) + + +class PropertyYMLVersionNotInt(InvalidPropertyYML): + def __init__(self, path: str, version: Any): + self.path = path + self.version = version + self.issue = ( + "its 'version:' tag must be an integer (e.g. version: 2)." 
+ f" {self.version} is not an integer" + ) + super().__init__(self.path, self.issue) + + +class PropertyYMLInvalidTag(InvalidPropertyYML): + def __init__(self, path: str, version: int): + self.path = path + self.version = version + self.issue = f"its 'version:' tag is set to {self.version}. Only 2 is supported" + super().__init__(self.path, self.issue) + + +class RelationWrongType(CompilationException): + def __init__(self, relation, expected_type, model=None): + self.relation = relation + self.expected_type = expected_type + self.model = model + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"Trying to create {self.expected_type} {self.relation}, " + f"but it currently exists as a {self.relation.type}. Either " + f"drop {self.relation} manually, or run dbt with " + "`--full-refresh` and dbt will drop it for you." + ) + + return msg + + +# These are copies of what's in dbt/context/exceptions_jinja.py to not immediately break adapters +# utilizing these functions as exceptions. These are direct copies to avoid circular imports. +# They will be removed in 1 (or 2?) versions. Issue to be created to ensure it happens. + +# TODO: add deprecation to functions +def warn(msg, node=None): + warn_or_error(JinjaLogWarning(msg=msg, node_info=get_node_info())) + return "" + + +def missing_config(model, name) -> NoReturn: + raise MissingConfig(unique_id=model.unique_id, name=name) + + +def missing_materialization(model, adapter_type) -> NoReturn: + raise MissingMaterialization(model=model, adapter_type=adapter_type) + + +def missing_relation(relation, model=None) -> NoReturn: + raise MissingRelation(relation, model) + + +def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: + raise AmbiguousAlias(node_1, node_2, duped_name) + + +def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: + raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + + +def raise_cache_inconsistent(message) -> NoReturn: + raise CacheInconsistency(message) -def raise_not_implemented(msg): - raise NotImplementedException("ERROR: {}".format(msg)) +def raise_dataclass_not_dict(obj) -> NoReturn: + raise DataclassNotDict(obj) + +# note: this is called all over the code in addition to in jinja +def raise_compiler_error(msg, node=None) -> NoReturn: + raise CompilationException(msg, node) + + +def raise_database_error(msg, node=None) -> NoReturn: + raise DatabaseException(msg, node) + + +def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: + raise DependencyNotFound(node, node_description, required_pkg) + + +def raise_dependency_error(msg) -> NoReturn: + raise DependencyException(scrub_secrets(msg, env_secrets())) + + +def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: + raise DuplicatePatchPath(patch_1, existing_patch_path) + + +def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: + raise DuplicateResourceName(node_1, node_2) + + +def raise_invalid_property_yml_version(path, issue) -> NoReturn: + raise InvalidPropertyYML(path, issue) + + +def raise_not_implemented(msg) -> NoReturn: + raise NotImplementedException(msg) + + +def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: + raise RelationWrongType(relation, expected_type, model) + + +# these were implemented in core so deprecating here by calling the new exception directly def raise_duplicate_alias( kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str ) -> NoReturn: - # dupe found: go through the dict so we 
can have a nice-ish error - key_names = ", ".join("{}".format(k) for k in kwargs if aliases.get(k) == canonical_key) + raise DuplicateAlias(kwargs, aliases, canonical_key) + - raise AliasException(f'Got duplicate keys: ({key_names}) all map to "{canonical_key}"') +def raise_duplicate_source_patch_name(patch_1, patch_2): + raise DuplicateSourcePatchName(patch_1, patch_2) -def warn(msg, node=None): - dbt.events.functions.warn_or_error( - JinjaLogWarning(msg=msg, node_info=get_node_info()), +def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): + raise DuplicateMacroPatchName(patch_1, existing_patch_path) + + +def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: + raise DuplicateMacroName(node_1, node_2, namespace) + + +def approximate_relation_match(target, relation): + raise ApproximateMatch(target, relation) + + +def get_relation_returned_multiple_results(kwargs, matches): + raise RelationReturnedMultipleResults(kwargs, matches) + + +def system_error(operation_name): + # Note: This was converted for core to use SymbolicLinkError because it's the only way it was used. Maintaining flexibility here for now. + msg = ( + f"dbt encountered an error when attempting to {operation_name}. " + "If this error persists, please create an issue at: \n\n" + "https://github.com/dbt-labs/dbt-core" + ) + raise CompilationException(msg) + + +def invalid_materialization_argument(name, argument): + raise InvalidMaterializationArg(name, argument) + + +def bad_package_spec(repo, spec, error_message): + msg = f"Error checking out spec='{spec}' for repo {repo}\n{error_message}" + raise InternalException(scrub_secrets(msg, env_secrets())) + + +def raise_git_cloning_error(error: CommandResultError) -> NoReturn: + error.cmd = list(scrub_secrets(str(error.cmd), env_secrets())) + raise error + + +def raise_git_cloning_problem(repo) -> NoReturn: + raise GitCloningProblem(repo) + + +def macro_invalid_dispatch_arg(macro_name) -> NoReturn: + raise MacroInvalidDispatchArg(macro_name) + + +def dependency_not_found(node, dependency): + raise GraphDependencyNotFound(node, dependency) + + +def target_not_found( + node, + target_name: str, + target_kind: str, + target_package: Optional[str] = None, + disabled: Optional[bool] = None, +) -> NoReturn: + raise TargetNotFound( node=node, + target_name=target_name, + target_kind=target_kind, + target_package=target_package, + disabled=disabled, ) - return "" -# Update this when a new function should be added to the -# dbt context's `exceptions` key! 
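+# (Removed from this module; per the note above, the jinja-facing versions of
+# these helpers now live in dbt/context/exceptions_jinja.py.)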
-CONTEXT_EXPORTS = {
-    fn.__name__: fn
-    for fn in [
-        warn,
-        missing_config,
-        missing_materialization,
-        missing_relation,
-        raise_ambiguous_alias,
-        raise_ambiguous_catalog_match,
-        raise_cache_inconsistent,
-        raise_dataclass_not_dict,
-        raise_compiler_error,
-        raise_database_error,
-        raise_dep_not_found,
-        raise_dependency_error,
-        raise_duplicate_patch_name,
-        raise_duplicate_resource_name,
-        raise_invalid_property_yml_version,
-        raise_not_implemented,
-        relation_wrong_type,
-    ]
-}
-
-
-def wrapper(model):
-    def wrap(func):
-        @functools.wraps(func)
-        def inner(*args, **kwargs):
-            try:
-                return func(*args, **kwargs)
-            except RuntimeException as exc:
-                exc.add_node(model)
-                raise exc
-
-        return inner
-
-    return wrap
-
-
-def wrapped_exports(model):
-    wrap = wrapper(model)
-    return {name: wrap(export) for name, export in CONTEXT_EXPORTS.items()}
+def doc_target_not_found(
+    model, target_doc_name: str, target_doc_package: Optional[str]
+) -> NoReturn:
+    raise DocTargetNotFound(
+        node=model, target_doc_name=target_doc_name, target_doc_package=target_doc_package
+    )
+
+
+def doc_invalid_args(model, args) -> NoReturn:
+    raise InvalidDocArgs(node=model, args=args)
+
+
+def ref_bad_context(model, args) -> NoReturn:
+    raise RefBadContext(node=model, args=args)
+
+
+def metric_invalid_args(model, args) -> NoReturn:
+    raise MetricInvalidArgs(node=model, args=args)
+
+
+def ref_invalid_args(model, args) -> NoReturn:
+    raise RefInvalidArgs(node=model, args=args)
+
+
+def invalid_bool_error(got_value, macro_name) -> NoReturn:
+    raise InvalidBoolean(return_value=got_value, macro_name=macro_name)
+
+
+def invalid_type_error(method_name, arg_name, got_value, expected_type) -> NoReturn:
+    """Raise a CompilationException when an adapter method available to macros
+    has changed.
+    """
+    raise InvalidMacroArgType(method_name, arg_name, got_value, expected_type)
+
+
+def disallow_secret_env_var(env_var_name) -> NoReturn:
+    """Raise an error when a secret env var is referenced outside allowed
+    rendering contexts"""
+    raise DisallowSecretEnvVar(env_var_name)
+
+
+def raise_parsing_error(msg, node=None) -> NoReturn:
+    raise ParsingException(msg, node)
+
+
+# These exception functions were not called within dbt-core; they remain here,
+# deprecated, to give external code a chance to rework its usage of them.
+# TODO: is this valid? Should I create a special exception class for this?
+def raise_unrecognized_credentials_type(typename, supported_types):
+    msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format(
+        typename, ", ".join('"{}"'.format(t) for t in supported_types)
+    )
+    raise CompilationException(msg)
+
+
+def raise_patch_targets_not_found(patches):
+    patch_list = "\n\t".join(
+        f"model {p.name} (referenced in path {p.original_file_path})" for p in patches.values()
+    )
+    msg = f"dbt could not find models for the following patches:\n\t{patch_list}"
+    raise CompilationException(msg)
+
+
+def multiple_matching_relations(kwargs, matches):
+    raise RelationReturnedMultipleResults(kwargs, matches)
+
+
+# while this isn't in our code, I wouldn't be surprised if it's in adapter code
+def materialization_not_available(model, adapter_type):
+    raise MaterializationNotAvailable(model, adapter_type)
+
+
+def macro_not_found(model, target_macro_id):
+    msg = f"'{model.unique_id}' references macro '{target_macro_id}' which is not defined!"
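+    # Passing node=model lets the raised CompilationException render the
+    # offending model's file context alongside the message.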
+ raise CompilationException(msg=msg, node=model) diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 21bc74fbfc5..9c245214d83 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -18,7 +18,7 @@ from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.nodes import ManifestNode, BaseNode from dbt.contracts.graph.unparsed import UnparsedNode, Docs -from dbt.exceptions import ParsingException, validator_error_message, InternalException +from dbt.exceptions import InternalException, InvalidConfigUpdate, InvalidDictParse from dbt import hooks from dbt.node_types import NodeType, ModelLanguage from dbt.parser.search import FileBlock @@ -216,7 +216,6 @@ def _create_parsetime_node( try: return self.parse_from_dict(dct, validate=True) except ValidationError as exc: - msg = validator_error_message(exc) # this is a bit silly, but build an UnparsedNode just for error # message reasons node = self._create_error_node( @@ -225,7 +224,7 @@ def _create_parsetime_node( original_file_path=block.path.original_file_path, raw_code=block.contents, ) - raise ParsingException(msg, node=node) + raise InvalidDictParse(exc, node=node) def _context_for(self, parsed_node: IntermediateNode, config: ContextConfig) -> Dict[str, Any]: return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config) @@ -364,8 +363,7 @@ def render_update(self, node: IntermediateNode, config: ContextConfig) -> None: self.update_parsed_node_config(node, config, context=context) except ValidationError as exc: # we got a ValidationError - probably bad types in config() - msg = validator_error_message(exc) - raise ParsingException(msg, node=node) from exc + raise InvalidConfigUpdate(exc, node=node) from exc def add_result_node(self, block: FileBlock, node: ManifestNode): if node.config.enabled: diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index 3b1149e53a5..af0282c953f 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -21,7 +21,19 @@ UnparsedNodeUpdate, UnparsedExposure, ) -from dbt.exceptions import raise_compiler_error, raise_parsing_error, UndefinedMacroException +from dbt.exceptions import ( + CustomMacroPopulatingConfigValues, + SameKeyNested, + TagNotString, + TagsNotListOfStrings, + TestArgIncludesModel, + TestArgsNotDict, + TestDefinitionDictLength, + TestInvalidType, + TestNameNotString, + UnexpectedTestNamePattern, + UndefinedMacroException, +) from dbt.parser.search import FileBlock @@ -222,9 +234,7 @@ def __init__( test_name, test_args = self.extract_test_args(test, column_name) self.args: Dict[str, Any] = test_args if "model" in self.args: - raise_compiler_error( - 'Test arguments include "model", which is a reserved argument', - ) + raise TestArgIncludesModel() self.package_name: str = package_name self.target: Testable = target @@ -232,9 +242,7 @@ def __init__( match = self.TEST_NAME_PATTERN.match(test_name) if match is None: - raise_compiler_error( - "Test name string did not match expected pattern: {}".format(test_name) - ) + raise UnexpectedTestNamePattern(test_name) groups = match.groupdict() self.name: str = groups["test_name"] @@ -251,9 +259,7 @@ def __init__( value = self.args.pop(key, None) # 'modifier' config could be either top level arg or in config if value and "config" in self.args and key in self.args["config"]: - raise_compiler_error( - "Test cannot have the same key at the top-level and in config" - ) + raise SameKeyNested() if not 
value and "config" in self.args: value = self.args["config"].pop(key, None) if isinstance(value, str): @@ -261,22 +267,12 @@ def __init__( try: value = get_rendered(value, render_ctx, native=True) except UndefinedMacroException as e: - - # Generic tests do not include custom macros in the Jinja - # rendering context, so this will almost always fail. As it - # currently stands, the error message is inscrutable, which - # has caused issues for some projects migrating from - # pre-0.20.0 to post-0.20.0. - # See https://github.com/dbt-labs/dbt-core/issues/4103 - # and https://github.com/dbt-labs/dbt-core/issues/5294 - raise_compiler_error( - f"The {self.target.name}.{column_name} column's " - f'"{self.name}" test references an undefined ' - f"macro in its {key} configuration argument. " - f"The macro {e.msg}.\n" - "Please note that the generic test configuration parser " - "currently does not support using custom macros to " - "populate configuration values" + raise CustomMacroPopulatingConfigValues( + target_name=self.target.name, + column_name=column_name, + name=self.name, + key=key, + err_msg=e.msg ) if value is not None: @@ -314,9 +310,7 @@ def _bad_type(self) -> TypeError: @staticmethod def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: if not isinstance(test, dict): - raise_parsing_error( - "test must be dict or str, got {} (value {})".format(type(test), test) - ) + raise TestInvalidType(test) # If the test is a dictionary with top-level keys, the test name is "test_name" # and the rest are arguments @@ -330,20 +324,13 @@ def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: else: test = list(test.items()) if len(test) != 1: - raise_parsing_error( - "test definition dictionary must have exactly one key, got" - " {} instead ({} keys)".format(test, len(test)) - ) + raise TestDefinitionDictLength(test) test_name, test_args = test[0] if not isinstance(test_args, dict): - raise_parsing_error( - "test arguments must be dict, got {} (value {})".format(type(test_args), test_args) - ) + raise TestArgsNotDict(test_args) if not isinstance(test_name, str): - raise_parsing_error( - "test name must be a str, got {} (value {})".format(type(test_name), test_name) - ) + raise TestNameNotString(test_name) test_args = deepcopy(test_args) if name is not None: test_args["column_name"] = name @@ -434,12 +421,10 @@ def tags(self) -> List[str]: if isinstance(tags, str): tags = [tags] if not isinstance(tags, list): - raise_compiler_error( - f"got {tags} ({type(tags)}) for tags, expected a list of strings" - ) + raise TagsNotListOfStrings(tags) for tag in tags: if not isinstance(tag, str): - raise_compiler_error(f"got {tag} ({type(tag)}) for tag, expected a str") + raise TagNotString(tag) return tags[:] def macro_name(self) -> str: diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 2e284b43cfa..9da68736031 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -71,9 +71,7 @@ ResultNode, ) from dbt.contracts.util import Writable -from dbt.exceptions import ( - target_not_found, -) +from dbt.exceptions import TargetNotFound, AmbiguousAlias from dbt.parser.base import Parser from dbt.parser.analysis import AnalysisParser from dbt.parser.generic_test import GenericTestParser @@ -989,7 +987,7 @@ def invalid_target_fail_unless_test( ) ) else: - target_not_found( + raise TargetNotFound( node=node, target_name=target_name, target_kind=target_kind, @@ -1017,11 +1015,11 @@ def _check_resource_uniqueness( existing_node = 
names_resources.get(name) if existing_node is not None: - dbt.exceptions.raise_duplicate_resource_name(existing_node, node) + raise dbt.exceptions.DuplicateResourceName(existing_node, node) existing_alias = alias_resources.get(full_node_name) if existing_alias is not None: - dbt.exceptions.raise_ambiguous_alias(existing_alias, node, full_node_name) + raise AmbiguousAlias(node_1=existing_alias, node_2=node, duped_name=full_node_name) names_resources[name] = node alias_resources[full_node_name] = node diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index 8303e2f9c52..41ddfe0a5f3 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -29,7 +29,13 @@ # New for Python models :p import ast from dbt.dataclass_schema import ValidationError -from dbt.exceptions import ParsingException, validator_error_message, UndefinedMacroException +from dbt.exceptions import ( + InvalidModelConfig, + ParsingException, + PythonLiteralEval, + PythonParsingException, + UndefinedMacroException, +) dbt_function_key_words = set(["ref", "source", "config", "get"]) @@ -91,12 +97,7 @@ def _safe_eval(self, node): try: return ast.literal_eval(node) except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc: - msg = validator_error_message( - f"Error when trying to literal_eval an arg to dbt.ref(), dbt.source(), dbt.config() or dbt.config.get() \n{exc}\n" - "https://docs.python.org/3/library/ast.html#ast.literal_eval\n" - "In dbt python model, `dbt.ref`, `dbt.source`, `dbt.config`, `dbt.config.get` function args only support Python literal structures" - ) - raise ParsingException(msg, node=self.dbt_node) from exc + raise PythonLiteralEval(exc, node=self.dbt_node) from exc def _get_call_literals(self, node): # List of literals @@ -199,8 +200,7 @@ def parse_python_model(self, node, config, context): try: tree = ast.parse(node.raw_code, filename=node.original_file_path) except SyntaxError as exc: - msg = validator_error_message(exc) - raise ParsingException(f"{msg}\n{exc.text}", node=node) from exc + raise PythonParsingException(exc, node=node) from exc # We are doing a validator and a parser because visit_FunctionDef in parser # would actually make the parser not doing the visit_Calls any more @@ -251,8 +251,7 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None: except ValidationError as exc: # we got a ValidationError - probably bad types in config() - msg = validator_error_message(exc) - raise ParsingException(msg, node=node) from exc + raise InvalidModelConfig(exc, node=node) from exc return elif not flags.STATIC_PARSER: diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 831647d0322..b5fd8558889 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -50,16 +50,22 @@ UnparsedSourceDefinition, ) from dbt.exceptions import ( - validator_error_message, + CompilationException, + DuplicateMacroPatchName, + DuplicatePatchPath, + DuplicateSourcePatchName, JSONValidationException, - raise_invalid_property_yml_version, - ValidationException, - ParsingException, - raise_duplicate_patch_name, - raise_duplicate_macro_patch_name, InternalException, - raise_duplicate_source_patch_name, - CompilationException, + InvalidSchemaConfig, + InvalidTestConfig, + ParsingException, + PropertyYMLInvalidTag, + PropertyYMLMissingVersion, + PropertyYMLVersionNotInt, + ValidationException, + YamlLoadFailure, + YamlParseDictFailure, + YamlParseListFailure, ) from dbt.events.functions import warn_or_error from dbt.events.types 
import WrongResourceSchemaFile, NoNodeForYamlKey, MacroPatchNotFound @@ -91,34 +97,13 @@ ) -def error_context( - path: str, - key: str, - data: Any, - cause: Union[str, ValidationException, JSONValidationException], -) -> str: - """Provide contextual information about an error while parsing""" - if isinstance(cause, str): - reason = cause - elif isinstance(cause, ValidationError): - reason = validator_error_message(cause) - else: - reason = cause.msg - return "Invalid {key} config given in {path} @ {key}: {data} - {reason}".format( - key=key, path=path, data=data, reason=reason - ) - - def yaml_from_file(source_file: SchemaSourceFile) -> Dict[str, Any]: """If loading the yaml fails, raise an exception.""" path = source_file.path.relative_path try: return load_yaml_text(source_file.contents, source_file.path) except ValidationException as e: - reason = validator_error_message(e) - raise ParsingException( - "Error reading {}: {} - {}".format(source_file.project_name, path, reason) - ) + raise YamlLoadFailure(source_file.project_name, path, e) class ParserRef: @@ -262,7 +247,6 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List GenericTestNode.validate(dct) return GenericTestNode.from_dict(dct) except ValidationError as exc: - msg = validator_error_message(exc) # this is a bit silly, but build an UnparsedNode just for error # message reasons node = self._create_error_node( @@ -271,7 +255,7 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List original_file_path=target.original_file_path, raw_code=raw_code, ) - raise ParsingException(msg, node=node) from exc + raise InvalidTestConfig(exc, node) # lots of time spent in this method def _parse_generic_test( @@ -413,8 +397,7 @@ def render_test_update(self, node, config, builder, schema_file_id): # env_vars should have been updated in the context env_var method except ValidationError as exc: # we got a ValidationError - probably bad types in config() - msg = validator_error_message(exc) - raise ParsingException(msg, node=node) from exc + raise InvalidSchemaConfig(exc, node=node) from exc def parse_node(self, block: GenericTestBlock) -> GenericTestNode: """In schema parsing, we rewrite most of the part of parse_node that @@ -554,25 +537,16 @@ def parse_file(self, block: FileBlock, dct: Dict = None) -> None: def check_format_version(file_path, yaml_dct) -> None: if "version" not in yaml_dct: - raise_invalid_property_yml_version( - file_path, - "the yml property file {} is missing a version tag".format(file_path), - ) + raise PropertyYMLMissingVersion(file_path) version = yaml_dct["version"] # if it's not an integer, the version is malformed, or not # set. Either way, only 'version: 2' is supported. if not isinstance(version, int): - raise_invalid_property_yml_version( - file_path, - "its 'version:' tag must be an integer (e.g. version: 2)." - " {} is not an integer".format(version), - ) + raise PropertyYMLVersionNotInt(file_path, version) + if version != 2: - raise_invalid_property_yml_version( - file_path, - "its 'version:' tag is set to {}. 
Only 2 is supported".format(version), - ) + raise PropertyYMLInvalidTag(file_path, version) Parsed = TypeVar("Parsed", UnpatchedSourceDefinition, ParsedNodePatch, ParsedMacroPatch) @@ -633,8 +607,9 @@ def get_key_dicts(self) -> Iterable[Dict[str, Any]]: # check that entry is a dict and that all dict values # are strings if coerce_dict_str(entry) is None: - msg = error_context(path, self.key, data, "expected a dict with string keys") - raise ParsingException(msg) + raise YamlParseListFailure( + path, self.key, data, "expected a dict with string keys" + ) if "name" not in entry: raise ParsingException("Entry did not contain a name") @@ -681,8 +656,7 @@ def _target_from_dict(self, cls: Type[T], data: Dict[str, Any]) -> T: cls.validate(data) return cls.from_dict(data) except (ValidationError, JSONValidationException) as exc: - msg = error_context(path, self.key, data, exc) - raise ParsingException(msg) from exc + raise YamlParseDictFailure(path, self.key, data, exc) # The other parse method returns TestBlocks. This one doesn't. # This takes the yaml dictionaries in 'sources' keys and uses them @@ -703,7 +677,7 @@ def parse(self) -> List[TestBlock]: # source patches must be unique key = (patch.overrides, patch.name) if key in self.manifest.source_patches: - raise_duplicate_source_patch_name(patch, self.manifest.source_patches[key]) + raise DuplicateSourcePatchName(patch, self.manifest.source_patches[key]) self.manifest.source_patches[key] = patch source_file.source_patches.append(key) else: @@ -807,8 +781,7 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]: self.normalize_docs_attribute(data, path) node = self._target_type().from_dict(data) except (ValidationError, JSONValidationException) as exc: - msg = error_context(path, self.key, data, exc) - raise ParsingException(msg) from exc + raise YamlParseDictFailure(path, self.key, data, exc) else: yield node @@ -932,7 +905,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: if node: if node.patch_path: package_name, existing_file_path = node.patch_path.split("://") - raise_duplicate_patch_name(patch, existing_file_path) + raise DuplicatePatchPath(patch, existing_file_path) source_file.append_patch(patch.yaml_key, node.unique_id) # re-calculate the node config with the patch config. 
Always do this @@ -988,7 +961,7 @@ def parse_patch(self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef) return if macro.patch_path: package_name, existing_file_path = macro.patch_path.split("://") - raise_duplicate_macro_patch_name(patch, existing_file_path) + raise DuplicateMacroPatchName(patch, existing_file_path) source_file.macro_patches[patch.name] = unique_id macro.patch(patch) @@ -1091,8 +1064,7 @@ def parse(self): UnparsedExposure.validate(data) unparsed = UnparsedExposure.from_dict(data) except (ValidationError, JSONValidationException) as exc: - msg = error_context(self.yaml.path, self.key, data, exc) - raise ParsingException(msg) from exc + raise YamlParseDictFailure(self.yaml.path, self.key, data, exc) self.parse_exposure(unparsed) @@ -1209,6 +1181,5 @@ def parse(self): unparsed = UnparsedMetric.from_dict(data) except (ValidationError, JSONValidationException) as exc: - msg = error_context(self.yaml.path, self.key, data, exc) - raise ParsingException(msg) from exc + raise YamlParseDictFailure(self.yaml.path, self.key, data, exc) self.parse_metric(unparsed) diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py index 7fc46d1a05a..dffc7d90641 100644 --- a/core/dbt/parser/snapshots.py +++ b/core/dbt/parser/snapshots.py @@ -4,7 +4,7 @@ from dbt.dataclass_schema import ValidationError from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode -from dbt.exceptions import ParsingException, validator_error_message +from dbt.exceptions import InvalidSnapshopConfig from dbt.node_types import NodeType from dbt.parser.base import SQLParser from dbt.parser.search import BlockContents, BlockSearcher, FileBlock @@ -68,7 +68,7 @@ def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode: self.set_snapshot_attributes(parsed_node) return parsed_node except ValidationError as exc: - raise ParsingException(validator_error_message(exc), node) + raise InvalidSnapshopConfig(exc, node) def parse_file(self, file_block: FileBlock) -> None: blocks = BlockSearcher( diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py index 48db2e772ba..87723a530a1 100644 --- a/core/dbt/task/generate.py +++ b/core/dbt/task/generate.py @@ -22,7 +22,7 @@ ColumnMetadata, CatalogArtifact, ) -from dbt.exceptions import InternalException +from dbt.exceptions import InternalException, AmbiguousCatalogMatch from dbt.include.global_project import DOCS_INDEX_FILE_PATH from dbt.events.functions import fire_event from dbt.events.types import ( @@ -119,7 +119,7 @@ def make_unique_id_map( unique_ids = source_map.get(table.key(), set()) for unique_id in unique_ids: if unique_id in sources: - dbt.exceptions.raise_ambiguous_catalog_match( + raise AmbiguousCatalogMatch( unique_id, sources[unique_id].to_dict(omit_none=True), table.to_dict(omit_none=True), diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 5b88d039904..bc8f9a2de75 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -23,9 +23,9 @@ from dbt.exceptions import ( CompilationException, InternalException, + MissingMaterialization, RuntimeException, ValidationException, - missing_materialization, ) from dbt.events.functions import fire_event, get_invocation_id, info from dbt.events.types import ( @@ -252,7 +252,7 @@ def execute(self, model, manifest): ) if materialization_macro is None: - missing_materialization(model, self.adapter.type()) + raise MissingMaterialization(model=model, adapter_type=self.adapter.type()) if "config" not in context: raise InternalException( @@ -400,7 +400,7 @@ 
def safe_run_hooks( thread_id="main", timing=[], message=f"{hook_type.value} failed, error:\n {exc.msg}", - adapter_response=exc.msg, + adapter_response={}, execution_time=0, failures=1, ) diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 226005497e4..14005203296 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -243,7 +243,7 @@ def call_runner(self, runner): if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast: self._raise_next_tick = FailFastException( - message="Failing early due to test failure or runtime error", + msg="Failing early due to test failure or runtime error", result=result, node=getattr(result, "node", None), ) diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index e48dc94e4e4..26d6d46f028 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -21,7 +21,11 @@ LogTestResult, LogStartLine, ) -from dbt.exceptions import InternalException, invalid_bool_error, missing_materialization +from dbt.exceptions import ( + InternalException, + InvalidBoolean, + MissingMaterialization, +) from dbt.graph import ( ResourceTypeSelector, ) @@ -47,7 +51,7 @@ def convert_bool_type(field) -> bool: try: return bool(strtobool(field)) # type: ignore except ValueError: - raise invalid_bool_error(field, "get_test_sql") + raise InvalidBoolean(field, "get_test_sql") # need this so we catch both true bools and 0/1 return bool(field) @@ -97,7 +101,7 @@ def execute_test( ) if materialization_macro is None: - missing_materialization(test, self.adapter.type()) + raise MissingMaterialization(model=test, adapter_type=self.adapter.type()) if "config" not in context: raise InternalException( diff --git a/core/dbt/utils.py b/core/dbt/utils.py index b7cc6475319..987371b6b02 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -15,7 +15,7 @@ from pathlib import PosixPath, WindowsPath from contextlib import contextmanager -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionException, DuplicateAlias from dbt.events.functions import fire_event from dbt.events.types import RetryExternalCall, RecordRetryException from dbt import flags @@ -365,7 +365,7 @@ def translate_mapping(self, kwargs: Mapping[str, Any]) -> Dict[str, Any]: for key, value in kwargs.items(): canonical_key = self.aliases.get(key, key) if canonical_key in result: - dbt.exceptions.raise_duplicate_alias(kwargs, self.aliases, canonical_key) + raise DuplicateAlias(kwargs, self.aliases, canonical_key) result[canonical_key] = self.translate_value(value) return result diff --git a/plugins/postgres/dbt/adapters/postgres/impl.py b/plugins/postgres/dbt/adapters/postgres/impl.py index 3664e8d2a51..78b86234eae 100644 --- a/plugins/postgres/dbt/adapters/postgres/impl.py +++ b/plugins/postgres/dbt/adapters/postgres/impl.py @@ -8,7 +8,13 @@ from dbt.adapters.postgres import PostgresColumn from dbt.adapters.postgres import PostgresRelation from dbt.dataclass_schema import dbtClassMixin, ValidationError -import dbt.exceptions +from dbt.exceptions import ( + CrossDbReferenceProhibited, + IndexConfigNotDict, + InvalidIndexConfig, + RuntimeException, + UnexpectedDbReference, +) import dbt.utils @@ -40,14 +46,9 @@ def parse(cls, raw_index) -> Optional["PostgresIndexConfig"]: cls.validate(raw_index) return cls.from_dict(raw_index) except ValidationError as exc: - msg = dbt.exceptions.validator_error_message(exc) - dbt.exceptions.raise_compiler_error(f"Could not parse index config: {msg}") + raise InvalidIndexConfig(exc) except TypeError: - 
dbt.exceptions.raise_compiler_error( - f"Invalid index config:\n" - f" Got: {raw_index}\n" - f' Expected a dictionary with at minimum a "columns" key' - ) + raise IndexConfigNotDict(raw_index) @dataclass @@ -73,11 +74,7 @@ def verify_database(self, database): database = database.strip('"') expected = self.config.credentials.database if database.lower() != expected.lower(): - raise dbt.exceptions.NotImplementedException( - "Cross-db references not allowed in {} ({} vs {})".format( - self.type(), database, expected - ) - ) + raise UnexpectedDbReference(self.type(), database, expected) # return an empty string on success so macros can call this return "" @@ -110,12 +107,8 @@ def _get_catalog_schemas(self, manifest): schemas = super()._get_catalog_schemas(manifest) try: return schemas.flatten() - except dbt.exceptions.RuntimeException as exc: - dbt.exceptions.raise_compiler_error( - "Cross-db references not allowed in adapter {}: Got {}".format( - self.type(), exc.msg - ) - ) + except RuntimeException as exc: + raise CrossDbReferenceProhibited(self.type(), exc.msg) def _link_cached_relations(self, manifest): schemas: Set[str] = set() diff --git a/tests/functional/duplicates/test_duplicate_model.py b/tests/functional/duplicates/test_duplicate_model.py index 031ba6236c0..fbcd1b79671 100644 --- a/tests/functional/duplicates/test_duplicate_model.py +++ b/tests/functional/duplicates/test_duplicate_model.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationException, DuplicateResourceName from dbt.tests.fixtures.project import write_project_files from dbt.tests.util import run_dbt, get_manifest @@ -108,7 +108,7 @@ def packages(self): def test_duplicate_model_enabled_across_packages(self, project): run_dbt(["deps"]) message = "dbt found two models with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(DuplicateResourceName) as exc: run_dbt(["run"]) assert message in str(exc.value) diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py index 955953a0dc0..54b5cb6865e 100644 --- a/tests/functional/exit_codes/test_exit_codes.py +++ b/tests/functional/exit_codes/test_exit_codes.py @@ -99,7 +99,7 @@ def packages(self): } def test_deps_fail(self, project): - with pytest.raises(dbt.exceptions.InternalException) as exc: + with pytest.raises(dbt.exceptions.GitCheckoutError) as exc: run_dbt(['deps']) expected_msg = "Error checking out spec='bad-branch'" assert expected_msg in str(exc.value) diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py index 00c14cd711b..44a6696931b 100644 --- a/tests/functional/schema_tests/test_schema_v2_tests.py +++ b/tests/functional/schema_tests/test_schema_v2_tests.py @@ -95,7 +95,7 @@ alt_local_utils__macros__type_timestamp_sql, all_quotes_schema__schema_yml, ) -from dbt.exceptions import ParsingException, CompilationException +from dbt.exceptions import ParsingException, CompilationException, DuplicateResourceName from dbt.contracts.results import TestStatus @@ -904,9 +904,9 @@ def test_generic_test_collision( project, ): """These tests collide, since only the configs differ""" - with pytest.raises(CompilationException) as exc: + with pytest.raises(DuplicateResourceName) as exc: run_dbt() - assert "dbt found two tests with the name" in str(exc) + assert "dbt found two tests with the name" in str(exc.value) class TestGenericTestsConfigCustomMacros: From 
1e35339389ded85631128b66f57c62ca60649c88 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Tue, 20 Dec 2022 11:01:19 +0900 Subject: [PATCH 073/156] Fix typo in util.py (#6037) * Fix typo in util.py identifer -> identifier * Add change log * Update .changes/unreleased/Fixes-20221117-220320.yaml Co-authored-by: Emily Rockman --- .changes/unreleased/Fixes-20221117-220320.yaml | 7 +++++++ core/dbt/tests/util.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Fixes-20221117-220320.yaml diff --git a/.changes/unreleased/Fixes-20221117-220320.yaml b/.changes/unreleased/Fixes-20221117-220320.yaml new file mode 100644 index 00000000000..2f71fe213fc --- /dev/null +++ b/.changes/unreleased/Fixes-20221117-220320.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Fix typo in util.py +time: 2022-11-17T22:03:20.4836855+09:00 +custom: + Author: eltociear + Issue: "4904" + PR: "6037" diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py index bb8b03131b5..3904a90a37d 100644 --- a/core/dbt/tests/util.py +++ b/core/dbt/tests/util.py @@ -236,7 +236,7 @@ def run_sql_with_adapter(adapter, sql, fetch=None): return adapter.run_sql_for_tests(sql, fetch, conn) -# Get a Relation object from the identifer (name of table/view). +# Get a Relation object from the identifier (name of table/view). # Uses the default database and schema. If you need a relation # with a different schema, it should be constructed in the test. # Uses: From c39ea807e83f7429fb0aabaee1574c4a9f1919bb Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Tue, 20 Dec 2022 10:56:08 -0600 Subject: [PATCH 074/156] add back validator_error_message (#6465) --- core/dbt/exceptions.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index 2db130bb44e..515ec86054b 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -2362,3 +2362,14 @@ def materialization_not_available(model, adapter_type): def macro_not_found(model, target_macro_id): msg = f"'{model.unique_id}' references macro '{target_macro_id}' which is not defined!" raise CompilationException(msg=msg, node=model) + + +# adapters use this to format messages. 
it should be deprecated but live on for now +def validator_error_message(exc): + """Given a dbt.dataclass_schema.ValidationError (which is basically a + jsonschema.ValidationError), return the relevant parts as a string + """ + if not isinstance(exc, dbt.dataclass_schema.ValidationError): + return str(exc) + path = "[%s]" % "][".join(map(repr, exc.relative_path)) + return "at path {}: {}".format(path, exc.message) From ce9d0afb8a3cc5f397fe35891e6e784edbffc3e2 Mon Sep 17 00:00:00 2001 From: Kshitij Aranke Date: Tue, 20 Dec 2022 10:56:36 -0800 Subject: [PATCH 075/156] [CT-1591] Don't parse empty Python files (#6433) --- .../unreleased/Fixes-20221213-113915.yaml | 6 ++ core/dbt/parser/models.py | 64 ++++++++++--------- test/unit/test_parser.py | 14 ++++ 3 files changed, 54 insertions(+), 30 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221213-113915.yaml diff --git a/.changes/unreleased/Fixes-20221213-113915.yaml b/.changes/unreleased/Fixes-20221213-113915.yaml new file mode 100644 index 00000000000..b92a2d6cbc9 --- /dev/null +++ b/.changes/unreleased/Fixes-20221213-113915.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: '[CT-1591] Don''t parse empty Python files' +time: 2022-12-13T11:39:15.818464-08:00 +custom: + Author: aranke + Issue: "6345" diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index 41ddfe0a5f3..39bb18be714 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -37,7 +37,6 @@ UndefinedMacroException, ) - dbt_function_key_words = set(["ref", "source", "config", "get"]) dbt_function_full_names = set(["dbt.ref", "dbt.source", "dbt.config", "dbt.config.get"]) @@ -197,41 +196,46 @@ def get_compiled_path(cls, block: FileBlock): return block.path.relative_path def parse_python_model(self, node, config, context): + config_keys_used = [] + config_keys_defaults = [] + try: tree = ast.parse(node.raw_code, filename=node.original_file_path) except SyntaxError as exc: raise PythonParsingException(exc, node=node) from exc - # We are doing a validator and a parser because visit_FunctionDef in parser - # would actually make the parser not doing the visit_Calls any more - dbtValidator = PythonValidationVisitor() - dbtValidator.visit(tree) - dbtValidator.check_error(node) + # Only parse if AST tree has instructions in body + if tree.body: + # We are doing a validator and a parser because visit_FunctionDef in parser + # would actually make the parser not doing the visit_Calls any more + dbt_validator = PythonValidationVisitor() + dbt_validator.visit(tree) + dbt_validator.check_error(node) + + dbt_parser = PythonParseVisitor(node) + dbt_parser.visit(tree) + + for (func, args, kwargs) in dbt_parser.dbt_function_calls: + if func == "get": + num_args = len(args) + if num_args == 0: + raise ParsingException( + "dbt.config.get() requires at least one argument", + node=node, + ) + if num_args > 2: + raise ParsingException( + f"dbt.config.get() takes at most 2 arguments ({num_args} given)", + node=node, + ) + key = args[0] + default_value = args[1] if num_args == 2 else None + config_keys_used.append(key) + config_keys_defaults.append(default_value) + continue + + context[func](*args, **kwargs) - dbtParser = PythonParseVisitor(node) - dbtParser.visit(tree) - config_keys_used = [] - config_keys_defaults = [] - for (func, args, kwargs) in dbtParser.dbt_function_calls: - if func == "get": - num_args = len(args) - if num_args == 0: - raise ParsingException( - "dbt.config.get() requires at least one argument", - node=node, - ) - if num_args > 2: - raise 
ParsingException( - f"dbt.config.get() takes at most 2 arguments ({num_args} given)", - node=node, - ) - key = args[0] - default_value = args[1] if num_args == 2 else None - config_keys_used.append(key) - config_keys_defaults.append(default_value) - continue - - context[func](*args, **kwargs) if config_keys_used: # this is being used in macro build_config_dict context["config"]( diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 7ca68f0e1fd..38e439a696f 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -571,6 +571,8 @@ def model1(dbt, session): return dbt.ref("some_model") """ +python_model_empty_file = """ """ + python_model_multiple_returns = """ def model(dbt, session): dbt.config(materialized='table') @@ -749,6 +751,11 @@ def test_python_model_incorrect_function_name(self): with self.assertRaises(ParsingException): self.parser.parse_file(block) + def test_python_model_empty_file(self): + block = self.file_block_for(python_model_empty_file, "nested/py_model.py") + self.parser.manifest.files[block.file.file_id] = block.file + self.assertIsNone(self.parser.parse_file(block)) + def test_python_model_multiple_returns(self): block = self.file_block_for(python_model_multiple_returns, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file @@ -786,6 +793,13 @@ def test_python_model_custom_materialization(self): node = list(self.parser.manifest.nodes.values())[0] self.assertEqual(node.get_materialization(), "view") + def test_python_model_custom_materialization(self): + block = self.file_block_for(python_model_custom_materialization, 'nested/py_model.py') + self.parser.manifest.files[block.file.file_id] = block.file + self.parser.parse_file(block) + node = list(self.parser.manifest.nodes.values())[0] + self.assertEqual(node.get_materialization(), "view") + class StaticModelParserTest(BaseParserTest): def setUp(self): super().setUp() From 9ecb6e50e4b1c7f1ac99143a5a2cce4e545a9ac3 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Tue, 20 Dec 2022 17:10:01 -0700 Subject: [PATCH 076/156] Treat dense text blobs as binary for `git grep` (#6462) --- .changes/unreleased/Under the Hood-20221219-193435.yaml | 6 ++++++ .gitattributes | 2 ++ 2 files changed, 8 insertions(+) create mode 100644 .changes/unreleased/Under the Hood-20221219-193435.yaml create mode 100644 .gitattributes diff --git a/.changes/unreleased/Under the Hood-20221219-193435.yaml b/.changes/unreleased/Under the Hood-20221219-193435.yaml new file mode 100644 index 00000000000..82388dbb759 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221219-193435.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Treat dense text blobs as binary for `git grep` +time: 2022-12-19T19:34:35.890275-07:00 +custom: + Author: dbeatty10 + Issue: "6294" diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000000..ff6cbc4608f --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +core/dbt/include/index.html binary +tests/functional/artifacts/data/state/*/manifest.json binary From 76fd12c7cdd3f8a0e0c921363fc1fada3eacb278 Mon Sep 17 00:00:00 2001 From: justbldwn <91483530+justbldwn@users.noreply.github.com> Date: Tue, 3 Jan 2023 13:18:55 -0500 Subject: [PATCH 077/156] adding pre-commit install to make dev (#6417) * :sparkles: adding pre-commit install to make dev * :art: updating format of Makefile and CONTRIBUTING.md * :memo: adding changelog via changie new * :sparkles: adding dev_req to Makefile + docs * :art: remove 
dev_req from docs, dry makefile * Align names of `.PHONY` targets with their associated rules Co-authored-by: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Co-authored-by: Doug Beatty --- .changes/unreleased/Fixes-20221212-115912.yaml | 7 +++++++ CONTRIBUTING.md | 7 +++++-- Makefile | 9 +++++++-- 3 files changed, 19 insertions(+), 4 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221212-115912.yaml diff --git a/.changes/unreleased/Fixes-20221212-115912.yaml b/.changes/unreleased/Fixes-20221212-115912.yaml new file mode 100644 index 00000000000..1dc428830eb --- /dev/null +++ b/.changes/unreleased/Fixes-20221212-115912.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: add pre-commit install to make dev script in Makefile +time: 2022-12-12T11:59:12.175136-05:00 +custom: + Author: justbldwn + Issue: "6269" + PR: "6417" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3bbd8d14d5f..893979fd9ac 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -96,12 +96,15 @@ brew install postgresql ### Installation -First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies) with: +First make sure that you set up your `virtualenv` as described in [Setting up an environment](#setting-up-an-environment). Also ensure you have the latest version of pip installed with `pip install --upgrade pip`. Next, install `dbt-core` (and its dependencies): ```sh make dev -# or +``` +or, alternatively: +```sh pip install -r dev-requirements.txt -r editable-requirements.txt +pre-commit install ``` When installed in this way, any changes you make to your local copy of the source code will be reflected immediately in your next `dbt` run. diff --git a/Makefile b/Makefile index 90510ea3855..566c4de9e4d 100644 --- a/Makefile +++ b/Makefile @@ -19,11 +19,16 @@ CI_FLAGS =\ LOG_DIR=./logs\ DBT_LOG_FORMAT=json -.PHONY: dev -dev: ## Installs dbt-* packages in develop mode along with development dependencies. +.PHONY: dev_req +dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies. @\ pip install -r dev-requirements.txt -r editable-requirements.txt +.PHONY: dev +dev: dev_req ## Installs dbt-* packages in develop mode along with development dependencies and pre-commit. + @\ + pre-commit install + .PHONY: mypy mypy: .env ## Runs mypy against staged changes for static type checking. 
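# Usage note for the targets added above (a sketch, assuming a standard
# pre-commit setup): `make dev_req` installs only the dev/editable
# requirements, while `make dev` runs that same install and then
# `pre-commit install`, which registers the repo's git hooks in one step.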
@\ From 6ef3fbbf7608955a8828bb99d03ce6aee29ffd79 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Tue, 3 Jan 2023 17:14:56 -0500 Subject: [PATCH 078/156] Partial parsing bug with empty schema file - ensure None is not passed to load_yaml_text (#6494) --- .../unreleased/Fixes-20230101-223405.yaml | 6 +++ core/dbt/context/base.py | 3 +- core/dbt/flags.py | 39 ++++++++++++++----- core/dbt/parser/schemas.py | 6 +-- .../test_partial_parsing.py | 2 + .../context_methods/test_builtin_functions.py | 5 ++- 6 files changed, 44 insertions(+), 17 deletions(-) create mode 100644 .changes/unreleased/Fixes-20230101-223405.yaml diff --git a/.changes/unreleased/Fixes-20230101-223405.yaml b/.changes/unreleased/Fixes-20230101-223405.yaml new file mode 100644 index 00000000000..d90e24aaa56 --- /dev/null +++ b/.changes/unreleased/Fixes-20230101-223405.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Bug when partial parsing with an empty schema file +time: 2023-01-01T22:34:05.97322-05:00 +custom: + Author: gshank + Issue: "4850" diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index 59984cb96ab..fc218538bac 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -634,9 +634,8 @@ def flags(self) -> Any: {% endif %} This supports all flags defined in flags submodule (core/dbt/flags.py) - TODO: Replace with object that provides read-only access to flag values """ - return flags + return flags.get_flag_obj() @contextmember @staticmethod diff --git a/core/dbt/flags.py b/core/dbt/flags.py index 484071aa609..e2d969ccb36 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -1,7 +1,9 @@ -import os +# Do not import the os package because we expose this package in jinja +from os import name as os_name, path as os_path, getcwd as os_getcwd, getenv as os_getenv import multiprocessing +from argparse import Namespace -if os.name != "nt": +if os_name != "nt": # https://bugs.python.org/issue41567 import multiprocessing.popen_spawn_posix # type: ignore from pathlib import Path @@ -10,14 +12,14 @@ # PROFILES_DIR must be set before the other flags # It also gets set in main.py and in set_from_args because the rpc server # doesn't go through exactly the same main arg processing. -GLOBAL_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt") -LOCAL_PROFILES_DIR = os.getcwd() +GLOBAL_PROFILES_DIR = os_path.join(os_path.expanduser("~"), ".dbt") +LOCAL_PROFILES_DIR = os_getcwd() # Use the current working directory if there is a profiles.yml file present there -if os.path.exists(Path(LOCAL_PROFILES_DIR) / Path("profiles.yml")): +if os_path.exists(Path(LOCAL_PROFILES_DIR) / Path("profiles.yml")): DEFAULT_PROFILES_DIR = LOCAL_PROFILES_DIR else: DEFAULT_PROFILES_DIR = GLOBAL_PROFILES_DIR -PROFILES_DIR = os.path.expanduser(os.getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR)) +PROFILES_DIR = os_path.expanduser(os_getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR)) STRICT_MODE = False # Only here for backwards compatibility FULL_REFRESH = False # subcommand @@ -88,7 +90,7 @@ def env_set_truthy(key: str) -> Optional[str]: """Return the value if it was set to a "truthy" string value or None otherwise. 
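    For example (illustrative, given the check below): a value of "0",
    "false", "f", or an unset/empty variable yields None, while a value
    such as "1" or "true" is returned unchanged as a string, not a bool.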
""" - value = os.getenv(key) + value = os_getenv(key) if not value or value.lower() in ("0", "false", "f"): return None return value @@ -101,7 +103,7 @@ def env_set_bool(env_value): def env_set_path(key: str) -> Optional[Path]: - value = os.getenv(key) + value = os_getenv(key) if value is None: return value else: @@ -181,7 +183,7 @@ def get_flag_value(flag, args, user_config): if flag == "PRINTER_WIDTH": # must be ints flag_value = int(flag_value) if flag == "PROFILES_DIR": - flag_value = os.path.abspath(flag_value) + flag_value = os_path.abspath(flag_value) return flag_value @@ -205,7 +207,7 @@ def _load_flag_value(flag, args, user_config): def _get_flag_value_from_env(flag): # Environment variables use pattern 'DBT_{flag name}' env_flag = _get_env_flag(flag) - env_value = os.getenv(env_flag) + env_value = os_getenv(env_flag) if env_value is None or env_value == "": return None @@ -241,4 +243,21 @@ def get_flag_dict(): "log_cache_events": LOG_CACHE_EVENTS, "quiet": QUIET, "no_print": NO_PRINT, + "cache_selected_only": CACHE_SELECTED_ONLY, + "target_path": TARGET_PATH, + "log_path": LOG_PATH, } + + +# This is used by core/dbt/context/base.py to return a flag object +# in Jinja. +def get_flag_obj(): + new_flags = Namespace() + for k, v in get_flag_dict().items(): + setattr(new_flags, k.upper(), v) + # The following 3 are CLI arguments only so they're not full-fledged flags, + # but we put in flags for users. + setattr(new_flags, "FULL_REFRESH", FULL_REFRESH) + setattr(new_flags, "STORE_FAILURES", STORE_FAILURES) + setattr(new_flags, "WHICH", WHICH) + return new_flags diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index b5fd8558889..5e81c83fdfb 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -99,11 +99,11 @@ def yaml_from_file(source_file: SchemaSourceFile) -> Dict[str, Any]: """If loading the yaml fails, raise an exception.""" - path = source_file.path.relative_path try: - return load_yaml_text(source_file.contents, source_file.path) + # source_file.contents can sometimes be None + return load_yaml_text(source_file.contents or "", source_file.path) except ValidationException as e: - raise YamlLoadFailure(source_file.project_name, path, e) + raise YamlLoadFailure(source_file.project_name, source_file.path.relative_path, e) class ParserRef: diff --git a/test/integration/068_partial_parsing_tests/test_partial_parsing.py b/test/integration/068_partial_parsing_tests/test_partial_parsing.py index 648abdc4657..fce32b42cf1 100644 --- a/test/integration/068_partial_parsing_tests/test_partial_parsing.py +++ b/test/integration/068_partial_parsing_tests/test_partial_parsing.py @@ -477,6 +477,8 @@ def test_postgres_skip_macros(self): # initial run so we have a msgpack file self.setup_directories() self.copy_file('test-files/model_one.sql', 'models/model_one.sql') + # use empty_schema file for bug #4850 + self.copy_file('test-files/empty_schema.yml', 'models/eschema.yml') results = self.run_dbt() # add a new ref override macro diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py index 3a5dff3f2f7..529087c851a 100644 --- a/tests/functional/context_methods/test_builtin_functions.py +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -112,8 +112,9 @@ def test_builtin_invocation_args_dict_function(self, project): expected = "invocation_result: {'debug': True, 'log_format': 'json', 'write_json': True, 'use_colors': True, 'printer_width': 80, 'version_check': True, 
'partial_parse': True, 'static_parser': True, 'profiles_dir': " assert expected in str(result) - expected = "'send_anonymous_usage_stats': False, 'quiet': False, 'no_print': False, 'macro': 'validate_invocation', 'args': '{my_variable: test_variable}', 'which': 'run-operation', 'rpc_method': 'run-operation', 'indirect_selection': 'eager'}" - assert expected in str(result) + expected = ("'send_anonymous_usage_stats': False", "'quiet': False", "'no_print': False", "'cache_selected_only': False", "'macro': 'validate_invocation'", "'args': '{my_variable: test_variable}'", "'which': 'run-operation'", "'rpc_method': 'run-operation'", "'indirect_selection': 'eager'") + for element in expected: + assert element in str(result) def test_builtin_dbt_metadata_envs_function(self, project, monkeypatch): envs = { From 8217ad4722c9b4ce99be3bf3a96ba7b92bec22a7 Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Wed, 4 Jan 2023 10:46:52 +0100 Subject: [PATCH 079/156] Fix issue link for 5859 changelog entry (#6496) --- .changes/1.4.0/Features-20220408-165459.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changes/1.4.0/Features-20220408-165459.yaml b/.changes/1.4.0/Features-20220408-165459.yaml index 12cdf74c757..18675c7244a 100644 --- a/.changes/1.4.0/Features-20220408-165459.yaml +++ b/.changes/1.4.0/Features-20220408-165459.yaml @@ -4,4 +4,4 @@ body: Added favor-state flag to optionally favor state nodes even if unselected time: 2022-04-08T16:54:59.696564+01:00 custom: Author: daniel-murray josephberni - Issue: "2968" + Issue: "5016" From 748a9328117d601560f056f3186e6a52572ff7a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Jan 2023 09:02:37 -0800 Subject: [PATCH 080/156] Update agate requirement from <1.6.4,>=1.6 to >=1.6,<1.7.1 in /core (#6506) * Update agate requirement from <1.6.4,>=1.6 to >=1.6,<1.7.1 in /core Updates the requirements on [agate](https://github.com/wireservice/agate) to permit the latest version. - [Release notes](https://github.com/wireservice/agate/releases) - [Changelog](https://github.com/wireservice/agate/blob/master/CHANGELOG.rst) - [Commits](https://github.com/wireservice/agate/compare/1.6.0...1.7.0) --- updated-dependencies: - dependency-name: agate dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * Add automated changelog yaml from template for bot PR Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Github Build Bot --- .changes/unreleased/Dependencies-20230104-000306.yaml | 6 ++++++ core/setup.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Dependencies-20230104-000306.yaml diff --git a/.changes/unreleased/Dependencies-20230104-000306.yaml b/.changes/unreleased/Dependencies-20230104-000306.yaml new file mode 100644 index 00000000000..9da884ff595 --- /dev/null +++ b/.changes/unreleased/Dependencies-20230104-000306.yaml @@ -0,0 +1,6 @@ +kind: "Dependencies" +body: "Update agate requirement from <1.6.4,>=1.6 to >=1.6,<1.7.1 in /core" +time: 2023-01-04T00:03:06.00000Z +custom: + Author: dependabot[bot] + PR: 6506 diff --git a/core/setup.py b/core/setup.py index 241a70ab6bb..c2c04458ace 100644 --- a/core/setup.py +++ b/core/setup.py @@ -47,7 +47,7 @@ }, install_requires=[ "Jinja2==3.1.2", - "agate>=1.6,<1.6.4", + "agate>=1.6,<1.7.1", "betterproto==1.2.5", "click>=7.0,<9", "colorama>=0.3.9,<0.4.7", From d453964546a017dc3e29e2c82b528353a7597f30 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Wed, 4 Jan 2023 14:28:26 -0500 Subject: [PATCH 081/156] CT 1551 partial parsing logging events (#6474) --- .../Under the Hood-20221221-121904.yaml | 6 + core/dbt/events/proto_types.py | 161 +------------- core/dbt/events/types.proto | 119 +--------- core/dbt/events/types.py | 204 +----------------- core/dbt/parser/manifest.py | 60 +++--- core/dbt/parser/partial.py | 29 +-- tests/unit/test_events.py | 25 +-- 7 files changed, 71 insertions(+), 533 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20221221-121904.yaml diff --git a/.changes/unreleased/Under the Hood-20221221-121904.yaml b/.changes/unreleased/Under the Hood-20221221-121904.yaml new file mode 100644 index 00000000000..d1f2f03bef7 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221221-121904.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Prune partial parsing logging events +time: 2022-12-21T12:19:04.7402-05:00 +custom: + Author: gshank + Issue: "6313" diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 5ee384643d3..bd886243295 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -855,28 +855,13 @@ class MacroFileParse(betterproto.Message): @dataclass -class PartialParsingFullReparseBecauseOfError(betterproto.Message): - """I013""" - - info: "EventInfo" = betterproto.message_field(1) - - -@dataclass -class PartialParsingExceptionFile(betterproto.Message): +class PartialParsingExceptionProcessingFile(betterproto.Message): """I014""" info: "EventInfo" = betterproto.message_field(1) file: str = betterproto.string_field(2) -@dataclass -class PartialParsingFile(betterproto.Message): - """I015""" - - info: "EventInfo" = betterproto.message_field(1) - file_id: str = betterproto.string_field(2) - - @dataclass class PartialParsingException(betterproto.Message): """I016""" @@ -895,77 +880,11 @@ class PartialParsingSkipParsing(betterproto.Message): @dataclass -class PartialParsingMacroChangeStartFullParse(betterproto.Message): - """I018""" - - info: "EventInfo" = betterproto.message_field(1) - - -@dataclass -class PartialParsingProjectEnvVarsChanged(betterproto.Message): - """I019""" - - info: "EventInfo" = betterproto.message_field(1) - - -@dataclass -class 
PartialParsingProfileEnvVarsChanged(betterproto.Message): - """I020""" - - info: "EventInfo" = betterproto.message_field(1) - - -@dataclass -class PartialParsingDeletedMetric(betterproto.Message): - """I021""" - - info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) - - -@dataclass -class ManifestWrongMetadataVersion(betterproto.Message): - """I022""" - - info: "EventInfo" = betterproto.message_field(1) - version: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingVersionMismatch(betterproto.Message): - """I023""" - - info: "EventInfo" = betterproto.message_field(1) - saved_version: str = betterproto.string_field(2) - current_version: str = betterproto.string_field(3) - - -@dataclass -class PartialParsingFailedBecauseConfigChange(betterproto.Message): +class UnableToPartialParse(betterproto.Message): """I024""" info: "EventInfo" = betterproto.message_field(1) - - -@dataclass -class PartialParsingFailedBecauseProfileChange(betterproto.Message): - """I025""" - - info: "EventInfo" = betterproto.message_field(1) - - -@dataclass -class PartialParsingFailedBecauseNewProjectDependency(betterproto.Message): - """I026""" - - info: "EventInfo" = betterproto.message_field(1) - - -@dataclass -class PartialParsingFailedBecauseHashChanged(betterproto.Message): - """I027""" - - info: "EventInfo" = betterproto.message_field(1) + reason: str = betterproto.string_field(2) @dataclass @@ -985,13 +904,6 @@ class ParsedFileLoadFailed(betterproto.Message): exc_info: str = betterproto.string_field(4) -@dataclass -class PartialParseSaveFileNotFound(betterproto.Message): - """I030""" - - info: "EventInfo" = betterproto.message_field(1) - - @dataclass class StaticParserCausedJinjaRendering(betterproto.Message): """I031""" @@ -1075,75 +987,12 @@ class PartialParsingEnabled(betterproto.Message): @dataclass -class PartialParsingAddedFile(betterproto.Message): +class PartialParsingFile(betterproto.Message): """I041""" info: "EventInfo" = betterproto.message_field(1) file_id: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingDeletedFile(betterproto.Message): - """I042""" - - info: "EventInfo" = betterproto.message_field(1) - file_id: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingUpdatedFile(betterproto.Message): - """I043""" - - info: "EventInfo" = betterproto.message_field(1) - file_id: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingNodeMissingInSourceFile(betterproto.Message): - """I044""" - - info: "EventInfo" = betterproto.message_field(1) - file_id: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingMissingNodes(betterproto.Message): - """I045""" - - info: "EventInfo" = betterproto.message_field(1) - file_id: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingChildMapMissingUniqueID(betterproto.Message): - """I046""" - - info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingUpdateSchemaFile(betterproto.Message): - """I047""" - - info: "EventInfo" = betterproto.message_field(1) - file_id: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingDeletedSource(betterproto.Message): - """I048""" - - info: "EventInfo" = betterproto.message_field(1) - unique_id: str = betterproto.string_field(2) - - -@dataclass -class PartialParsingDeletedExposure(betterproto.Message): - """I049""" - - info: "EventInfo" = betterproto.message_field(1) - unique_id: str = 
betterproto.string_field(2) + operation: str = betterproto.string_field(3) @dataclass diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 1c330106d92..ec10b906432 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -653,23 +653,14 @@ message MacroFileParse { string path = 2; } -// I013 -message PartialParsingFullReparseBecauseOfError { - EventInfo info = 1; -} +// Skipping I013 // I014 -message PartialParsingExceptionFile { +message PartialParsingExceptionProcessingFile { EventInfo info = 1; string file = 2; } -// I015 -message PartialParsingFile { - EventInfo info = 1; - string file_id = 2; -} - // I016 message PartialParsingException { EventInfo info = 1; @@ -681,59 +672,18 @@ message PartialParsingSkipParsing { EventInfo info = 1; } -// I018 -message PartialParsingMacroChangeStartFullParse { - EventInfo info = 1; -} - -// I019 -message PartialParsingProjectEnvVarsChanged { - EventInfo info = 1; -} -// I020 -message PartialParsingProfileEnvVarsChanged { - EventInfo info = 1; -} +// Skipped I018, I019, I020, I021, I022, I023 -// I021 -message PartialParsingDeletedMetric { - EventInfo info = 1; - string unique_id = 2; -} - -// I022 -message ManifestWrongMetadataVersion { - EventInfo info = 1; - string version = 2; -} - -// I023 -message PartialParsingVersionMismatch { - EventInfo info = 1; - string saved_version = 2; - string current_version = 3; -} // I024 -message PartialParsingFailedBecauseConfigChange { +message UnableToPartialParse { EventInfo info = 1; + string reason = 2; } -// I025 -message PartialParsingFailedBecauseProfileChange { - EventInfo info = 1; -} +// Skipped I025, I026, I027 -// I026 -message PartialParsingFailedBecauseNewProjectDependency { - EventInfo info = 1; -} - -// I027 -message PartialParsingFailedBecauseHashChanged { - EventInfo info = 1; -} // I028 message PartialParsingNotEnabled { @@ -748,10 +698,8 @@ message ParsedFileLoadFailed { string exc_info = 4; } -// I030 -message PartialParseSaveFileNotFound { - EventInfo info = 1; -} +// Skipping I030 + // I031 message StaticParserCausedJinjaRendering { @@ -816,58 +764,13 @@ message PartialParsingEnabled { } // I041 -message PartialParsingAddedFile { - EventInfo info = 1; - string file_id = 2; -} - -// I042 -message PartialParsingDeletedFile { - EventInfo info = 1; - string file_id = 2; -} - -// I043 -message PartialParsingUpdatedFile { - EventInfo info = 1; - string file_id = 2; -} - -// I044 -message PartialParsingNodeMissingInSourceFile { - EventInfo info = 1; - string file_id=2; -} - -// I045 -message PartialParsingMissingNodes { - EventInfo info = 1; - string file_id = 2; -} - -// I046 -message PartialParsingChildMapMissingUniqueID { - EventInfo info = 1; - string unique_id = 2; -} - -// I047 -message PartialParsingUpdateSchemaFile { +message PartialParsingFile { EventInfo info = 1; string file_id = 2; + string operation = 3; } -// I048 -message PartialParsingDeletedSource { - EventInfo info = 1; - string unique_id = 2; -} - -// I049 -message PartialParsingDeletedExposure { - EventInfo info = 1; - string unique_id = 2; -} +// Skipped I042, I043, I044, I045, I046, I047, I048, I049 // I050 message InvalidDisabledTargetInTestNode { diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 0a0cd04fe1d..b7a8870cf36 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -975,19 +975,11 @@ def message(self) -> str: return f"Parsing {self.path}" -@dataclass -class PartialParsingFullReparseBecauseOfError( - InfoLevel, 
pt.PartialParsingFullReparseBecauseOfError -): - def code(self): - return "I013" - - def message(self) -> str: - return "Partial parsing enabled but an error occurred. Switching to a full re-parse." +# Skipping I013 @dataclass -class PartialParsingExceptionFile(DebugLevel, pt.PartialParsingExceptionFile): +class PartialParsingExceptionProcessingFile(DebugLevel, pt.PartialParsingExceptionProcessingFile): def code(self): return "I014" @@ -995,13 +987,7 @@ def message(self) -> str: return f"Partial parsing exception processing file {self.file}" -@dataclass -class PartialParsingFile(DebugLevel, pt.PartialParsingFile): - def code(self): - return "I015" - - def message(self) -> str: - return f"PP file: {self.file_id}" +# Skipped I015 @dataclass @@ -1022,112 +1008,19 @@ def message(self) -> str: return "Partial parsing enabled, no changes found, skipping parsing" -@dataclass -class PartialParsingMacroChangeStartFullParse( - InfoLevel, pt.PartialParsingMacroChangeStartFullParse -): - def code(self): - return "I018" - - def message(self) -> str: - return "Change detected to override macro used during parsing. Starting full parse." - - -@dataclass -class PartialParsingProjectEnvVarsChanged(InfoLevel, pt.PartialParsingProjectEnvVarsChanged): - def code(self): - return "I019" - - def message(self) -> str: - return "Unable to do partial parsing because env vars used in dbt_project.yml have changed" - - -@dataclass -class PartialParsingProfileEnvVarsChanged(InfoLevel, pt.PartialParsingProfileEnvVarsChanged): - def code(self): - return "I020" - - def message(self) -> str: - return "Unable to do partial parsing because env vars used in profiles.yml have changed" - - -@dataclass -class PartialParsingDeletedMetric(DebugLevel, pt.PartialParsingDeletedMetric): - def code(self): - return "I021" - - def message(self) -> str: - return f"Partial parsing: deleted metric {self.unique_id}" - - -@dataclass -class ManifestWrongMetadataVersion(DebugLevel, pt.ManifestWrongMetadataVersion): - def code(self): - return "I022" - - def message(self) -> str: - return ( - "Manifest metadata did not contain correct version. " - f"Contained '{self.version}' instead." - ) +# Skipped I018, I019, I020, I021, I022, I023 @dataclass -class PartialParsingVersionMismatch(InfoLevel, pt.PartialParsingVersionMismatch): - def code(self): - return "I023" - - def message(self) -> str: - return ( - "Unable to do partial parsing because of a dbt version mismatch. " - f"Saved manifest version: {self.saved_version}. " - f"Current version: {self.current_version}." 
- ) - - -@dataclass -class PartialParsingFailedBecauseConfigChange( - InfoLevel, pt.PartialParsingFailedBecauseConfigChange -): +class UnableToPartialParse(InfoLevel, pt.UnableToPartialParse): def code(self): return "I024" def message(self) -> str: - return ( - "Unable to do partial parsing because config vars, " - "config profile, or config target have changed" - ) - - -@dataclass -class PartialParsingFailedBecauseProfileChange( - InfoLevel, pt.PartialParsingFailedBecauseProfileChange -): - def code(self): - return "I025" - - def message(self) -> str: - return "Unable to do partial parsing because profile has changed" - - -@dataclass -class PartialParsingFailedBecauseNewProjectDependency( - InfoLevel, pt.PartialParsingFailedBecauseNewProjectDependency -): - def code(self): - return "I026" - - def message(self) -> str: - return "Unable to do partial parsing because a project dependency has been added" - + return f"Unable to do partial parsing because {self.reason}" -@dataclass -class PartialParsingFailedBecauseHashChanged(InfoLevel, pt.PartialParsingFailedBecauseHashChanged): - def code(self): - return "I027" - def message(self) -> str: - return "Unable to do partial parsing because a project config has changed" +# Skipped I025, I026, I026, I027 @dataclass @@ -1148,13 +1041,7 @@ def message(self) -> str: return f"Failed to load parsed file from disk at {self.path}: {self.exc}" -@dataclass -class PartialParseSaveFileNotFound(InfoLevel, pt.PartialParseSaveFileNotFound): - def code(self): - return "I030" - - def message(self) -> str: - return "Partial parse save file not found. Starting full parse." +# Skipped I030 @dataclass @@ -1255,84 +1142,15 @@ def message(self) -> str: @dataclass -class PartialParsingAddedFile(DebugLevel, pt.PartialParsingAddedFile): +class PartialParsingFile(DebugLevel, pt.PartialParsingFile): def code(self): return "I041" def message(self) -> str: - return f"Partial parsing: added file: {self.file_id}" - - -@dataclass -class PartialParsingDeletedFile(DebugLevel, pt.PartialParsingDeletedFile): - def code(self): - return "I042" - - def message(self) -> str: - return f"Partial parsing: deleted file: {self.file_id}" - - -@dataclass -class PartialParsingUpdatedFile(DebugLevel, pt.PartialParsingUpdatedFile): - def code(self): - return "I043" - - def message(self) -> str: - return f"Partial parsing: updated file: {self.file_id}" - - -@dataclass -class PartialParsingNodeMissingInSourceFile(DebugLevel, pt.PartialParsingNodeMissingInSourceFile): - def code(self): - return "I044" - - def message(self) -> str: - return f"Partial parsing: nodes list not found in source_file {self.file_id}" - - -@dataclass -class PartialParsingMissingNodes(DebugLevel, pt.PartialParsingMissingNodes): - def code(self): - return "I045" - - def message(self) -> str: - return f"No nodes found for source file {self.file_id}" + return f"Partial parsing: {self.operation} file: {self.file_id}" -@dataclass -class PartialParsingChildMapMissingUniqueID(DebugLevel, pt.PartialParsingChildMapMissingUniqueID): - def code(self): - return "I046" - - def message(self) -> str: - return f"Partial parsing: {self.unique_id} not found in child_map" - - -@dataclass -class PartialParsingUpdateSchemaFile(DebugLevel, pt.PartialParsingUpdateSchemaFile): - def code(self): - return "I047" - - def message(self) -> str: - return f"Partial parsing: update schema file: {self.file_id}" - - -@dataclass -class PartialParsingDeletedSource(DebugLevel, pt.PartialParsingDeletedSource): - def code(self): - return "I048" - - def 
message(self) -> str: - return f"Partial parsing: deleted source {self.unique_id}" - - -@dataclass -class PartialParsingDeletedExposure(DebugLevel, pt.PartialParsingDeletedExposure): - def code(self): - return "I049" - - def message(self) -> str: - return f"Partial parsing: deleted exposure {self.unique_id}" +# Skipped I042, I043, I044, I045, I046, I047, I048, I049 @dataclass diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 9da68736031..108b73e06f4 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -20,24 +20,13 @@ from dbt.helper_types import PathSet from dbt.events.functions import fire_event, get_invocation_id, warn_or_error from dbt.events.types import ( - PartialParsingFullReparseBecauseOfError, - PartialParsingExceptionFile, - PartialParsingFile, + PartialParsingExceptionProcessingFile, PartialParsingException, PartialParsingSkipParsing, - PartialParsingMacroChangeStartFullParse, - ManifestWrongMetadataVersion, - PartialParsingVersionMismatch, - PartialParsingFailedBecauseConfigChange, - PartialParsingFailedBecauseProfileChange, - PartialParsingFailedBecauseNewProjectDependency, - PartialParsingFailedBecauseHashChanged, + UnableToPartialParse, PartialParsingNotEnabled, ParsedFileLoadFailed, - PartialParseSaveFileNotFound, InvalidDisabledTargetInTestNode, - PartialParsingProjectEnvVarsChanged, - PartialParsingProfileEnvVarsChanged, NodeNotFoundOrDisabled, ) from dbt.logger import DbtProcessState @@ -260,7 +249,11 @@ def load(self): except Exception as exc: # pp_files should still be the full set and manifest is new manifest, # since get_parsing_files failed - fire_event(PartialParsingFullReparseBecauseOfError()) + fire_event( + UnableToPartialParse( + reason="an error occurred. Switching to full reparse." + ) + ) # Get traceback info tb_info = traceback.format_exc() @@ -284,8 +277,7 @@ def load(self): source_file = self.manifest.files[file_id] if source_file: parse_file_type = source_file.parse_file_type - fire_event(PartialParsingExceptionFile(file=file_id)) - fire_event(PartialParsingFile(file_id=source_file.file_id)) + fire_event(PartialParsingExceptionProcessingFile(file=file_id)) exc_info["parse_file_type"] = parse_file_type fire_event(PartialParsingException(exc_info=exc_info)) @@ -312,7 +304,11 @@ def load(self): # If we're partially parsing check that certain macros have not been changed if self.partially_parsing and self.skip_partial_parsing_because_of_macros(): - fire_event(PartialParsingMacroChangeStartFullParse()) + fire_event( + UnableToPartialParse( + reason="change detected to override macro. Starting full parse." + ) + ) # Get new Manifest with original file records and move over the macros self.manifest = self.new_manifest # contains newly read files @@ -544,7 +540,7 @@ def write_manifest_for_partial_parse(self): # saved manifest not matching the code version. if self.manifest.metadata.dbt_version != __version__: fire_event( - ManifestWrongMetadataVersion(version=self.manifest.metadata.dbt_version) + UnableToPartialParse(reason="saved manifest contained the wrong version") ) self.manifest.metadata.dbt_version = __version__ manifest_msgpack = self.manifest.to_msgpack() @@ -563,35 +559,37 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]: if manifest.metadata.dbt_version != __version__: # #3757 log both versions because of reports of invalid cases of mismatch. 
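        # (A sketch of the pattern applied throughout this hunk: each retired
        # PartialParsing*/version-mismatch event collapses into
        # UnableToPartialParse, whose `reason` completes the sentence in the
        # I024 template, e.g.
        #   fire_event(UnableToPartialParse(reason="of a version mismatch"))
        # renders as "Unable to do partial parsing because of a version
        # mismatch".)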
- fire_event( - PartialParsingVersionMismatch( - saved_version=manifest.metadata.dbt_version, current_version=__version__ - ) - ) + fire_event(UnableToPartialParse(reason="of a version mismatch")) # If the version is wrong, the other checks might not work return False, ReparseReason.version_mismatch if self.manifest.state_check.vars_hash != manifest.state_check.vars_hash: - fire_event(PartialParsingFailedBecauseConfigChange()) + fire_event( + UnableToPartialParse( + reason="config vars, config profile, or config target have changed" + ) + ) valid = False reparse_reason = ReparseReason.vars_changed if self.manifest.state_check.profile_hash != manifest.state_check.profile_hash: # Note: This should be made more granular. We shouldn't need to invalidate # partial parsing if a non-used profile section has changed. - fire_event(PartialParsingFailedBecauseProfileChange()) + fire_event(UnableToPartialParse(reason="profile has changed")) valid = False reparse_reason = ReparseReason.profile_changed if ( self.manifest.state_check.project_env_vars_hash != manifest.state_check.project_env_vars_hash ): - fire_event(PartialParsingProjectEnvVarsChanged()) + fire_event( + UnableToPartialParse(reason="env vars used in dbt_project.yml have changed") + ) valid = False reparse_reason = ReparseReason.proj_env_vars_changed if ( self.manifest.state_check.profile_env_vars_hash != manifest.state_check.profile_env_vars_hash ): - fire_event(PartialParsingProfileEnvVarsChanged()) + fire_event(UnableToPartialParse(reason="env vars used in profiles.yml have changed")) valid = False reparse_reason = ReparseReason.prof_env_vars_changed @@ -601,7 +599,7 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]: if k not in manifest.state_check.project_hashes } if missing_keys: - fire_event(PartialParsingFailedBecauseNewProjectDependency()) + fire_event(UnableToPartialParse(reason="a project dependency has been added")) valid = False reparse_reason = ReparseReason.deps_changed @@ -609,7 +607,7 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]: if key in manifest.state_check.project_hashes: old_value = manifest.state_check.project_hashes[key] if new_value != old_value: - fire_event(PartialParsingFailedBecauseHashChanged()) + fire_event(UnableToPartialParse(reason="a project config has changed")) valid = False reparse_reason = ReparseReason.project_config_changed return valid, reparse_reason @@ -662,7 +660,9 @@ def read_manifest_for_partial_parse(self) -> Optional[Manifest]: ) reparse_reason = ReparseReason.load_file_failure else: - fire_event(PartialParseSaveFileNotFound()) + fire_event( + UnableToPartialParse(reason="saved manifest not found. 
Starting full parse.") + ) reparse_reason = ReparseReason.file_not_found # this event is only fired if a full reparse is needed diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py index 63ef33429c4..eafb49efe76 100644 --- a/core/dbt/parser/partial.py +++ b/core/dbt/parser/partial.py @@ -10,16 +10,7 @@ from dbt.events.functions import fire_event from dbt.events.types import ( PartialParsingEnabled, - PartialParsingAddedFile, - PartialParsingDeletedFile, - PartialParsingUpdatedFile, - PartialParsingNodeMissingInSourceFile, - PartialParsingMissingNodes, - PartialParsingChildMapMissingUniqueID, - PartialParsingUpdateSchemaFile, - PartialParsingDeletedSource, - PartialParsingDeletedExposure, - PartialParsingDeletedMetric, + PartialParsingFile, ) from dbt.constants import DEFAULT_ENV_PLACEHOLDER from dbt.node_types import NodeType @@ -234,7 +225,7 @@ def add_to_saved(self, file_id): self.saved_files[file_id] = source_file # update pp_files to parse self.add_to_pp_files(source_file) - fire_event(PartialParsingAddedFile(file_id=file_id)) + fire_event(PartialParsingFile(operation="added", file_id=file_id)) def handle_added_schema_file(self, source_file): source_file.pp_dict = source_file.dict_from_yaml.copy() @@ -282,7 +273,7 @@ def delete_from_saved(self, file_id): if saved_source_file.parse_file_type == ParseFileType.Documentation: self.delete_doc_node(saved_source_file) - fire_event(PartialParsingDeletedFile(file_id=file_id)) + fire_event(PartialParsingFile(operation="deleted", file_id=file_id)) # Updates for non-schema files def update_in_saved(self, file_id): @@ -297,7 +288,7 @@ def update_in_saved(self, file_id): self.update_doc_in_saved(new_source_file, old_source_file) else: raise Exception(f"Invalid parse_file_type in source_file {file_id}") - fire_event(PartialParsingUpdatedFile(file_id=file_id)) + fire_event(PartialParsingFile(operation="updated", file_id=file_id)) # Models, seeds, snapshots: patches and tests # analyses: patches, no tests @@ -312,10 +303,6 @@ def update_mssat_in_saved(self, new_source_file, old_source_file): unique_ids = [] if old_source_file.nodes: unique_ids = old_source_file.nodes - else: - # It's not clear when this would actually happen. - # Logging in case there are other associated errors. - fire_event(PartialParsingNodeMissingInSourceFile(file_id=old_source_file.file_id)) # replace source_file in saved and add to parsing list file_id = new_source_file.file_id @@ -386,7 +373,6 @@ def remove_mssat_file(self, source_file): # nodes [unique_ids] -- SQL files # There should always be a node for a SQL file if not source_file.nodes: - fire_event(PartialParsingMissingNodes(file_id=source_file.file_id)) return # There is generally only 1 node for SQL files, except for macros and snapshots for unique_id in source_file.nodes: @@ -398,8 +384,6 @@ def schedule_referencing_nodes_for_parsing(self, unique_id): # Look at "children", i.e. 
nodes that reference this node if unique_id in self.saved_manifest.child_map: self.schedule_nodes_for_parsing(self.saved_manifest.child_map[unique_id]) - else: - fire_event(PartialParsingChildMapMissingUniqueID(unique_id=unique_id)) def schedule_nodes_for_parsing(self, unique_ids): for unique_id in unique_ids: @@ -611,7 +595,7 @@ def change_schema_file(self, file_id): # schedule parsing self.add_to_pp_files(saved_schema_file) # schema_file pp_dict should have been generated already - fire_event(PartialParsingUpdateSchemaFile(file_id=file_id)) + fire_event(PartialParsingFile(operation="updated", file_id=file_id)) # Delete schema files -- a variation on change_schema_file def delete_schema_file(self, file_id): @@ -883,7 +867,6 @@ def delete_schema_source(self, schema_file, source_dict): self.deleted_manifest.sources[unique_id] = source schema_file.sources.remove(unique_id) self.schedule_referencing_nodes_for_parsing(unique_id) - fire_event(PartialParsingDeletedSource(unique_id=unique_id)) def delete_schema_macro_patch(self, schema_file, macro): # This is just macro patches that need to be reapplied @@ -912,7 +895,6 @@ def delete_schema_exposure(self, schema_file, exposure_dict): unique_id ) schema_file.exposures.remove(unique_id) - fire_event(PartialParsingDeletedExposure(unique_id=unique_id)) elif unique_id in self.saved_manifest.disabled: self.delete_disabled(unique_id, schema_file.file_id) @@ -931,7 +913,6 @@ def delete_schema_metric(self, schema_file, metric_dict): unique_id ) schema_file.metrics.remove(unique_id) - fire_event(PartialParsingDeletedMetric(unique_id=unique_id)) elif unique_id in self.saved_manifest.disabled: self.delete_disabled(unique_id, schema_file.file_id) diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 3dbff04c303..e37d26ad552 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -255,24 +255,13 @@ def MockNode(): ParseCmdPerfInfoPath(path=""), GenericTestFileParse(path=""), MacroFileParse(path=""), - PartialParsingFullReparseBecauseOfError(), - PartialParsingExceptionFile(file=""), + PartialParsingExceptionProcessingFile(file=""), PartialParsingFile(file_id=""), PartialParsingException(exc_info={}), PartialParsingSkipParsing(), - PartialParsingMacroChangeStartFullParse(), - PartialParsingProjectEnvVarsChanged(), - PartialParsingProfileEnvVarsChanged(), - PartialParsingDeletedMetric(unique_id=""), - ManifestWrongMetadataVersion(version=""), - PartialParsingVersionMismatch(saved_version="", current_version=""), - PartialParsingFailedBecauseConfigChange(), - PartialParsingFailedBecauseProfileChange(), - PartialParsingFailedBecauseNewProjectDependency(), - PartialParsingFailedBecauseHashChanged(), + UnableToPartialParse(reason="something went wrong"), PartialParsingNotEnabled(), ParsedFileLoadFailed(path="", exc="", exc_info=""), - PartialParseSaveFileNotFound(), StaticParserCausedJinjaRendering(path=""), UsingExperimentalParser(path=""), SampleFullJinjaRendering(path=""), @@ -283,15 +272,7 @@ def MockNode(): ExperimentalParserSuccess(path=""), ExperimentalParserFailure(path=""), PartialParsingEnabled(deleted=0, added=0, changed=0), - PartialParsingAddedFile(file_id=""), - PartialParsingDeletedFile(file_id=""), - PartialParsingUpdatedFile(file_id=""), - PartialParsingNodeMissingInSourceFile(file_id=""), - PartialParsingMissingNodes(file_id=""), - PartialParsingChildMapMissingUniqueID(unique_id=""), - PartialParsingUpdateSchemaFile(file_id=""), - PartialParsingDeletedSource(unique_id=""), - 
PartialParsingDeletedExposure(unique_id=""), + PartialParsingFile(file_id=""), InvalidDisabledTargetInTestNode( resource_type_title="", unique_id="", From 54538409509e0d677876f1102aa3bc0c67007278 Mon Sep 17 00:00:00 2001 From: Niall Woodward Date: Wed, 4 Jan 2023 23:53:35 +0000 Subject: [PATCH 082/156] DBT_FAVOR_STATE_STATE -> DBT_FAVOR_STATE (#6392) * DBT_FAVOR_STATE_STATE -> DBT_FAVOR_STATE * add changelog --- .changes/unreleased/Fixes-20230104-141047.yaml | 7 +++++++ core/dbt/flags.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Fixes-20230104-141047.yaml diff --git a/.changes/unreleased/Fixes-20230104-141047.yaml b/.changes/unreleased/Fixes-20230104-141047.yaml new file mode 100644 index 00000000000..9d5466fbe68 --- /dev/null +++ b/.changes/unreleased/Fixes-20230104-141047.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Fix DBT_FAVOR_STATE env var +time: 2023-01-04T14:10:47.637495-08:00 +custom: + Author: NiallRees + Issue: "5859" + PR: "6392" diff --git a/core/dbt/flags.py b/core/dbt/flags.py index e2d969ccb36..cecc024d7f4 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -112,7 +112,7 @@ def env_set_path(key: str) -> Optional[Path]: MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING") DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE") -FAVOR_STATE_MODE = env_set_truthy("DBT_FAVOR_STATE_STATE") +FAVOR_STATE_MODE = env_set_truthy("DBT_FAVOR_STATE") ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH") ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER") From 9d6f961d2bfcfc2215d2fb328cb35bfbc32c7daa Mon Sep 17 00:00:00 2001 From: justbldwn <91483530+justbldwn@users.noreply.github.com> Date: Thu, 5 Jan 2023 15:28:27 -0500 Subject: [PATCH 083/156] =?UTF-8?q?=F0=9F=8E=A8=20add=20missing=20fstrings?= =?UTF-8?q?,=20convert=20format=20to=20fstring=20(#6243)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Gerda Shank --- .../unreleased/Fixes-20221113-104150.yaml | 7 ++ core/dbt/events/types.py | 70 ++++++++----------- 2 files changed, 37 insertions(+), 40 deletions(-) create mode 100644 .changes/unreleased/Fixes-20221113-104150.yaml diff --git a/.changes/unreleased/Fixes-20221113-104150.yaml b/.changes/unreleased/Fixes-20221113-104150.yaml new file mode 100644 index 00000000000..75c34bda436 --- /dev/null +++ b/.changes/unreleased/Fixes-20221113-104150.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: fix missing f-strings, convert old .format() messages to f-strings for consistency +time: 2022-11-13T10:41:50.009727-05:00 +custom: + Author: justbldwn + Issue: "6241" + PR: "6243" diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index b7a8870cf36..681702d5dd4 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -290,7 +290,7 @@ def code(self): return "A026" def message(self) -> str: - return """ + return f""" Your new dbt project "{self.project_name}" was created! 
For more information on how to configure the profiles.yml file, @@ -463,7 +463,7 @@ def code(self): return "E005" def message(self) -> str: - return f'Acquiring new {self.conn_type} connection "{self.conn_name}"' + return f"Acquiring new {self.conn_type} connection '{self.conn_name}'" @dataclass @@ -539,7 +539,7 @@ def code(self): def message(self) -> str: return ( f'On "{self.conn_name}": cache miss for schema ' - '"{self.database}.{self.schema}", this is inefficient' + f'"{self.database}.{self.schema}", this is inefficient' ) @@ -625,7 +625,7 @@ def code(self): def message(self) -> str: return ( f"{self.dep_key} references {str(self.ref_key)} " - "but {self.ref_key.database}.{self.ref_key.schema}" + f"but {self.ref_key.database}.{self.ref_key.schema}" "is not in the cache, skipping assumed external relation" ) @@ -683,7 +683,7 @@ def code(self): def message(self) -> str: return ( f"updated reference from {self.old_key} -> {self.cached_key} to " - "{self.new_key} -> {self.cached_key}" + f"{self.new_key} -> {self.cached_key}" ) @@ -1159,18 +1159,15 @@ def code(self): return "I050" def message(self) -> str: - target_package_string = "" + if self.target_package != target_package_string: - target_package_string = "in package '{}' ".format(self.target_package) - - msg = "{} '{}' ({}) depends on a {} named '{}' {}which is disabled".format( - self.resource_type_title, - self.unique_id, - self.original_file_path, - self.target_kind, - self.target_name, - target_package_string, + target_package_string = f"in package '{self.target_package}' " + + msg = ( + f"{self.resource_type_title} '{self.unique_id}' " + f"({self.original_file_path}) depends on a {self.target_kind} " + f"named '{self.target_name}' {target_package_string}which is disabled" ) return warning_tag(msg) @@ -1320,17 +1317,14 @@ def message(self) -> str: reason = "was not found" target_package_string = "" + if self.target_package is not None: - target_package_string = "in package '{}' ".format(self.target_package) - - msg = "{} '{}' ({}) depends on a {} named '{}' {}which {}".format( - self.resource_type_title, - self.unique_id, - self.original_file_path, - self.target_kind, - self.target_name, - target_package_string, - reason, + target_package_string = f"in package '{self.target_package}' " + + msg = ( + f"{self.resource_type_title} '{self.unique_id}' " + f"({self.original_file_path}) depends on a {self.target_kind} " + f"named '{self.target_name}' {target_package_string}which {reason}" ) return warning_tag(msg) @@ -1523,10 +1517,8 @@ def code(self): return "M019" def message(self) -> str: - return "Updates available for packages: {} \ - \nUpdate your versions in packages.yml, then run dbt deps".format( - self.packages.value - ) + return f"Updates available for packages: {self.packages.value} \ + \nUpdate your versions in packages.yml, then run dbt deps" @dataclass @@ -1884,7 +1876,7 @@ def code(self): return "Q022" def message(self) -> str: - msg = "CANCEL query {}".format(self.conn_name) + msg = f"CANCEL query {self.conn_name}" return format_fancy_output_line(msg=msg, status=red("CANCEL"), index=None, total=None) @@ -1986,7 +1978,7 @@ def code(self): return "Q033" def message(self) -> str: - msg = "OK hook: {}".format(self.statement) + msg = f"OK hook: {self.statement}" return format_fancy_output_line( msg=msg, status=green(self.status), @@ -2070,15 +2062,13 @@ def code(self): return "W003" def message(self) -> str: - prefix = "Internal error executing {}".format(self.build_path) + prefix = f"Internal error executing 
{self.build_path}" internal_error_string = """This is an error in dbt. Please try again. If \ the error persists, open an issue at https://github.com/dbt-labs/dbt-core """.strip() - return "{prefix}\n{error}\n\n{note}".format( - prefix=red(prefix), error=str(self.exc).strip(), note=internal_error_string - ) + return f"{red(prefix)}\n" f"{str(self.exc).strip()}\n\n" f"{internal_error_string}" @dataclass @@ -2090,8 +2080,8 @@ def message(self) -> str: node_description = self.build_path if node_description is None: node_description = self.unique_id - prefix = "Unhandled error while executing {}".format(node_description) - return "{prefix}\n{error}".format(prefix=red(prefix), error=str(self.exc).strip()) + prefix = f"Unhandled error while executing {node_description}" + return f"{red(prefix)}\n{str(self.exc).strip()}" @dataclass @@ -2100,7 +2090,7 @@ def code(self): return "W005" def message(self) -> str: - return "Error releasing connection for node {}: {!s}".format(self.node_name, self.exc) + return f"Error releasing connection for node {self.node_name}: {str(self.exc)}" @dataclass @@ -2409,9 +2399,9 @@ def message(self) -> str: if self.keyboard_interrupt: message = yellow("Exited because of keyboard interrupt.") elif self.num_errors > 0: - message = red("Completed with {} and {}:".format(error_plural, warn_plural)) + message = red(f"Completed with {error_plural} and {warn_plural}:") elif self.num_warnings > 0: - message = yellow("Completed with {}:".format(warn_plural)) + message = yellow(f"Completed with {warn_plural}:") else: message = green("Completed successfully") return message From 9ef236601bb333ffe8ac81322bc7cc057f85327e Mon Sep 17 00:00:00 2001 From: Callum McCann <101437052+callum-mcdata@users.noreply.github.com> Date: Thu, 5 Jan 2023 14:49:44 -0600 Subject: [PATCH 084/156] Updating metric attributes renamed event (#6518) * updating message * adding changie --- .changes/unreleased/Under the Hood-20230104-155257.yaml | 6 ++++++ core/dbt/events/types.py | 1 - 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Under the Hood-20230104-155257.yaml diff --git a/.changes/unreleased/Under the Hood-20230104-155257.yaml b/.changes/unreleased/Under the Hood-20230104-155257.yaml new file mode 100644 index 00000000000..2d10f09d857 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230104-155257.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Updating the deprecation warning in the metric attributes renamed event +time: 2023-01-04T15:52:57.916398-06:00 +custom: + Author: callum-mcdata + Issue: "6507" diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 681702d5dd4..6a597184cd7 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -392,7 +392,6 @@ def message(self): "\n 'sql' -> 'expression'" "\n 'type' -> 'calculation_method'" "\n 'type: expression' -> 'calculation_method: derived'" - "\nThe old metric parameter names will be fully deprecated in v1.4." 
f"\nPlease remove them from the metric definition of metric '{self.metric_name}'" "\nRelevant issue here: https://github.com/dbt-labs/dbt-core/issues/5849" ) From d43c070007b823588e7a170c6c37495ce319b011 Mon Sep 17 00:00:00 2001 From: Callum McCann <101437052+callum-mcdata@users.noreply.github.com> Date: Thu, 5 Jan 2023 14:49:55 -0600 Subject: [PATCH 085/156] Making Metric Timestamps Optional (#6402) * changing to optional * adding tests * tests and changie * pre-commit cleaning * formatting fixes * pre-commit update --- .../unreleased/Features-20221207-091722.yaml | 7 ++ core/dbt/contracts/graph/nodes.py | 2 +- core/dbt/contracts/graph/unparsed.py | 12 +++- tests/functional/metrics/fixtures.py | 55 +++++++++++++++ tests/functional/metrics/test_metrics.py | 69 +++++++++++++++++++ 5 files changed, 143 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Features-20221207-091722.yaml diff --git a/.changes/unreleased/Features-20221207-091722.yaml b/.changes/unreleased/Features-20221207-091722.yaml new file mode 100644 index 00000000000..16845f3663e --- /dev/null +++ b/.changes/unreleased/Features-20221207-091722.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Making timestamp optional for metrics +time: 2022-12-07T09:17:22.571877-06:00 +custom: + Author: callum-mcdata + Issue: "6398" + PR: "9400" diff --git a/core/dbt/contracts/graph/nodes.py b/core/dbt/contracts/graph/nodes.py index 033318a34c1..730e2286ccd 100644 --- a/core/dbt/contracts/graph/nodes.py +++ b/core/dbt/contracts/graph/nodes.py @@ -976,12 +976,12 @@ class Metric(GraphNode): description: str label: str calculation_method: str - timestamp: str expression: str filters: List[MetricFilter] time_grains: List[str] dimensions: List[str] resource_type: NodeType = field(metadata={"restrict": [NodeType.Metric]}) + timestamp: Optional[str] = None window: Optional[MetricTime] = None model: Optional[str] = None model_unique_id: Optional[str] = None diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py index 453dc883d7b..ba2e48c7c9c 100644 --- a/core/dbt/contracts/graph/unparsed.py +++ b/core/dbt/contracts/graph/unparsed.py @@ -484,9 +484,9 @@ class UnparsedMetric(dbtClassMixin, Replaceable): name: str label: str calculation_method: str - timestamp: str expression: str description: str = "" + timestamp: Optional[str] = None time_grains: List[str] = field(default_factory=list) dimensions: List[str] = field(default_factory=list) window: Optional[MetricTime] = None @@ -518,6 +518,16 @@ def validate(cls, data): f"The metric name '{data['name']}' is invalid. It {', '.join(e for e in errors)}" ) + if data.get("timestamp") is None and data.get("time_grains") is not None: + raise ValidationError( + f"The metric '{data['name']} has time_grains defined but is missing a timestamp dimension." + ) + + if data.get("timestamp") is None and data.get("window") is not None: + raise ValidationError( + f"The metric '{data['name']} has a window defined but is missing a timestamp dimension." 
+ ) + if data.get("model") is None and data.get("calculation_method") != "derived": raise ValidationError("Non-derived metrics require a 'model' property") diff --git a/tests/functional/metrics/fixtures.py b/tests/functional/metrics/fixtures.py index e191f609977..8a03cb0d7fa 100644 --- a/tests/functional/metrics/fixtures.py +++ b/tests/functional/metrics/fixtures.py @@ -642,3 +642,58 @@ meta: my_meta: 'testing' """ + +metric_without_timestamp_or_timegrains_yml = """ +version: 2 + +metrics: + - name: number_of_people + label: "Number of people" + description: Total count of people + model: "ref('people')" + calculation_method: count + expression: "*" + dimensions: + - favorite_color + - loves_dbt + meta: + my_meta: 'testing' +""" + +invalid_metric_without_timestamp_with_time_grains_yml = """ +version: 2 + +metrics: + - name: number_of_people + label: "Number of people" + description: Total count of people + model: "ref('people')" + time_grains: [day, week, month] + calculation_method: count + expression: "*" + dimensions: + - favorite_color + - loves_dbt + meta: + my_meta: 'testing' +""" + +invalid_metric_without_timestamp_with_window_yml = """ +version: 2 + +metrics: + - name: number_of_people + label: "Number of people" + description: Total count of people + model: "ref('people')" + window: + count: 14 + period: day + calculation_method: count + expression: "*" + dimensions: + - favorite_color + - loves_dbt + meta: + my_meta: 'testing' +""" diff --git a/tests/functional/metrics/test_metrics.py b/tests/functional/metrics/test_metrics.py index 37446589cd2..de8c022f3d3 100644 --- a/tests/functional/metrics/test_metrics.py +++ b/tests/functional/metrics/test_metrics.py @@ -3,6 +3,7 @@ from dbt.tests.util import run_dbt, get_manifest from dbt.exceptions import ParsingException + from tests.functional.metrics.fixtures import ( mock_purchase_data_csv, models_people_sql, @@ -18,6 +19,9 @@ invalid_derived_metric_contains_model_yml, derived_metric_yml, derived_metric_old_attr_names_yml, + metric_without_timestamp_or_timegrains_yml, + invalid_metric_without_timestamp_with_time_grains_yml, + invalid_metric_without_timestamp_with_window_yml ) @@ -46,6 +50,33 @@ def test_simple_metric( assert metric_ids == expected_metric_ids +class TestSimpleMetricsNoTimestamp: + @pytest.fixture(scope="class") + def models(self): + return { + "people_metrics.yml": metric_without_timestamp_or_timegrains_yml, + "people.sql": models_people_sql, + } + + def test_simple_metric_no_timestamp( + self, + project, + ): + # initial run + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + metric_ids = list(manifest.metrics.keys()) + expected_metric_ids = [ + "metric.test.number_of_people", + ] + assert metric_ids == expected_metric_ids + + # make sure the 'expression' metric depends on the two upstream metrics + metric_test = manifest.metrics["metric.test.number_of_people"] + assert metric_test.timestamp is None + + class TestInvalidRefMetrics: @pytest.fixture(scope="class") def models(self): @@ -253,3 +284,41 @@ def models(self): "derived_metric.yml": derived_metric_old_attr_names_yml, "downstream_model.sql": downstream_model_sql, } + + +class TestInvalidTimestampTimeGrainsMetrics: + @pytest.fixture(scope="class") + def models(self): + return { + "people_metrics.yml": invalid_metric_without_timestamp_with_time_grains_yml, + "people.sql": models_people_sql, + } + + # Tests that we get a ParsingException with an invalid metric definition. 
+ # This metric definition is missing timestamp but HAS a time_grains property + def test_simple_metric( + self, + project, + ): + # initial run + with pytest.raises(ParsingException): + run_dbt(["run"]) + + +class TestInvalidTimestampWindowMetrics: + @pytest.fixture(scope="class") + def models(self): + return { + "people_metrics.yml": invalid_metric_without_timestamp_with_window_yml, + "people.sql": models_people_sql, + } + + # Tests that we get a ParsingException with an invalid metric definition. + # This metric definition is missing timestamp but HAS a window property + def test_simple_metric( + self, + project, + ): + # initial run + with pytest.raises(ParsingException): + run_dbt(["run"]) From 94d6d19fb43f1ed466c9049dea40e203b23f89da Mon Sep 17 00:00:00 2001 From: Kshitij Aranke Date: Fri, 6 Jan 2023 11:59:53 -0800 Subject: [PATCH 086/156] [CT-1693] Port severity test to Pytest (#6537) --- .../Under the Hood-20230106-112855.yaml | 6 + .../045_test_severity_tests/models/model.sql | 1 - .../045_test_severity_tests/models/schema.yml | 19 --- .../seeds/null_seed.csv | 21 ---- .../045_test_severity_tests/test_severity.py | 93 -------------- .../045_test_severity_tests/tests/data.sql | 2 - tests/functional/severity/test_severity.py | 119 ++++++++++++++++++ 7 files changed, 125 insertions(+), 136 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230106-112855.yaml delete mode 100644 test/integration/045_test_severity_tests/models/model.sql delete mode 100644 test/integration/045_test_severity_tests/models/schema.yml delete mode 100644 test/integration/045_test_severity_tests/seeds/null_seed.csv delete mode 100644 test/integration/045_test_severity_tests/test_severity.py delete mode 100644 test/integration/045_test_severity_tests/tests/data.sql create mode 100644 tests/functional/severity/test_severity.py diff --git a/.changes/unreleased/Under the Hood-20230106-112855.yaml b/.changes/unreleased/Under the Hood-20230106-112855.yaml new file mode 100644 index 00000000000..1344b3397c0 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230106-112855.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: '[CT-1693] Port severity test to Pytest' +time: 2023-01-06T11:28:55.800547-08:00 +custom: + Author: aranke + Issue: "6466" diff --git a/test/integration/045_test_severity_tests/models/model.sql b/test/integration/045_test_severity_tests/models/model.sql deleted file mode 100644 index 3e29210ab0a..00000000000 --- a/test/integration/045_test_severity_tests/models/model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ source('source', 'nulls') }} diff --git a/test/integration/045_test_severity_tests/models/schema.yml b/test/integration/045_test_severity_tests/models/schema.yml deleted file mode 100644 index 207c16c16c7..00000000000 --- a/test/integration/045_test_severity_tests/models/schema.yml +++ /dev/null @@ -1,19 +0,0 @@ -version: 2 -models: - - name: model - columns: - - name: email - tests: - - not_null: - severity: "{{ 'error' if var('strict', false) else 'warn' }}" -sources: - - name: source - schema: "{{ var('test_run_schema') }}" - tables: - - name: nulls - identifier: null_seed - columns: - - name: email - tests: - - not_null: - severity: "{{ 'error' if var('strict', false) else 'warn' }}" diff --git a/test/integration/045_test_severity_tests/seeds/null_seed.csv b/test/integration/045_test_severity_tests/seeds/null_seed.csv deleted file mode 100644 index b26a87430ac..00000000000 --- a/test/integration/045_test_severity_tests/seeds/null_seed.csv +++ /dev/null @@ -1,21 
+0,0 @@ -id,first_name,last_name,email,gender,ip_address,updated_at -1,Judith,Kennedy,jkennedy0@phpbb.com,Female,54.60.24.128,2015-12-24 12:19:28 -2,Arthur,Kelly,akelly1@eepurl.com,Male,62.56.24.215,2015-10-28 16:22:15 -3,Rachel,Moreno,rmoreno2@msu.edu,Female,31.222.249.23,2016-04-05 02:05:30 -4,Ralph,Turner,rturner3@hp.com,Male,157.83.76.114,2016-08-08 00:06:51 -5,Laura,Gonzales,lgonzales4@howstuffworks.com,Female,30.54.105.168,2016-09-01 08:25:38 -6,Katherine,Lopez,null,Female,169.138.46.89,2016-08-30 18:52:11 -7,Jeremy,Hamilton,jhamilton6@mozilla.org,Male,231.189.13.133,2016-07-17 02:09:46 -8,Heather,Rose,hrose7@goodreads.com,Female,87.165.201.65,2015-12-29 22:03:56 -9,Gregory,Kelly,gkelly8@trellian.com,Male,154.209.99.7,2016-03-24 21:18:16 -10,Rachel,Lopez,rlopez9@themeforest.net,Female,237.165.82.71,2016-08-20 15:44:49 -11,Donna,Welch,dwelcha@shutterfly.com,Female,103.33.110.138,2016-02-27 01:41:48 -12,Russell,Lawrence,rlawrenceb@qq.com,Male,189.115.73.4,2016-06-11 03:07:09 -13,Michelle,Montgomery,mmontgomeryc@scientificamerican.com,Female,243.220.95.82,2016-06-18 16:27:19 -14,Walter,Castillo,null,Male,71.159.238.196,2016-10-06 01:55:44 -15,Robin,Mills,rmillse@vkontakte.ru,Female,172.190.5.50,2016-10-31 11:41:21 -16,Raymond,Holmes,rholmesf@usgs.gov,Male,148.153.166.95,2016-10-03 08:16:38 -17,Gary,Bishop,gbishopg@plala.or.jp,Male,161.108.182.13,2016-08-29 19:35:20 -18,Anna,Riley,arileyh@nasa.gov,Female,253.31.108.22,2015-12-11 04:34:27 -19,Sarah,Knight,sknighti@foxnews.com,Female,222.220.3.177,2016-09-26 00:49:06 -20,Phyllis,Fox,pfoxj@creativecommons.org,Female,163.191.232.95,2016-08-21 10:35:19 diff --git a/test/integration/045_test_severity_tests/test_severity.py b/test/integration/045_test_severity_tests/test_severity.py deleted file mode 100644 index 965862a2e7a..00000000000 --- a/test/integration/045_test_severity_tests/test_severity.py +++ /dev/null @@ -1,93 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestSeverity(DBTIntegrationTest): - @property - def schema(self): - return "severity_045" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds'], - 'test-paths': ['tests'], - 'seeds': { - 'quote_columns': False, - }, - } - - def run_dbt_with_vars(self, cmd, strict_var, *args, **kwargs): - cmd.extend(['--vars', - '{{test_run_schema: {}, strict: {}}}'.format(self.unique_schema(), strict_var)]) - return self.run_dbt(cmd, *args, **kwargs) - - @use_profile('postgres') - def test_postgres_severity_warnings(self): - self.run_dbt_with_vars(['seed'], 'false') - self.run_dbt_with_vars(['run'], 'false') - results = self.run_dbt_with_vars( - ['test', '--select', 'test_type:generic'], 'false') - self.assertEqual(len(results), 2) - self.assertEqual(results[0].status, 'warn') - self.assertEqual(results[0].failures, 2) - self.assertEqual(results[1].status, 'warn') - self.assertEqual(results[1].failures, 2) - - @use_profile('postgres') - def test_postgres_severity_rendered_errors(self): - self.run_dbt_with_vars(['seed'], 'false') - self.run_dbt_with_vars(['run'], 'false') - results = self.run_dbt_with_vars( - ['test', '--select', 'test_type:generic'], 'true', expect_pass=False) - self.assertEqual(len(results), 2) - self.assertEqual(results[0].status, 'fail') - self.assertEqual(results[0].failures, 2) - self.assertEqual(results[1].status, 'fail') - self.assertEqual(results[1].failures, 2) - - @use_profile('postgres') - def 
test_postgres_severity_warnings_strict(self): - self.run_dbt_with_vars(['seed'], 'false') - self.run_dbt_with_vars(['run'], 'false') - results = self.run_dbt_with_vars( - ['test', '--select', 'test_type:generic'], 'false', expect_pass=True) - self.assertEqual(len(results), 2) - self.assertEqual(results[0].status, 'warn') - self.assertEqual(results[0].failures, 2) - self.assertEqual(results[1].status, 'warn') - self.assertEqual(results[1].failures, 2) - - @use_profile('postgres') - def test_postgres_data_severity_warnings(self): - self.run_dbt_with_vars(['seed'], 'false') - self.run_dbt_with_vars(['run'], 'false') - results = self.run_dbt_with_vars( - ['test', '--select', 'test_type:singular'], 'false') - self.assertEqual(len(results), 1) - self.assertEqual(results[0].status, 'warn') - self.assertEqual(results[0].failures, 2) - - @use_profile('postgres') - def test_postgres_data_severity_rendered_errors(self): - self.run_dbt_with_vars(['seed'], 'false') - self.run_dbt_with_vars(['run'], 'false') - results = self.run_dbt_with_vars( - ['test', '--select', 'test_type:singular'], 'true', expect_pass=False) - self.assertEqual(len(results), 1) - self.assertEqual(results[0].status, 'fail') - self.assertEqual(results[0].failures, 2) - - @use_profile('postgres') - def test_postgres_data_severity_warnings_strict(self): - self.run_dbt_with_vars(['seed'], 'false') - self.run_dbt_with_vars(['run'], 'false') - results = self.run_dbt_with_vars( - ['test', '--select', 'test_type:singular'], 'false', expect_pass=True) - self.assertEqual(len(results), 1) - self.assertTrue(results[0].status, 'fail') - self.assertEqual(results[0].failures, 2) diff --git a/test/integration/045_test_severity_tests/tests/data.sql b/test/integration/045_test_severity_tests/tests/data.sql deleted file mode 100644 index 65c5863ff03..00000000000 --- a/test/integration/045_test_severity_tests/tests/data.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(severity='error' if var('strict', false) else 'warn') }} -select * from {{ ref('model') }} where email is null diff --git a/tests/functional/severity/test_severity.py b/tests/functional/severity/test_severity.py new file mode 100644 index 00000000000..050ccd22325 --- /dev/null +++ b/tests/functional/severity/test_severity.py @@ -0,0 +1,119 @@ +import pytest + +from dbt.tests.util import run_dbt + +models__sample_model_sql = """ +select * from {{ source("raw", "sample_seed") }} +""" + +models__schema_yml = """ +version: 2 +sources: + - name: raw + database: "{{ target.database }}" + schema: "{{ target.schema }}" + tables: + - name: sample_seed + columns: + - name: email + tests: + - not_null: + severity: "{{ 'error' if var('strict', false) else 'warn' }}" +models: + - name: sample_model + columns: + - name: email + tests: + - not_null: + severity: "{{ 'error' if var('strict', false) else 'warn' }}" +""" + +seeds__sample_seed_csv = """id,first_name,last_name,email,gender,ip_address,updated_at +1,Judith,Kennedy,jkennedy0@phpbb.com,Female,54.60.24.128,2015-12-24 12:19:28 +2,Arthur,Kelly,akelly1@eepurl.com,Male,62.56.24.215,2015-10-28 16:22:15 +3,Rachel,Moreno,rmoreno2@msu.edu,Female,31.222.249.23,2016-04-05 02:05:30 +4,Ralph,Turner,rturner3@hp.com,Male,157.83.76.114,2016-08-08 00:06:51 +5,Laura,Gonzales,lgonzales4@howstuffworks.com,Female,30.54.105.168,2016-09-01 08:25:38 +6,Katherine,Lopez,null,Female,169.138.46.89,2016-08-30 18:52:11 +7,Jeremy,Hamilton,jhamilton6@mozilla.org,Male,231.189.13.133,2016-07-17 02:09:46 +8,Heather,Rose,hrose7@goodreads.com,Female,87.165.201.65,2015-12-29 22:03:56 
+9,Gregory,Kelly,gkelly8@trellian.com,Male,154.209.99.7,2016-03-24 21:18:16 +10,Rachel,Lopez,rlopez9@themeforest.net,Female,237.165.82.71,2016-08-20 15:44:49 +11,Donna,Welch,dwelcha@shutterfly.com,Female,103.33.110.138,2016-02-27 01:41:48 +12,Russell,Lawrence,rlawrenceb@qq.com,Male,189.115.73.4,2016-06-11 03:07:09 +13,Michelle,Montgomery,mmontgomeryc@scientificamerican.com,Female,243.220.95.82,2016-06-18 16:27:19 +14,Walter,Castillo,null,Male,71.159.238.196,2016-10-06 01:55:44 +15,Robin,Mills,rmillse@vkontakte.ru,Female,172.190.5.50,2016-10-31 11:41:21 +16,Raymond,Holmes,rholmesf@usgs.gov,Male,148.153.166.95,2016-10-03 08:16:38 +17,Gary,Bishop,gbishopg@plala.or.jp,Male,161.108.182.13,2016-08-29 19:35:20 +18,Anna,Riley,arileyh@nasa.gov,Female,253.31.108.22,2015-12-11 04:34:27 +19,Sarah,Knight,sknighti@foxnews.com,Female,222.220.3.177,2016-09-26 00:49:06 +20,Phyllis,Fox,pfoxj@creativecommons.org,Female,163.191.232.95,2016-08-21 10:35:19 +""" + + +tests__sample_test_sql = """ +{{ config(severity='error' if var('strict', false) else 'warn') }} +select * from {{ ref("sample_model") }} where email is null +""" + + +@pytest.fixture(scope="class") +def models(): + return { + "sample_model.sql": models__sample_model_sql, + "schema.yml": models__schema_yml + } + + +@pytest.fixture(scope="class") +def seeds(): + return {"sample_seed.csv": seeds__sample_seed_csv} + + +@pytest.fixture(scope="class") +def tests(): + return {"null_email.sql": tests__sample_test_sql} + + +@pytest.fixture(scope="class") +def project_config_update(): + return { + 'config-version': 2, + 'seed-paths': ['seeds'], + 'test-paths': ['tests'], + "seeds": { + "quote_columns": False, + }, + } + + +class TestSeverity: + @pytest.fixture(scope="class", autouse=True) + def seed_and_run(self, project): + run_dbt(["seed"]) + run_dbt(["run"]) + + def test_generic_default(self, project): + results = run_dbt(['test', '--select', 'test_type:generic']) + assert len(results) == 2 + assert all([r.status == 'warn' for r in results]) + assert all([r.failures == 2 for r in results]) + + def test_generic_strict(self, project): + results = run_dbt(['test', '--select', 'test_type:generic', "--vars", '{"strict": True}'], expect_pass=False) + assert len(results) == 2 + assert all([r.status == 'fail' for r in results]) + assert all([r.failures == 2 for r in results]) + + def test_singular_default(self, project): + results = run_dbt(['test', '--select', 'test_type:singular']) + assert len(results) == 1 + assert all([r.status == 'warn' for r in results]) + assert all([r.failures == 2 for r in results]) + + def test_singular_strict(self, project): + results = run_dbt(['test', '--select', 'test_type:singular', "--vars", '{"strict": True}'], expect_pass=False) + assert len(results) == 1 + assert all([r.status == 'fail' for r in results]) + assert all([r.failures == 2 for r in results]) From d9a4ee126a55e9dc531921491507be472c864090 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Mon, 9 Jan 2023 11:33:44 -0500 Subject: [PATCH 087/156] CT 1549 reorg logging events to have two top level keys (#6553) * Update types.proto * pre-commit passes * Cleanup tests and tweak EventLevels * Put node_info back on SQLCommit. Add "level" to fire_event function. 
* use event.message() in warn_or_error * Fix logging test * Changie * Fix a couple of unit tests * import Protocol from typing_extensions for 3.7 --- .../Under the Hood-20230109-095907.yaml | 6 + core/dbt/clients/system.py | 8 +- core/dbt/events/base_types.py | 71 +- core/dbt/events/eventmgr.py | 64 +- core/dbt/events/functions.py | 69 +- core/dbt/events/proto_types.py | 1909 +++++++++++++---- core/dbt/events/types.proto | 1648 ++++++++++---- core/dbt/events/types.py | 114 +- core/dbt/parser/schemas.py | 4 +- core/dbt/task/freshness.py | 6 +- core/dbt/task/parse.py | 35 +- core/dbt/task/run.py | 11 +- core/dbt/task/seed.py | 9 +- core/dbt/task/snapshot.py | 9 +- core/dbt/task/test.py | 6 +- test/unit/test_config.py | 4 +- test/unit/test_graph_selector_methods.py | 8 +- tests/functional/logging/test_logging.py | 11 +- tests/unit/test_events.py | 62 +- tests/unit/test_proto_events.py | 114 +- 20 files changed, 2992 insertions(+), 1176 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230109-095907.yaml diff --git a/.changes/unreleased/Under the Hood-20230109-095907.yaml b/.changes/unreleased/Under the Hood-20230109-095907.yaml new file mode 100644 index 00000000000..2133f06ddad --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230109-095907.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Reorganize structured logging events to have two top keys +time: 2023-01-09T09:59:07.842187-05:00 +custom: + Author: gshank + Issue: "6311" diff --git a/core/dbt/clients/system.py b/core/dbt/clients/system.py index b776e91b1d0..0382dcb98e8 100644 --- a/core/dbt/clients/system.py +++ b/core/dbt/clients/system.py @@ -19,8 +19,8 @@ SystemErrorRetrievingModTime, SystemCouldNotWrite, SystemExecutingCmd, - SystemStdOutMsg, - SystemStdErrMsg, + SystemStdOut, + SystemStdErr, SystemReportReturnCode, ) import dbt.exceptions @@ -441,8 +441,8 @@ def run_cmd(cwd: str, cmd: List[str], env: Optional[Dict[str, Any]] = None) -> T except OSError as exc: _interpret_oserror(exc, cwd, cmd) - fire_event(SystemStdOutMsg(bmsg=out)) - fire_event(SystemStdErrMsg(bmsg=err)) + fire_event(SystemStdOut(bmsg=out)) + fire_event(SystemStdErr(bmsg=err)) if proc.returncode != 0: fire_event(SystemReportReturnCode(returncode=proc.returncode)) diff --git a/core/dbt/events/base_types.py b/core/dbt/events/base_types.py index db74016099a..fbd35b58fa1 100644 --- a/core/dbt/events/base_types.py +++ b/core/dbt/events/base_types.py @@ -3,6 +3,13 @@ import os import threading from datetime import datetime +import dbt.events.proto_types as pt +import sys + +if sys.version_info >= (3, 8): + from typing import Protocol +else: + from typing_extensions import Protocol # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # These base types define the _required structure_ for the concrete event # @@ -58,25 +65,20 @@ class EventLevel(str, Enum): class BaseEvent: """BaseEvent for proto message generated python events""" - def __post_init__(self): - super().__post_init__() - if not self.info.level: - self.info.level = self.level_tag() - assert self.info.level in ["info", "warn", "error", "debug", "test"] - if not hasattr(self.info, "msg") or not self.info.msg: - self.info.msg = self.message() - self.info.invocation_id = get_invocation_id() - self.info.extra = get_global_metadata_vars() - self.info.ts = datetime.utcnow() - self.info.pid = get_pid() - self.info.thread = get_thread_name() - self.info.code = self.code() - self.info.name = type(self).__name__ - - # This is here because although we know that info 
should always - # exist, mypy doesn't. - def log_level(self) -> EventLevel: - return self.info.level # type: ignore + # def __post_init__(self): + # super().__post_init__() + # if not self.info.level: + # self.info.level = self.level_tag() + # assert self.info.level in ["info", "warn", "error", "debug", "test"] + # if not hasattr(self.info, "msg") or not self.info.msg: + # self.info.msg = self.message() + # self.info.invocation_id = get_invocation_id() + # self.info.extra = get_global_metadata_vars() + # self.info.ts = datetime.utcnow() + # self.info.pid = get_pid() + # self.info.thread = get_thread_name() + # self.info.code = self.code() + # self.info.name = type(self).__name__ def level_tag(self) -> EventLevel: return EventLevel.DEBUG @@ -84,6 +86,37 @@ def level_tag(self) -> EventLevel: def message(self) -> str: raise Exception("message() not implemented for event") + def code(self) -> str: + raise Exception("code() not implemented for event") + + +class EventMsg(Protocol): + info: pt.EventInfo + data: BaseEvent + + +def msg_from_base_event(event: BaseEvent, level: EventLevel = None): + + msg_class_name = f"{type(event).__name__}Msg" + msg_cls = getattr(pt, msg_class_name) + + # level in EventInfo must be a string, not an EventLevel + msg_level: str = level.value if level else event.level_tag().value + assert msg_level is not None + event_info = pt.EventInfo( + level=msg_level, + msg=event.message(), + invocation_id=get_invocation_id(), + extra=get_global_metadata_vars(), + ts=datetime.utcnow(), + pid=get_pid(), + thread=get_thread_name(), + code=event.code(), + name=type(event).__name__, + ) + new_event = msg_cls(data=event, info=event_info) + return new_event + # DynamicLevel requires that the level be supplied on the # event construction call using the "info" function from functions.py diff --git a/core/dbt/events/eventmgr.py b/core/dbt/events/eventmgr.py index 97a7d5d4360..10bf225bef7 100644 --- a/core/dbt/events/eventmgr.py +++ b/core/dbt/events/eventmgr.py @@ -9,16 +9,16 @@ from typing import Any, Callable, List, Optional, TextIO from uuid import uuid4 -from dbt.events.base_types import BaseEvent, EventLevel +from dbt.events.base_types import BaseEvent, EventLevel, msg_from_base_event, EventMsg # A Filter is a function which takes a BaseEvent and returns True if the event # should be logged, False otherwise. -Filter = Callable[[BaseEvent], bool] +Filter = Callable[[EventMsg], bool] # Default filter which logs every event -def NoFilter(_: BaseEvent) -> bool: +def NoFilter(_: EventMsg) -> bool: return True @@ -47,13 +47,6 @@ class LineFormat(Enum): } -# We should consider fixing the problem, but log_level() can return a string for -# DynamicLevel events, even thought it is supposed to return an EventLevel. This -# function gets a string for the level, no matter what. -def _get_level_str(e: BaseEvent) -> str: - return e.log_level().value if isinstance(e.log_level(), EventLevel) else str(e.log_level()) - - # We need this function for now because the numeric log severity levels in # Python do not match those for logbook, so we have to explicitly call the # correct function by name. 
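# For illustration only (editor's sketch, not part of this patch): after this
# change a logger no longer receives a bare BaseEvent; it receives a generated
# "<EventName>Msg" wrapper whose `info` field carries shared metadata and whose
# `data` field carries the event body. A minimal consumer, assuming only the
# EventManager and EventMsg protocol shown in this diff:
#
#     from dbt.events.eventmgr import EventManager
#
#     def print_errors(msg) -> None:  # msg satisfies EventMsg
#         if msg.info.level == "error":
#             print(f"{msg.info.code} {msg.info.name}: {msg.info.msg}")
#
#     mgr = EventManager()
#     mgr.callbacks.append(print_errors)  # callbacks now take EventMsg, not BaseEvent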
@@ -113,14 +106,14 @@ def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: self._python_logger = log - def create_line(self, e: BaseEvent) -> str: + def create_line(self, msg: EventMsg) -> str: raise NotImplementedError() - def write_line(self, e: BaseEvent): - line = self.create_line(e) - python_level = _log_level_map[e.log_level()] + def write_line(self, msg: EventMsg): + line = self.create_line(msg) + python_level = _log_level_map[EventLevel(msg.info.level)] if self._python_logger is not None: - send_to_logger(self._python_logger, _get_level_str(e), line) + send_to_logger(self._python_logger, msg.info.level, line) elif self._stream is not None and _log_level_map[self.level] <= python_level: self._stream.write(line + "\n") @@ -138,24 +131,26 @@ def __init__(self, event_manager: "EventManager", config: LoggerConfig) -> None: self.use_colors = config.use_colors self.use_debug_format = config.line_format == LineFormat.DebugText - def create_line(self, e: BaseEvent) -> str: - return self.create_debug_line(e) if self.use_debug_format else self.create_info_line(e) + def create_line(self, msg: EventMsg) -> str: + return self.create_debug_line(msg) if self.use_debug_format else self.create_info_line(msg) - def create_info_line(self, e: BaseEvent) -> str: + def create_info_line(self, msg: EventMsg) -> str: ts: str = datetime.utcnow().strftime("%H:%M:%S") - scrubbed_msg: str = self.scrubber(e.message()) # type: ignore + scrubbed_msg: str = self.scrubber(msg.info.msg) # type: ignore return f"{self._get_color_tag()}{ts} {scrubbed_msg}" - def create_debug_line(self, e: BaseEvent) -> str: + def create_debug_line(self, msg: EventMsg) -> str: log_line: str = "" # Create a separator if this is the beginning of an invocation # TODO: This is an ugly hack, get rid of it if we can - if type(e).__name__ == "MainReportVersion": + if msg.info.name == "MainReportVersion": separator = 30 * "=" - log_line = f"\n\n{separator} {datetime.utcnow()} | {self.event_manager.invocation_id} {separator}\n" - ts: str = datetime.utcnow().strftime("%H:%M:%S.%f") - scrubbed_msg: str = self.scrubber(e.message()) # type: ignore - level = _get_level_str(e) + log_line = ( + f"\n\n{separator} {msg.info.ts} | {self.event_manager.invocation_id} {separator}\n" + ) + ts: str = msg.info.ts.strftime("%H:%M:%S.%f") + scrubbed_msg: str = self.scrubber(msg.info.msg) # type: ignore + level = msg.info.level log_line += ( f"{self._get_color_tag()}{ts} [{level:<5}]{self._get_thread_name()} {scrubbed_msg}" ) @@ -175,11 +170,11 @@ def _get_thread_name(self) -> str: class _JsonLogger(_Logger): - def create_line(self, e: BaseEvent) -> str: - from dbt.events.functions import event_to_dict + def create_line(self, msg: EventMsg) -> str: + from dbt.events.functions import msg_to_dict - event_dict = event_to_dict(e) - raw_log_line = json.dumps(event_dict, sort_keys=True) + msg_dict = msg_to_dict(msg) + raw_log_line = json.dumps(msg_dict, sort_keys=True) line = self.scrubber(raw_log_line) # type: ignore return line @@ -187,16 +182,17 @@ def create_line(self, e: BaseEvent) -> str: class EventManager: def __init__(self) -> None: self.loggers: List[_Logger] = [] - self.callbacks: List[Callable[[BaseEvent], None]] = [] + self.callbacks: List[Callable[[EventMsg], None]] = [] self.invocation_id: str = str(uuid4()) - def fire_event(self, e: BaseEvent) -> None: + def fire_event(self, e: BaseEvent, level: EventLevel = None) -> None: + msg = msg_from_base_event(e, level=level) for logger in self.loggers: - if logger.filter(e): # type: 
ignore - logger.write_line(e) + if logger.filter(msg): # type: ignore + logger.write_line(msg) for callback in self.callbacks: - callback(e) + callback(msg) def add_logger(self, config: LoggerConfig): logger = ( diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index f061606632e..9722fb5fecf 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -1,9 +1,8 @@ import betterproto from dbt.constants import METADATA_ENV_PREFIX -from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut +from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut, EventMsg from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter from dbt.events.helpers import env_secrets, scrub_secrets -from dbt.events.proto_types import EventInfo from dbt.events.types import EmptyLine import dbt.flags as flags from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing @@ -59,14 +58,14 @@ def _get_stdout_config(level: Optional[EventLevel] = None) -> LoggerConfig: def _stdout_filter( - log_cache_events: bool, debug_mode: bool, quiet_mode: bool, evt: BaseEvent + log_cache_events: bool, debug_mode: bool, quiet_mode: bool, msg: EventMsg ) -> bool: return ( - not isinstance(evt, NoStdOut) - and (not isinstance(evt, Cache) or log_cache_events) - and (evt.log_level() != EventLevel.DEBUG or debug_mode) - and (evt.log_level() == EventLevel.ERROR or not quiet_mode) - and not (flags.LOG_FORMAT == "json" and type(evt) == EmptyLine) + not isinstance(msg.data, NoStdOut) + and (not isinstance(msg.data, Cache) or log_cache_events) + and (EventLevel(msg.info.level) != EventLevel.DEBUG or debug_mode) + and (EventLevel(msg.info.level) == EventLevel.ERROR or not quiet_mode) + and not (flags.LOG_FORMAT == "json" and type(msg.data) == EmptyLine) ) @@ -82,18 +81,18 @@ def _get_logfile_config(log_path: str) -> LoggerConfig: ) -def _logfile_filter(log_cache_events: bool, evt: BaseEvent) -> bool: +def _logfile_filter(log_cache_events: bool, msg: EventMsg) -> bool: return ( - not isinstance(evt, NoFile) - and not (isinstance(evt, Cache) and not log_cache_events) - and not (flags.LOG_FORMAT == "json" and type(evt) == EmptyLine) + not isinstance(msg.data, NoFile) + and not (isinstance(msg.data, Cache) and not log_cache_events) + and not (flags.LOG_FORMAT == "json" and type(msg.data) == EmptyLine) ) def _get_logbook_log_config(level: Optional[EventLevel] = None) -> LoggerConfig: config = _get_stdout_config(level) config.name = "logbook_log" - config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e, Cache) + config.filter = NoFilter if flags.LOG_CACHE_EVENTS else lambda e: not isinstance(e.data, Cache) config.logger = GLOBAL_LOGGER return config @@ -138,23 +137,27 @@ def stop_capture_stdout_logs(): # returns a dictionary representation of the event fields. # the message may contain secrets which must be scrubbed at the usage site. 
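# For illustration only (editor's sketch, not part of this patch): with the
# rename below, a serialized message nests the payload under the two new
# top-level keys instead of mixing metadata and event fields. The shape follows
# EventInfo and the generated "...Msg" classes in this commit; values are
# hypothetical:
#
#     {
#         "info": {"name": "MainReportVersion", "code": "A001", "level": "info",
#                  "msg": "...", "invocation_id": "...", "pid": ..., ...},
#         "data": {"version": "...", "log_version": ...}
#     }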
-def event_to_json(event: BaseEvent) -> str: - event_dict = event_to_dict(event) - raw_log_line = json.dumps(event_dict, sort_keys=True) +def msg_to_json(msg: EventMsg) -> str: + msg_dict = msg_to_dict(msg) + raw_log_line = json.dumps(msg_dict, sort_keys=True) return raw_log_line -def event_to_dict(event: BaseEvent) -> dict: - event_dict = dict() +def msg_to_dict(msg: EventMsg) -> dict: + msg_dict = dict() try: - event_dict = event.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True) # type: ignore + msg_dict = msg.to_dict(casing=betterproto.Casing.SNAKE, include_default_values=True) # type: ignore except AttributeError as exc: - event_type = type(event).__name__ + event_type = type(msg).__name__ raise Exception(f"type {event_type} is not serializable. {str(exc)}") # We don't want an empty NodeInfo in output - if "node_info" in event_dict and event_dict["node_info"]["node_name"] == "": - del event_dict["node_info"] - return event_dict + if ( + "data" in msg_dict + and "node_info" in msg_dict["data"] + and msg_dict["data"]["node_info"]["node_name"] == "" + ): + del msg_dict["data"]["node_info"] + return msg_dict def warn_or_error(event, node=None): @@ -162,24 +165,26 @@ def warn_or_error(event, node=None): # TODO: resolve this circular import when at top from dbt.exceptions import EventCompilationException - raise EventCompilationException(event.info.msg, node) + raise EventCompilationException(event.message(), node) else: fire_event(event) # an alternative to fire_event which only creates and logs the event value # if the condition is met. Does nothing otherwise. -def fire_event_if(conditional: bool, lazy_e: Callable[[], BaseEvent]) -> None: +def fire_event_if( + conditional: bool, lazy_e: Callable[[], BaseEvent], level: EventLevel = None +) -> None: if conditional: - fire_event(lazy_e()) + fire_event(lazy_e(), level=level) # top-level method for accessing the new eventing system # this is where all the side effects happen branched by event type # (i.e. - mutating the event history, printing to stdout, logging # to files, etc.) -def fire_event(e: BaseEvent) -> None: - EVENT_MANAGER.fire_event(e) +def fire_event(e: BaseEvent, level: EventLevel = None) -> None: + EVENT_MANAGER.fire_event(e, level=level) def get_metadata_vars() -> Dict[str, str]: @@ -206,11 +211,3 @@ def set_invocation_id() -> None: # This is primarily for setting the invocation_id for separate # commands in the dbt servers. It shouldn't be necessary for the CLI. EVENT_MANAGER.invocation_id = str(uuid.uuid4()) - - -# Currently used to set the level in EventInfo, so logging events can -# provide more than one "level". 
Might be used in the future to set -# more fields in EventInfo, once some of that information is no longer global -def info(level="info"): - info = EventInfo(level=level) - return info diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index bd886243295..124139b1db8 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -91,1974 +91,3041 @@ class GenericMessage(betterproto.Message): class MainReportVersion(betterproto.Message): """A001""" + version: str = betterproto.string_field(1) + log_version: int = betterproto.int32_field(2) + + +@dataclass +class MainReportVersionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - version: str = betterproto.string_field(2) - log_version: int = betterproto.int32_field(3) + data: "MainReportVersion" = betterproto.message_field(2) @dataclass class MainReportArgs(betterproto.Message): """A002""" - info: "EventInfo" = betterproto.message_field(1) args: Dict[str, str] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_STRING + 1, betterproto.TYPE_STRING, betterproto.TYPE_STRING ) +@dataclass +class MainReportArgsMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "MainReportArgs" = betterproto.message_field(2) + + @dataclass class MainTrackingUserState(betterproto.Message): """A003""" + user_state: str = betterproto.string_field(1) + + +@dataclass +class MainTrackingUserStateMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - user_state: str = betterproto.string_field(2) + data: "MainTrackingUserState" = betterproto.message_field(2) @dataclass class MergedFromState(betterproto.Message): """A004""" + num_merged: int = betterproto.int32_field(1) + sample: List[str] = betterproto.string_field(2) + + +@dataclass +class MergedFromStateMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - num_merged: int = betterproto.int32_field(2) - sample: List[str] = betterproto.string_field(3) + data: "MergedFromState" = betterproto.message_field(2) @dataclass class MissingProfileTarget(betterproto.Message): """A005""" + profile_name: str = betterproto.string_field(1) + target_name: str = betterproto.string_field(2) + + +@dataclass +class MissingProfileTargetMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - profile_name: str = betterproto.string_field(2) - target_name: str = betterproto.string_field(3) + data: "MissingProfileTarget" = betterproto.message_field(2) @dataclass class InvalidVarsYAML(betterproto.Message): """A008""" + pass + + +@dataclass +class InvalidVarsYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "InvalidVarsYAML" = betterproto.message_field(2) @dataclass class DbtProjectError(betterproto.Message): """A009""" + pass + + +@dataclass +class DbtProjectErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DbtProjectError" = betterproto.message_field(2) @dataclass class DbtProjectErrorException(betterproto.Message): """A010""" + exc: str = betterproto.string_field(1) + + +@dataclass +class DbtProjectErrorExceptionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "DbtProjectErrorException" = betterproto.message_field(2) @dataclass class DbtProfileError(betterproto.Message): """A011""" + pass + + +@dataclass +class DbtProfileErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: 
"DbtProfileError" = betterproto.message_field(2) @dataclass class DbtProfileErrorException(betterproto.Message): """A012""" + exc: str = betterproto.string_field(1) + + +@dataclass +class DbtProfileErrorExceptionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "DbtProfileErrorException" = betterproto.message_field(2) @dataclass class ProfileListTitle(betterproto.Message): """A013""" + pass + + +@dataclass +class ProfileListTitleMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "ProfileListTitle" = betterproto.message_field(2) @dataclass class ListSingleProfile(betterproto.Message): """A014""" + profile: str = betterproto.string_field(1) + + +@dataclass +class ListSingleProfileMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - profile: str = betterproto.string_field(2) + data: "ListSingleProfile" = betterproto.message_field(2) @dataclass class NoDefinedProfiles(betterproto.Message): """A015""" + pass + + +@dataclass +class NoDefinedProfilesMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "NoDefinedProfiles" = betterproto.message_field(2) @dataclass class ProfileHelpMessage(betterproto.Message): """A016""" + pass + + +@dataclass +class ProfileHelpMessageMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "ProfileHelpMessage" = betterproto.message_field(2) @dataclass class StarterProjectPath(betterproto.Message): """A017""" + dir: str = betterproto.string_field(1) + + +@dataclass +class StarterProjectPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dir: str = betterproto.string_field(2) + data: "StarterProjectPath" = betterproto.message_field(2) @dataclass class ConfigFolderDirectory(betterproto.Message): """A018""" + dir: str = betterproto.string_field(1) + + +@dataclass +class ConfigFolderDirectoryMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - dir: str = betterproto.string_field(2) + data: "ConfigFolderDirectory" = betterproto.message_field(2) @dataclass class NoSampleProfileFound(betterproto.Message): """A019""" + adapter: str = betterproto.string_field(1) + + +@dataclass +class NoSampleProfileFoundMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - adapter: str = betterproto.string_field(2) + data: "NoSampleProfileFound" = betterproto.message_field(2) @dataclass class ProfileWrittenWithSample(betterproto.Message): """A020""" + name: str = betterproto.string_field(1) + path: str = betterproto.string_field(2) + + +@dataclass +class ProfileWrittenWithSampleMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - path: str = betterproto.string_field(3) + data: "ProfileWrittenWithSample" = betterproto.message_field(2) @dataclass class ProfileWrittenWithTargetTemplateYAML(betterproto.Message): """A021""" + name: str = betterproto.string_field(1) + path: str = betterproto.string_field(2) + + +@dataclass +class ProfileWrittenWithTargetTemplateYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - path: str = betterproto.string_field(3) + data: "ProfileWrittenWithTargetTemplateYAMLMsg" = betterproto.message_field(2) @dataclass class ProfileWrittenWithProjectTemplateYAML(betterproto.Message): """A022""" + name: str = betterproto.string_field(1) + path: str = betterproto.string_field(2) + + 
+@dataclass +class ProfileWrittenWithProjectTemplateYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) - path: str = betterproto.string_field(3) + data: "ProfileWrittenWithProjectTemplateYAML" = betterproto.message_field(2) @dataclass class SettingUpProfile(betterproto.Message): """A023""" + pass + + +@dataclass +class SettingUpProfileMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "SettingUpProfile" = betterproto.message_field(2) @dataclass class InvalidProfileTemplateYAML(betterproto.Message): """A024""" + pass + + +@dataclass +class InvalidProfileTemplateYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "InvalidProfileTemplateYAML" = betterproto.message_field(2) @dataclass class ProjectNameAlreadyExists(betterproto.Message): """A025""" + name: str = betterproto.string_field(1) + + +@dataclass +class ProjectNameAlreadyExistsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - name: str = betterproto.string_field(2) + data: "ProjectNameAlreadyExists" = betterproto.message_field(2) @dataclass class ProjectCreated(betterproto.Message): """A026""" + project_name: str = betterproto.string_field(1) + docs_url: str = betterproto.string_field(2) + slack_url: str = betterproto.string_field(3) + + +@dataclass +class ProjectCreatedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - project_name: str = betterproto.string_field(2) - docs_url: str = betterproto.string_field(3) - slack_url: str = betterproto.string_field(4) + data: "ProjectCreated" = betterproto.message_field(2) @dataclass class PackageRedirectDeprecation(betterproto.Message): """D001""" + old_name: str = betterproto.string_field(1) + new_name: str = betterproto.string_field(2) + + +@dataclass +class PackageRedirectDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - old_name: str = betterproto.string_field(2) - new_name: str = betterproto.string_field(3) + data: "PackageRedirectDeprecation" = betterproto.message_field(2) @dataclass class PackageInstallPathDeprecation(betterproto.Message): """D002""" + pass + + +@dataclass +class PackageInstallPathDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "PackageInstallPathDeprecation" = betterproto.message_field(2) @dataclass class ConfigSourcePathDeprecation(betterproto.Message): """D003""" + deprecated_path: str = betterproto.string_field(1) + exp_path: str = betterproto.string_field(2) + + +@dataclass +class ConfigSourcePathDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - deprecated_path: str = betterproto.string_field(2) - exp_path: str = betterproto.string_field(3) + data: "ConfigSourcePathDeprecation" = betterproto.message_field(2) @dataclass class ConfigDataPathDeprecation(betterproto.Message): """D004""" + deprecated_path: str = betterproto.string_field(1) + exp_path: str = betterproto.string_field(2) + + +@dataclass +class ConfigDataPathDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - deprecated_path: str = betterproto.string_field(2) - exp_path: str = betterproto.string_field(3) + data: "ConfigDataPathDeprecation" = betterproto.message_field(2) @dataclass class AdapterDeprecationWarning(betterproto.Message): """D005""" + old_name: str = betterproto.string_field(1) + new_name: str = betterproto.string_field(2) + + +@dataclass +class 
AdapterDeprecationWarningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - old_name: str = betterproto.string_field(2) - new_name: str = betterproto.string_field(3) + data: "AdapterDeprecationWarning" = betterproto.message_field(2) @dataclass class MetricAttributesRenamed(betterproto.Message): """D006""" + metric_name: str = betterproto.string_field(1) + + +@dataclass +class MetricAttributesRenamedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - metric_name: str = betterproto.string_field(2) + data: "MetricAttributesRenamed" = betterproto.message_field(2) @dataclass class ExposureNameDeprecation(betterproto.Message): """D007""" + exposure: str = betterproto.string_field(1) + + +@dataclass +class ExposureNameDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exposure: str = betterproto.string_field(2) + data: "ExposureNameDeprecation" = betterproto.message_field(2) @dataclass class AdapterEventDebug(betterproto.Message): """E001""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + base_msg: str = betterproto.string_field(3) + args: List[str] = betterproto.string_field(4) + + +@dataclass +class AdapterEventDebugMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - base_msg: str = betterproto.string_field(4) - args: List[str] = betterproto.string_field(5) + data: "AdapterEventDebug" = betterproto.message_field(2) @dataclass class AdapterEventInfo(betterproto.Message): """E002""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + base_msg: str = betterproto.string_field(3) + args: List[str] = betterproto.string_field(4) + + +@dataclass +class AdapterEventInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - base_msg: str = betterproto.string_field(4) - args: List[str] = betterproto.string_field(5) + data: "AdapterEventInfo" = betterproto.message_field(2) @dataclass class AdapterEventWarning(betterproto.Message): """E003""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + base_msg: str = betterproto.string_field(3) + args: List[str] = betterproto.string_field(4) + + +@dataclass +class AdapterEventWarningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - base_msg: str = betterproto.string_field(4) - args: List[str] = betterproto.string_field(5) + data: "AdapterEventWarning" = betterproto.message_field(2) @dataclass class AdapterEventError(betterproto.Message): """E004""" + node_info: "NodeInfo" = betterproto.message_field(1) + name: str = betterproto.string_field(2) + base_msg: str = betterproto.string_field(3) + args: List[str] = betterproto.string_field(4) + exc_info: str = betterproto.string_field(5) + + +@dataclass +class AdapterEventErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - name: str = betterproto.string_field(3) - base_msg: str = betterproto.string_field(4) - args: List[str] = betterproto.string_field(5) - exc_info: str = betterproto.string_field(6) + data: "AdapterEventError" = 

 @dataclass
 class NewConnection(betterproto.Message):
     """E005"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    conn_type: str = betterproto.string_field(2)
+    conn_name: str = betterproto.string_field(3)
+
+
+@dataclass
+class NewConnectionMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    conn_type: str = betterproto.string_field(3)
-    conn_name: str = betterproto.string_field(4)
+    data: "NewConnection" = betterproto.message_field(2)

 @dataclass
 class ConnectionReused(betterproto.Message):
     """E006"""

+    conn_name: str = betterproto.string_field(1)
+
+
+@dataclass
+class ConnectionReusedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    conn_name: str = betterproto.string_field(2)
+    data: "ConnectionReused" = betterproto.message_field(2)

 @dataclass
 class ConnectionLeftOpenInCleanup(betterproto.Message):
     """E007"""

+    conn_name: str = betterproto.string_field(1)
+
+
+@dataclass
+class ConnectionLeftOpenInCleanupMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    conn_name: str = betterproto.string_field(2)
+    data: "ConnectionLeftOpen" = betterproto.message_field(2)

 @dataclass
 class ConnectionClosedInCleanup(betterproto.Message):
     """E008"""

+    conn_name: str = betterproto.string_field(1)
+
+
+@dataclass
+class ConnectionClosedInCleanupMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    conn_name: str = betterproto.string_field(2)
+    data: "ConnectionClosedInCleanup" = betterproto.message_field(2)

 @dataclass
 class RollbackFailed(betterproto.Message):
     """E009"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    conn_name: str = betterproto.string_field(2)
+    exc_info: str = betterproto.string_field(3)
+
+
+@dataclass
+class RollbackFailedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    conn_name: str = betterproto.string_field(3)
-    exc_info: str = betterproto.string_field(4)
+    data: "RollbackFailed" = betterproto.message_field(2)

 @dataclass
 class ConnectionClosed(betterproto.Message):
     """E010"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    conn_name: str = betterproto.string_field(2)
+
+
+@dataclass
+class ConnectionClosedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    conn_name: str = betterproto.string_field(3)
+    data: "ConnectionClosed" = betterproto.message_field(2)

 @dataclass
 class ConnectionLeftOpen(betterproto.Message):
     """E011"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    conn_name: str = betterproto.string_field(2)
+
+
+@dataclass
+class ConnectionLeftOpenMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    conn_name: str = betterproto.string_field(3)
+    data: "ConnectionLeftOpen" = betterproto.message_field(2)

 @dataclass
 class Rollback(betterproto.Message):
     """E012"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    conn_name: str = betterproto.string_field(2)
+
+
+@dataclass
+class RollbackMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    conn_name: str = betterproto.string_field(3)
+    data: "Rollback" = betterproto.message_field(2)

 @dataclass
 class CacheMiss(betterproto.Message):
     """E013"""

+    conn_name: str = betterproto.string_field(1)
+    database: str = betterproto.string_field(2)
+    schema: str = betterproto.string_field(3)
+
+
+@dataclass
+class CacheMissMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    conn_name: str = betterproto.string_field(2)
-    database: str = betterproto.string_field(3)
-    schema: str = betterproto.string_field(4)
+    data: "CacheMiss" = betterproto.message_field(2)

 @dataclass
 class ListRelations(betterproto.Message):
     """E014"""

+    database: str = betterproto.string_field(1)
+    schema: str = betterproto.string_field(2)
+    relations: List["ReferenceKeyMsg"] = betterproto.message_field(3)
+
+
+@dataclass
+class ListRelationsMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    database: str = betterproto.string_field(2)
-    schema: str = betterproto.string_field(3)
-    relations: List["ReferenceKeyMsg"] = betterproto.message_field(4)
+    data: "ListRelations" = betterproto.message_field(2)

 @dataclass
 class ConnectionUsed(betterproto.Message):
     """E015"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    conn_type: str = betterproto.string_field(2)
+    conn_name: str = betterproto.string_field(3)
+
+
+@dataclass
+class ConnectionUsedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    conn_type: str = betterproto.string_field(3)
-    conn_name: str = betterproto.string_field(4)
+    data: "ConnectionUsed" = betterproto.message_field(2)

 @dataclass
 class SQLQuery(betterproto.Message):
     """E016"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    conn_name: str = betterproto.string_field(2)
+    sql: str = betterproto.string_field(3)
+
+
+@dataclass
+class SQLQueryMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    conn_name: str = betterproto.string_field(3)
-    sql: str = betterproto.string_field(4)
+    data: "SQLQuery" = betterproto.message_field(2)

 @dataclass
 class SQLQueryStatus(betterproto.Message):
     """E017"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    status: str = betterproto.string_field(2)
+    elapsed: float = betterproto.float_field(3)
+
+
+@dataclass
+class SQLQueryStatusMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    status: str = betterproto.string_field(3)
-    elapsed: float = betterproto.float_field(4)
+    data: "SQLQueryStatus" = betterproto.message_field(2)

 @dataclass
 class SQLCommit(betterproto.Message):
     """E018"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    conn_name: str = betterproto.string_field(2)
+
+
+@dataclass
+class SQLCommitMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    conn_name: str = betterproto.string_field(3)
+    data: "SQLCommit" = betterproto.message_field(2)

 @dataclass
 class ColTypeChange(betterproto.Message):
     """E019"""

+    orig_type: str = betterproto.string_field(1)
+    new_type: str = betterproto.string_field(2)
+    table: "ReferenceKeyMsg" = betterproto.message_field(3)
+
+
+@dataclass
+class ColTypeChangeMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    orig_type: str = betterproto.string_field(2)
-    new_type: str = betterproto.string_field(3)
-    table: "ReferenceKeyMsg" = betterproto.message_field(4)
+    data: "ColTypeChange" = betterproto.message_field(2)

 @dataclass
 class SchemaCreation(betterproto.Message):
     """E020"""

+    relation: "ReferenceKeyMsg" = betterproto.message_field(1)
+
+
+@dataclass
+class SchemaCreationMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    relation: "ReferenceKeyMsg" = betterproto.message_field(2)
+    data: "SchemaCreation" = betterproto.message_field(2)

 @dataclass
 class SchemaDrop(betterproto.Message):
     """E021"""

+    relation: "ReferenceKeyMsg" = betterproto.message_field(1)
+
+
+@dataclass
+class SchemaDropMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    relation: "ReferenceKeyMsg" = betterproto.message_field(2)
+    data: "SchemaDrop" = betterproto.message_field(2)

 @dataclass
 class UncachedRelation(betterproto.Message):
     """E022"""

+    dep_key: "ReferenceKeyMsg" = betterproto.message_field(1)
+    ref_key: "ReferenceKeyMsg" = betterproto.message_field(2)
+
+
+@dataclass
+class UncachedRelationMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    dep_key: "ReferenceKeyMsg" = betterproto.message_field(2)
-    ref_key: "ReferenceKeyMsg" = betterproto.message_field(3)
+    data: "UncachedRelation" = betterproto.message_field(2)

 @dataclass
 class AddLink(betterproto.Message):
     """E023"""

+    dep_key: "ReferenceKeyMsg" = betterproto.message_field(1)
+    ref_key: "ReferenceKeyMsg" = betterproto.message_field(2)
+
+
+@dataclass
+class AddLinkMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    dep_key: "ReferenceKeyMsg" = betterproto.message_field(2)
-    ref_key: "ReferenceKeyMsg" = betterproto.message_field(3)
+    data: "AddLink" = betterproto.message_field(2)

 @dataclass
 class AddRelation(betterproto.Message):
     """E024"""

+    relation: "ReferenceKeyMsg" = betterproto.message_field(1)
+
+
+@dataclass
+class AddRelationMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    relation: "ReferenceKeyMsg" = betterproto.message_field(2)
+    data: "AddRelation" = betterproto.message_field(2)

 @dataclass
 class DropMissingRelation(betterproto.Message):
     """E025"""

+    relation: "ReferenceKeyMsg" = betterproto.message_field(1)
+
+
+@dataclass
+class DropMissingRelationMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    relation: "ReferenceKeyMsg" = betterproto.message_field(2)
+    data: "DropMissingRelation" = betterproto.message_field(2)

 @dataclass
 class DropCascade(betterproto.Message):
     """E026"""

+    dropped: "ReferenceKeyMsg" = betterproto.message_field(1)
+    consequences: List["ReferenceKeyMsg"] = betterproto.message_field(2)
+
+
+@dataclass
+class DropCascadeMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    dropped: "ReferenceKeyMsg" = betterproto.message_field(2)
-    consequences: List["ReferenceKeyMsg"] = betterproto.message_field(3)
+    data: "DropCascade" = betterproto.message_field(2)

 @dataclass
 class DropRelation(betterproto.Message):
     """E027"""

+    dropped: "ReferenceKeyMsg" = betterproto.message_field(1)
+
+
+@dataclass
+class DropRelationMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    dropped: "ReferenceKeyMsg" = betterproto.message_field(2)
+    data: "DropRelation" = betterproto.message_field(2)

 @dataclass
 class UpdateReference(betterproto.Message):
     """E028"""

+    old_key: "ReferenceKeyMsg" = betterproto.message_field(1)
+    new_key: "ReferenceKeyMsg" = betterproto.message_field(2)
+    cached_key: "ReferenceKeyMsg" = betterproto.message_field(3)
+
+
+@dataclass
+class UpdateReferenceMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    old_key: "ReferenceKeyMsg" = betterproto.message_field(2)
-    new_key: "ReferenceKeyMsg" = betterproto.message_field(3)
-    cached_key: "ReferenceKeyMsg" = betterproto.message_field(4)
+    data: "UpdateReference" = betterproto.message_field(2)

 @dataclass
 class TemporaryRelation(betterproto.Message):
     """E029"""

+    key: "ReferenceKeyMsg" = betterproto.message_field(1)
+
+
+@dataclass
+class TemporaryRelationMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    key: "ReferenceKeyMsg" = betterproto.message_field(2)
+    data: "TemporaryRelation" = betterproto.message_field(2)

 @dataclass
 class RenameSchema(betterproto.Message):
     """E030"""

+    old_key: "ReferenceKeyMsg" = betterproto.message_field(1)
+    new_key: "ReferenceKeyMsg" = betterproto.message_field(2)
+
+
+@dataclass
+class RenameSchemaMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    old_key: "ReferenceKeyMsg" = betterproto.message_field(2)
-    new_key: "ReferenceKeyMsg" = betterproto.message_field(3)
+    data: "RenameSchema" = betterproto.message_field(2)

 @dataclass
 class DumpBeforeAddGraph(betterproto.Message):
     """E031"""

-    info: "EventInfo" = betterproto.message_field(1)
     dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
-        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
+        1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
     )


+@dataclass
+class DumpBeforeAddGraphMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "DumpBeforeAddGraph" = betterproto.message_field(2)
+
+
 @dataclass
 class DumpAfterAddGraph(betterproto.Message):
     """E032"""

-    info: "EventInfo" = betterproto.message_field(1)
     dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
-        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
+        1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
     )


+@dataclass
+class DumpAfterAddGraphMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "DumpAfterAddGraph" = betterproto.message_field(2)
+
+
 @dataclass
 class DumpBeforeRenameSchema(betterproto.Message):
     """E033"""

-    info: "EventInfo" = betterproto.message_field(1)
     dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
-        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
+        1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
     )


+@dataclass
+class DumpBeforeRenameSchemaMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "DumpBeforeRenameSchema" = betterproto.message_field(2)
+
+
 @dataclass
 class DumpAfterRenameSchema(betterproto.Message):
     """E034"""

-    info: "EventInfo" = betterproto.message_field(1)
     dump: Dict[str, "ListOfStrings"] = betterproto.map_field(
-        2, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
+        1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE
     )


+@dataclass
+class DumpAfterRenameSchemaMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "DumpAfterRenameSchema" = betterproto.message_field(2)
+
+
 @dataclass
 class AdapterImportError(betterproto.Message):
     """E035"""

+    exc: str = betterproto.string_field(1)
+
+
+@dataclass
+class AdapterImportErrorMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    exc: str = betterproto.string_field(2)
+    data: "AdapterImportError" = betterproto.message_field(2)

 @dataclass
 class PluginLoadError(betterproto.Message):
     """E036"""

+    exc_info: str = betterproto.string_field(1)
+
+
+@dataclass
+class PluginLoadErrorMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    exc_info: str = betterproto.string_field(2)
+    data: "PluginLoadError" = betterproto.message_field(2)

 @dataclass
 class NewConnectionOpening(betterproto.Message):
     """E037"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    connection_state: str = betterproto.string_field(2)
+
+
+@dataclass
+class NewConnectionOpeningMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    connection_state: str = betterproto.string_field(3)
+    data: "NewConnectionOpening" = betterproto.message_field(2)

 @dataclass
 class CodeExecution(betterproto.Message):
     """E038"""

+    conn_name: str = betterproto.string_field(1)
+    code_content: str = betterproto.string_field(2)
+
+
+@dataclass
+class CodeExecutionMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    conn_name: str = betterproto.string_field(2)
-    code_content: str = betterproto.string_field(3)
+    data: "CodeExecution" = betterproto.message_field(2)

 @dataclass
 class CodeExecutionStatus(betterproto.Message):
     """E039"""

+    status: str = betterproto.string_field(1)
+    elapsed: float = betterproto.float_field(2)
+
+
+@dataclass
+class CodeExecutionStatusMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    status: str = betterproto.string_field(2)
-    elapsed: float = betterproto.float_field(3)
+    data: "CodeExecutionStatus" = betterproto.message_field(2)

 @dataclass
 class CatalogGenerationError(betterproto.Message):
     """E040"""

-    info: "EventInfo" = betterproto.message_field(1)
-    exc: str = betterproto.string_field(2)
+    exc: str = betterproto.string_field(1)

 @dataclass
-class WriteCatalogFailure(betterproto.Message):
-    """E041"""
-
+class CatalogGenerationErrorMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    num_exceptions: int = betterproto.int32_field(2)
+    data: "CatalogGenerationError" = betterproto.message_field(2)

 @dataclass
-class CatalogWritten(betterproto.Message):
-    """E042"""
+class WriteCatalogFailure(betterproto.Message):
+    """E041"""

-    info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    num_exceptions: int = betterproto.int32_field(1)

 @dataclass
-class CannotGenerateDocs(betterproto.Message):
-    """E043"""
-
+class WriteCatalogFailureMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "WriteCatalogFailure" = betterproto.message_field(2)

 @dataclass
-class BuildingCatalog(betterproto.Message):
-    """E044"""
+class CatalogWritten(betterproto.Message):
+    """E042"""

-    info: "EventInfo" = betterproto.message_field(1)
+    path: str = betterproto.string_field(1)

 @dataclass
-class DatabaseErrorRunningHook(betterproto.Message):
-    """E045"""
-
+class CatalogWrittenMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    hook_type: str = betterproto.string_field(2)
+    data: "CatalogWritten" = betterproto.message_field(2)

 @dataclass
-class HooksRunning(betterproto.Message):
-    """E046"""
+class CannotGenerateDocs(betterproto.Message):
+    """E043"""

-    info: "EventInfo" = betterproto.message_field(1)
-    num_hooks: int = betterproto.int32_field(2)
-    hook_type: str = betterproto.string_field(3)
+    pass

 @dataclass
-class HookFinished(betterproto.Message):
-    """E047"""
-
+class CannotGenerateDocsMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    stat_line: str = betterproto.string_field(2)
-    execution: str = betterproto.string_field(3)
-    execution_time: float = betterproto.float_field(4)
+    data: "CannotGenerateDocs" = betterproto.message_field(2)

 @dataclass
-class ParseCmdStart(betterproto.Message):
-    """I001"""
+class BuildingCatalog(betterproto.Message):
+    """E044"""

-    info: "EventInfo" = betterproto.message_field(1)
+    pass

 @dataclass
-class ParseCmdCompiling(betterproto.Message):
-    """I002"""
-
+class BuildingCatalogMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "BuildingCatalog" = betterproto.message_field(2)

 @dataclass
-class ParseCmdWritingManifest(betterproto.Message):
-    """I003"""
+class DatabaseErrorRunningHook(betterproto.Message):
+    """E045"""

-    info: "EventInfo" = betterproto.message_field(1)
+    hook_type: str = betterproto.string_field(1)

 @dataclass
-class ParseCmdDone(betterproto.Message):
-    """I004"""
-
+class DatabaseErrorRunningHookMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "DatabaseErrorRunningHook" = betterproto.message_field(2)

 @dataclass
-class ManifestDependenciesLoaded(betterproto.Message):
-    """I005"""
+class HooksRunning(betterproto.Message):
+    """E046"""

-    info: "EventInfo" = betterproto.message_field(1)
+    num_hooks: int = betterproto.int32_field(1)
+    hook_type: str = betterproto.string_field(2)

 @dataclass
-class ManifestLoaderCreated(betterproto.Message):
-    """I006"""
-
+class HooksRunningMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "HooksRunning" = betterproto.message_field(2)

 @dataclass
-class ManifestLoaded(betterproto.Message):
-    """I007"""
+class HookFinished(betterproto.Message):
+    """E047"""

-    info: "EventInfo" = betterproto.message_field(1)
+    stat_line: str = betterproto.string_field(1)
+    execution: str = betterproto.string_field(2)
+    execution_time: float = betterproto.float_field(3)

 @dataclass
-class ManifestChecked(betterproto.Message):
-    """I008"""
-
+class HookFinishedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "HookFinished" = betterproto.message_field(2)

 @dataclass
-class ManifestFlatGraphBuilt(betterproto.Message):
-    """I009"""
+class ParseCmdOut(betterproto.Message):
+    """I001"""

-    info: "EventInfo" = betterproto.message_field(1)
+    msg: str = betterproto.string_field(1)

 @dataclass
-class ParseCmdPerfInfoPath(betterproto.Message):
-    """I010"""
-
+class ParseCmdOutMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "ParseCmdOut" = betterproto.message_field(2)

 @dataclass
 class GenericTestFileParse(betterproto.Message):
     """I011"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class GenericTestFileParseMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "GenericTestFileParse" = betterproto.message_field(2)

 @dataclass
 class MacroFileParse(betterproto.Message):
     """I012"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class MacroFileParseMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "MacroFileParse" = betterproto.message_field(2)

 @dataclass
 class PartialParsingExceptionProcessingFile(betterproto.Message):
     """I014"""

+    file: str = betterproto.string_field(1)
+
+
+@dataclass
+class PartialParsingExceptionProcessingFileMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    file: str = betterproto.string_field(2)
+    data: "PartialParsingExceptionProcessingFile" = betterproto.message_field(2)

 @dataclass
 class PartialParsingException(betterproto.Message):
     """I016"""

-    info: "EventInfo" = betterproto.message_field(1)
     exc_info: Dict[str, str] = betterproto.map_field(
-        2, betterproto.TYPE_STRING, betterproto.TYPE_STRING
+        1, betterproto.TYPE_STRING, betterproto.TYPE_STRING
     )


+@dataclass
+class PartialParsingExceptionMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "PartialParsingException" = betterproto.message_field(2)
+
+
 @dataclass
 class PartialParsingSkipParsing(betterproto.Message):
     """I017"""

+    pass
+
+
+@dataclass
+class PartialParsingSkipParsingMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "PartialParsingSkipParsing" = betterproto.message_field(2)

 @dataclass
 class UnableToPartialParse(betterproto.Message):
     """I024"""

+    reason: str = betterproto.string_field(1)
+
+
+@dataclass
+class UnableToPartialParseMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    reason: str = betterproto.string_field(2)
+    data: "UnableToPartialParse" = betterproto.message_field(2)

 @dataclass
 class PartialParsingNotEnabled(betterproto.Message):
     """I028"""

+    pass
+
+
+@dataclass
+class PartialParsingNotEnabledMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "PartialParsingNotEnabled" = betterproto.message_field(2)

 @dataclass
 class ParsedFileLoadFailed(betterproto.Message):
     """I029"""

+    path: str = betterproto.string_field(1)
+    exc: str = betterproto.string_field(2)
+    exc_info: str = betterproto.string_field(3)
+
+
+@dataclass
+class ParsedFileLoadFailedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
-    exc: str = betterproto.string_field(3)
-    exc_info: str = betterproto.string_field(4)
+    data: "ParsedFileLoadFailed" = betterproto.message_field(2)

 @dataclass
 class StaticParserCausedJinjaRendering(betterproto.Message):
     """I031"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class StaticParserCausedJinjaRenderingMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "StaticParserCausedJinjaRendering" = betterproto.message_field(2)

 @dataclass
 class UsingExperimentalParser(betterproto.Message):
     """I032"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class UsingExperimentalParserMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "UsingExperimentalParser" = betterproto.message_field(2)

 @dataclass
 class SampleFullJinjaRendering(betterproto.Message):
     """I033"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class SampleFullJinjaRenderingMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "SampleFullJinjaRendering" = betterproto.message_field(2)

 @dataclass
 class StaticParserFallbackJinjaRendering(betterproto.Message):
     """I034"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class StaticParserFallbackJinjaRenderingMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "StaticParserFallbackJinjaRendering" = betterproto.message_field(2)

 @dataclass
 class StaticParsingMacroOverrideDetected(betterproto.Message):
     """I035"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class StaticParsingMacroOverrideDetectedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "StaticParsingMacroOverrideDetected" = betterproto.message_field(2)

 @dataclass
 class StaticParserSuccess(betterproto.Message):
     """I036"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class StaticParserSuccessMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "StaticParserSuccess" = betterproto.message_field(2)

 @dataclass
 class StaticParserFailure(betterproto.Message):
     """I037"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class StaticParserFailureMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "StaticParserFailure" = betterproto.message_field(2)

 @dataclass
 class ExperimentalParserSuccess(betterproto.Message):
     """I038"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class ExperimentalParserSuccessMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "ExperimentalParserSuccess" = betterproto.message_field(2)

 @dataclass
 class ExperimentalParserFailure(betterproto.Message):
     """I039"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class ExperimentalParserFailureMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "ExperimentalParserFailure" = betterproto.message_field(2)

 @dataclass
 class PartialParsingEnabled(betterproto.Message):
     """I040"""

+    deleted: int = betterproto.int32_field(1)
+    added: int = betterproto.int32_field(2)
+    changed: int = betterproto.int32_field(3)
+
+
+@dataclass
+class PartialParsingEnabledMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    deleted: int = betterproto.int32_field(2)
-    added: int = betterproto.int32_field(3)
-    changed: int = betterproto.int32_field(4)
+    data: "PartialParsingEnabled" = betterproto.message_field(2)

 @dataclass
 class PartialParsingFile(betterproto.Message):
     """I041"""

+    file_id: str = betterproto.string_field(1)
+    operation: str = betterproto.string_field(2)
+
+
+@dataclass
+class PartialParsingFileMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    file_id: str = betterproto.string_field(2)
-    operation: str = betterproto.string_field(3)
+    data: "PartialParsingFile" = betterproto.message_field(2)

 @dataclass
 class InvalidDisabledTargetInTestNode(betterproto.Message):
     """I050"""

+    resource_type_title: str = betterproto.string_field(1)
+    unique_id: str = betterproto.string_field(2)
+    original_file_path: str = betterproto.string_field(3)
+    target_kind: str = betterproto.string_field(4)
+    target_name: str = betterproto.string_field(5)
+    target_package: str = betterproto.string_field(6)
+
+
+@dataclass
+class InvalidDisabledTargetInTestNodeMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    resource_type_title: str = betterproto.string_field(2)
-    unique_id: str = betterproto.string_field(3)
-    original_file_path: str = betterproto.string_field(4)
-    target_kind: str = betterproto.string_field(5)
-    target_name: str = betterproto.string_field(6)
-    target_package: str = betterproto.string_field(7)
+    data: "InvalidDisabledTargetInTestNode" = betterproto.message_field(2)

 @dataclass
 class UnusedResourceConfigPath(betterproto.Message):
     """I051"""

+    unused_config_paths: List[str] = betterproto.string_field(1)
+
+
+@dataclass
+class UnusedResourceConfigPathMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    unused_config_paths: List[str] = betterproto.string_field(2)
+    data: "UnusedResourceConfigPath" = betterproto.message_field(2)

 @dataclass
 class SeedIncreased(betterproto.Message):
     """I052"""

+    package_name: str = betterproto.string_field(1)
+    name: str = betterproto.string_field(2)
+
+
+@dataclass
+class SeedIncreasedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    package_name: str = betterproto.string_field(2)
-    name: str = betterproto.string_field(3)
+    data: "SeedIncreased" = betterproto.message_field(2)

 @dataclass
 class SeedExceedsLimitSamePath(betterproto.Message):
     """I053"""

+    package_name: str = betterproto.string_field(1)
+    name: str = betterproto.string_field(2)
+
+
+@dataclass
+class SeedExceedsLimitSamePathMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    package_name: str = betterproto.string_field(2)
-    name: str = betterproto.string_field(3)
+    data: "SeedExceedsLimitSamePath" = betterproto.message_field(2)

 @dataclass
 class SeedExceedsLimitAndPathChanged(betterproto.Message):
     """I054"""

+    package_name: str = betterproto.string_field(1)
+    name: str = betterproto.string_field(2)
+
+
+@dataclass
+class SeedExceedsLimitAndPathChangedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    package_name: str = betterproto.string_field(2)
-    name: str = betterproto.string_field(3)
+    data: "SeedExceedsLimitAndPathChanged" = betterproto.message_field(2)

 @dataclass
 class SeedExceedsLimitChecksumChanged(betterproto.Message):
     """I055"""

+    package_name: str = betterproto.string_field(1)
+    name: str = betterproto.string_field(2)
+    checksum_name: str = betterproto.string_field(3)
+
+
+@dataclass
+class SeedExceedsLimitChecksumChangedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    package_name: str = betterproto.string_field(2)
-    name: str = betterproto.string_field(3)
-    checksum_name: str = betterproto.string_field(4)
+    data: "SeedExceedsLimitChecksumChanged" = betterproto.message_field(2)

 @dataclass
 class UnusedTables(betterproto.Message):
     """I056"""

+    unused_tables: List[str] = betterproto.string_field(1)
+
+
+@dataclass
+class UnusedTablesMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    unused_tables: List[str] = betterproto.string_field(2)
+    data: "UnusedTables" = betterproto.message_field(2)

 @dataclass
 class WrongResourceSchemaFile(betterproto.Message):
     """I057"""

+    patch_name: str = betterproto.string_field(1)
+    resource_type: str = betterproto.string_field(2)
+    plural_resource_type: str = betterproto.string_field(3)
+    yaml_key: str = betterproto.string_field(4)
+    file_path: str = betterproto.string_field(5)
+
+
+@dataclass
+class WrongResourceSchemaFileMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    patch_name: str = betterproto.string_field(2)
-    resource_type: str = betterproto.string_field(3)
-    plural_resource_type: str = betterproto.string_field(4)
-    yaml_key: str = betterproto.string_field(5)
-    file_path: str = betterproto.string_field(6)
+    data: "WrongResourceSchemaFile" = betterproto.message_field(2)

 @dataclass
 class NoNodeForYamlKey(betterproto.Message):
     """I058"""

+    patch_name: str = betterproto.string_field(1)
+    yaml_key: str = betterproto.string_field(2)
+    file_path: str = betterproto.string_field(3)
+
+
+@dataclass
+class NoNodeForYamlKeyMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    patch_name: str = betterproto.string_field(2)
-    yaml_key: str = betterproto.string_field(3)
-    file_path: str = betterproto.string_field(4)
+    data: "NoNodeForYamlKey" = betterproto.message_field(2)
 @dataclass
-class MacroPatchNotFound(betterproto.Message):
+class MacroNotFoundForPatch(betterproto.Message):
     """I059"""

+    patch_name: str = betterproto.string_field(1)
+
+
+@dataclass
+class MacroNotFoundForPatchMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    patch_name: str = betterproto.string_field(2)
+    data: "MacroNotFoundForPatch" = betterproto.message_field(2)

 @dataclass
 class NodeNotFoundOrDisabled(betterproto.Message):
     """I060"""

+    original_file_path: str = betterproto.string_field(1)
+    unique_id: str = betterproto.string_field(2)
+    resource_type_title: str = betterproto.string_field(3)
+    target_name: str = betterproto.string_field(4)
+    target_kind: str = betterproto.string_field(5)
+    target_package: str = betterproto.string_field(6)
+    disabled: str = betterproto.string_field(7)
+
+
+@dataclass
+class NodeNotFoundOrDisabledMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    original_file_path: str = betterproto.string_field(2)
-    unique_id: str = betterproto.string_field(3)
-    resource_type_title: str = betterproto.string_field(4)
-    target_name: str = betterproto.string_field(5)
-    target_kind: str = betterproto.string_field(6)
-    target_package: str = betterproto.string_field(7)
-    disabled: str = betterproto.string_field(8)
+    data: "NodeNotFoundOrDisabled" = betterproto.message_field(2)

 @dataclass
 class JinjaLogWarning(betterproto.Message):
     """I061"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    msg: str = betterproto.string_field(2)
+
+
+@dataclass
+class JinjaLogWarningMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    msg: str = betterproto.string_field(3)
+    data: "JinjaLogWarning" = betterproto.message_field(2)

 @dataclass
 class GitSparseCheckoutSubdirectory(betterproto.Message):
     """M001"""

+    subdir: str = betterproto.string_field(1)
+
+
+@dataclass
+class GitSparseCheckoutSubdirectoryMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    subdir: str = betterproto.string_field(2)
+    data: "GitSparseCheckoutSubdirectory" = betterproto.message_field(2)

 @dataclass
 class GitProgressCheckoutRevision(betterproto.Message):
     """M002"""

+    revision: str = betterproto.string_field(1)
+
+
+@dataclass
+class GitProgressCheckoutRevisionMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    revision: str = betterproto.string_field(2)
+    data: "GitProgressCheckoutRevision" = betterproto.message_field(2)

 @dataclass
 class GitProgressUpdatingExistingDependency(betterproto.Message):
     """M003"""

+    dir: str = betterproto.string_field(1)
+
+
+@dataclass
+class GitProgressUpdatingExistingDependencyMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    dir: str = betterproto.string_field(2)
+    data: "GitProgressUpdatingExistingDependency" = betterproto.message_field(2)

 @dataclass
 class GitProgressPullingNewDependency(betterproto.Message):
     """M004"""

+    dir: str = betterproto.string_field(1)
+
+
+@dataclass
+class GitProgressPullingNewDependencyMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    dir: str = betterproto.string_field(2)
+    data: "GitProgressPullingNewDependency" = betterproto.message_field(2)

 @dataclass
 class GitNothingToDo(betterproto.Message):
     """M005"""

+    sha: str = betterproto.string_field(1)
+
+
+@dataclass
+class GitNothingToDoMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    sha: str = betterproto.string_field(2)
+    data: "GitNothingToDo" = betterproto.message_field(2)
"GitNothingToDo" = betterproto.message_field(2) @dataclass class GitProgressUpdatedCheckoutRange(betterproto.Message): """M006""" + start_sha: str = betterproto.string_field(1) + end_sha: str = betterproto.string_field(2) + + +@dataclass +class GitProgressUpdatedCheckoutRangeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - start_sha: str = betterproto.string_field(2) - end_sha: str = betterproto.string_field(3) + data: "GitProgressUpdatedCheckoutRange" = betterproto.message_field(2) @dataclass class GitProgressCheckedOutAt(betterproto.Message): """M007""" + end_sha: str = betterproto.string_field(1) + + +@dataclass +class GitProgressCheckedOutAtMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - end_sha: str = betterproto.string_field(2) + data: "GitProgressCheckedOutAt" = betterproto.message_field(2) @dataclass class RegistryProgressGETRequest(betterproto.Message): """M008""" + url: str = betterproto.string_field(1) + + +@dataclass +class RegistryProgressGETRequestMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - url: str = betterproto.string_field(2) + data: "RegistryProgressGETRequest" = betterproto.message_field(2) @dataclass class RegistryProgressGETResponse(betterproto.Message): """M009""" + url: str = betterproto.string_field(1) + resp_code: int = betterproto.int32_field(2) + + +@dataclass +class RegistryProgressGETResponseMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - url: str = betterproto.string_field(2) - resp_code: int = betterproto.int32_field(3) + data: "RegistryProgressGETResponse" = betterproto.message_field(2) @dataclass class SelectorReportInvalidSelector(betterproto.Message): """M010""" + valid_selectors: str = betterproto.string_field(1) + spec_method: str = betterproto.string_field(2) + raw_spec: str = betterproto.string_field(3) + + +@dataclass +class SelectorReportInvalidSelectorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - valid_selectors: str = betterproto.string_field(2) - spec_method: str = betterproto.string_field(3) - raw_spec: str = betterproto.string_field(4) + data: "SelectorReportInvalidSelector" = betterproto.message_field(2) @dataclass class JinjaLogInfo(betterproto.Message): """M011""" + node_info: "NodeInfo" = betterproto.message_field(1) + msg: str = betterproto.string_field(2) + + +@dataclass +class JinjaLogInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - msg: str = betterproto.string_field(3) + data: "JinjaLogInfo" = betterproto.message_field(2) @dataclass class JinjaLogDebug(betterproto.Message): """M012""" + node_info: "NodeInfo" = betterproto.message_field(1) + msg: str = betterproto.string_field(2) + + +@dataclass +class JinjaLogDebugMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - msg: str = betterproto.string_field(3) + data: "JinjaLogDebug" = betterproto.message_field(2) @dataclass class DepsNoPackagesFound(betterproto.Message): """M013""" + pass + + +@dataclass +class DepsNoPackagesFoundMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DepsNoPackagesFound" = betterproto.message_field(2) @dataclass class DepsStartPackageInstall(betterproto.Message): """M014""" + package_name: str = betterproto.string_field(1) + + +@dataclass +class DepsStartPackageInstallMsg(betterproto.Message): info: "EventInfo" = 
-    package_name: str = betterproto.string_field(2)
+    data: "DepsStartPackageInstall" = betterproto.message_field(2)

 @dataclass
 class DepsInstallInfo(betterproto.Message):
     """M015"""

+    version_name: str = betterproto.string_field(1)
+
+
+@dataclass
+class DepsInstallInfoMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    version_name: str = betterproto.string_field(2)
+    data: "DepsInstallInfo" = betterproto.message_field(2)

 @dataclass
 class DepsUpdateAvailable(betterproto.Message):
     """M016"""

+    version_latest: str = betterproto.string_field(1)
+
+
+@dataclass
+class DepsUpdateAvailableMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    version_latest: str = betterproto.string_field(2)
+    data: "DepsUpdateAvailable" = betterproto.message_field(2)

 @dataclass
 class DepsUpToDate(betterproto.Message):
     """M017"""

+    pass
+
+
+@dataclass
+class DepsUpToDateMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "DepsUpToDate" = betterproto.message_field(2)

 @dataclass
 class DepsListSubdirectory(betterproto.Message):
     """M018"""

+    subdirectory: str = betterproto.string_field(1)
+
+
+@dataclass
+class DepsListSubdirectoryMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    subdirectory: str = betterproto.string_field(2)
+    data: "DepsListSubdirectory" = betterproto.message_field(2)

 @dataclass
 class DepsNotifyUpdatesAvailable(betterproto.Message):
     """M019"""

+    packages: "ListOfStrings" = betterproto.message_field(1)
+
+
+@dataclass
+class DepsNotifyUpdatesAvailableMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    packages: "ListOfStrings" = betterproto.message_field(2)
+    data: "DepsNotifyUpdatesAvailable" = betterproto.message_field(2)

 @dataclass
 class RetryExternalCall(betterproto.Message):
     """M020"""

+    attempt: int = betterproto.int32_field(1)
+    max: int = betterproto.int32_field(2)
+
+
+@dataclass
+class RetryExternalCallMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    attempt: int = betterproto.int32_field(2)
-    max: int = betterproto.int32_field(3)
+    data: "RetryExternalCall" = betterproto.message_field(2)

 @dataclass
 class RecordRetryException(betterproto.Message):
     """M021"""

+    exc: str = betterproto.string_field(1)
+
+
+@dataclass
+class RecordRetryExceptionMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    exc: str = betterproto.string_field(2)
+    data: "RecordRetryException" = betterproto.message_field(2)

 @dataclass
 class RegistryIndexProgressGETRequest(betterproto.Message):
     """M022"""

+    url: str = betterproto.string_field(1)
+
+
+@dataclass
+class RegistryIndexProgressGETRequestMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    url: str = betterproto.string_field(2)
+    data: "RegistryIndexProgressGETRequest" = betterproto.message_field(2)

 @dataclass
 class RegistryIndexProgressGETResponse(betterproto.Message):
     """M023"""

+    url: str = betterproto.string_field(1)
+    resp_code: int = betterproto.int32_field(2)
+
+
+@dataclass
+class RegistryIndexProgressGETResponseMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    url: str = betterproto.string_field(2)
-    resp_code: int = betterproto.int32_field(3)
+    data: "RegistryIndexProgressGETResponse" = betterproto.message_field(2)

 @dataclass
 class RegistryResponseUnexpectedType(betterproto.Message):
     """M024"""

+    response: str = betterproto.string_field(1)
+
+
+@dataclass
+class RegistryResponseUnexpectedTypeMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    response: str = betterproto.string_field(2)
+    data: "RegistryResponseUnexpectedType" = betterproto.message_field(2)

 @dataclass
 class RegistryResponseMissingTopKeys(betterproto.Message):
     """M025"""

+    response: str = betterproto.string_field(1)
+
+
+@dataclass
+class RegistryResponseMissingTopKeysMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    response: str = betterproto.string_field(2)
+    data: "RegistryResponseMissingTopKeys" = betterproto.message_field(2)

 @dataclass
 class RegistryResponseMissingNestedKeys(betterproto.Message):
     """M026"""

+    response: str = betterproto.string_field(1)
+
+
+@dataclass
+class RegistryResponseMissingNestedKeysMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    response: str = betterproto.string_field(2)
+    data: "RegistryResponseMissingNestedKeys" = betterproto.message_field(2)

 @dataclass
 class RegistryResponseExtraNestedKeys(betterproto.Message):
     """m027"""

+    response: str = betterproto.string_field(1)
+
+
+@dataclass
+class RegistryResponseExtraNestedKeysMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    response: str = betterproto.string_field(2)
+    data: "RegistryResponseExtraNestedKeys" = betterproto.message_field(2)

 @dataclass
 class DepsSetDownloadDirectory(betterproto.Message):
     """M028"""

+    path: str = betterproto.string_field(1)
+
+
+@dataclass
+class DepsSetDownloadDirectoryMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    path: str = betterproto.string_field(2)
+    data: "DepsSetDownloadDirectory" = betterproto.message_field(2)

 @dataclass
 class DepsUnpinned(betterproto.Message):
     """M029"""

+    revision: str = betterproto.string_field(1)
+    git: str = betterproto.string_field(2)
+
+
+@dataclass
+class DepsUnpinnedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    revision: str = betterproto.string_field(2)
-    git: str = betterproto.string_field(3)
+    data: "DepsUnpinned" = betterproto.message_field(2)

 @dataclass
 class NoNodesForSelectionCriteria(betterproto.Message):
     """M030"""

+    spec_raw: str = betterproto.string_field(1)
+
+
+@dataclass
+class NoNodesForSelectionCriteriaMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    spec_raw: str = betterproto.string_field(2)
+    data: "NoNodesForSelectionCriteria" = betterproto.message_field(2)

 @dataclass
 class RunningOperationCaughtError(betterproto.Message):
     """Q001"""

+    exc: str = betterproto.string_field(1)
+
+
+@dataclass
+class RunningOperationCaughtErrorMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    exc: str = betterproto.string_field(2)
+    data: "RunningOperationCaughtError" = betterproto.message_field(2)

 @dataclass
 class CompileComplete(betterproto.Message):
     """Q002"""

+    pass
+
+
+@dataclass
+class CompileCompleteMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "CompileComplete" = betterproto.message_field(2)

 @dataclass
 class FreshnessCheckComplete(betterproto.Message):
     """Q003"""

+    pass
+
+
+@dataclass
+class FreshnessCheckCompleteMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "FreshnessCheckComplete" = betterproto.message_field(2)

 @dataclass
 class SeedHeader(betterproto.Message):
     """Q004"""

+    header: str = betterproto.string_field(1)
+
+
+@dataclass
+class SeedHeaderMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    header: str = betterproto.string_field(2)
+    data: "SeedHeader" = betterproto.message_field(2)

 @dataclass
 class SeedHeaderSeparator(betterproto.Message):
     """Q005"""

+    len_header: int = betterproto.int32_field(1)
+
+
+@dataclass
+class SeedHeaderSeparatorMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    len_header: int = betterproto.int32_field(2)
+    data: "SeedHeaderSeparator" = betterproto.message_field(2)

 @dataclass
 class SQLRunnerException(betterproto.Message):
     """Q006"""

+    exc: str = betterproto.string_field(1)
+    exc_info: str = betterproto.string_field(2)
+
+
+@dataclass
+class SQLRunnerExceptionMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    exc: str = betterproto.string_field(2)
-    exc_info: str = betterproto.string_field(3)
+    data: "SQLRunnerException" = betterproto.message_field(2)

 @dataclass
 class LogTestResult(betterproto.Message):
     """Q007"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    name: str = betterproto.string_field(2)
+    status: str = betterproto.string_field(3)
+    index: int = betterproto.int32_field(4)
+    num_models: int = betterproto.int32_field(5)
+    execution_time: float = betterproto.float_field(6)
+    num_failures: int = betterproto.int32_field(7)
+
+
+@dataclass
+class LogTestResultMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    name: str = betterproto.string_field(3)
-    status: str = betterproto.string_field(4)
-    index: int = betterproto.int32_field(5)
-    num_models: int = betterproto.int32_field(6)
-    execution_time: float = betterproto.float_field(7)
-    num_failures: int = betterproto.int32_field(8)
+    data: "LogTestResult" = betterproto.message_field(2)

 @dataclass
 class LogStartLine(betterproto.Message):
     """Q011"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    description: str = betterproto.string_field(2)
+    index: int = betterproto.int32_field(3)
+    total: int = betterproto.int32_field(4)
+
+
+@dataclass
+class LogStartLineMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    description: str = betterproto.string_field(3)
-    index: int = betterproto.int32_field(4)
-    total: int = betterproto.int32_field(5)
+    data: "LogStartLine" = betterproto.message_field(2)

 @dataclass
 class LogModelResult(betterproto.Message):
     """Q012"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    description: str = betterproto.string_field(2)
+    status: str = betterproto.string_field(3)
+    index: int = betterproto.int32_field(4)
+    total: int = betterproto.int32_field(5)
+    execution_time: int = betterproto.int32_field(6)
+
+
+@dataclass
+class LogModelResultMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    description: str = betterproto.string_field(3)
-    status: str = betterproto.string_field(4)
-    index: int = betterproto.int32_field(5)
-    total: int = betterproto.int32_field(6)
-    execution_time: int = betterproto.int32_field(7)
+    data: "LogModelResult" = betterproto.message_field(2)

 @dataclass
 class LogSnapshotResult(betterproto.Message):
     """Q015"""

-    info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    description: str = betterproto.string_field(3)
-    status: str = betterproto.string_field(4)
-    index: int = betterproto.int32_field(5)
-    total: int = betterproto.int32_field(6)
-    execution_time: float = betterproto.float_field(7)
+    node_info: "NodeInfo" = betterproto.message_field(1)
+    description: str = betterproto.string_field(2)
+    status: str = betterproto.string_field(3)
+    index: int = betterproto.int32_field(4)
+    total: int = betterproto.int32_field(5)
+    execution_time: float = betterproto.float_field(6)
     cfg: Dict[str, str] = betterproto.map_field(
-        8, betterproto.TYPE_STRING, betterproto.TYPE_STRING
+        7, betterproto.TYPE_STRING, betterproto.TYPE_STRING
     )


+@dataclass
+class LogSnapshotResultMsg(betterproto.Message):
+    info: "EventInfo" = betterproto.message_field(1)
+    data: "LogSnapshotResult" = betterproto.message_field(2)
+
+
 @dataclass
 class LogSeedResult(betterproto.Message):
     """Q016"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    status: str = betterproto.string_field(2)
+    result_message: str = betterproto.string_field(3)
+    index: int = betterproto.int32_field(4)
+    total: int = betterproto.int32_field(5)
+    execution_time: float = betterproto.float_field(6)
+    schema: str = betterproto.string_field(7)
+    relation: str = betterproto.string_field(8)
+
+
+@dataclass
+class LogSeedResultMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    status: str = betterproto.string_field(3)
-    result_message: str = betterproto.string_field(4)
-    index: int = betterproto.int32_field(5)
-    total: int = betterproto.int32_field(6)
-    execution_time: float = betterproto.float_field(7)
-    schema: str = betterproto.string_field(8)
-    relation: str = betterproto.string_field(9)
+    data: "LogSeedResult" = betterproto.message_field(2)

 @dataclass
 class LogFreshnessResult(betterproto.Message):
     """Q018"""

+    status: str = betterproto.string_field(1)
+    node_info: "NodeInfo" = betterproto.message_field(2)
+    index: int = betterproto.int32_field(3)
+    total: int = betterproto.int32_field(4)
+    execution_time: float = betterproto.float_field(5)
+    source_name: str = betterproto.string_field(6)
+    table_name: str = betterproto.string_field(7)
+
+
+@dataclass
+class LogFreshnessResultMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    status: str = betterproto.string_field(2)
-    node_info: "NodeInfo" = betterproto.message_field(3)
-    index: int = betterproto.int32_field(4)
-    total: int = betterproto.int32_field(5)
-    execution_time: float = betterproto.float_field(6)
-    source_name: str = betterproto.string_field(7)
-    table_name: str = betterproto.string_field(8)
+    data: "LogFreshnessResult" = betterproto.message_field(2)

 @dataclass
 class LogCancelLine(betterproto.Message):
     """Q022"""

+    conn_name: str = betterproto.string_field(1)
+
+
+@dataclass
+class LogCancelLineMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    conn_name: str = betterproto.string_field(2)
+    data: "LogCancelLine" = betterproto.message_field(2)

 @dataclass
 class DefaultSelector(betterproto.Message):
     """Q023"""

+    name: str = betterproto.string_field(1)
+
+
+@dataclass
+class DefaultSelectorMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    name: str = betterproto.string_field(2)
+    data: "DefaultSelector" = betterproto.message_field(2)

 @dataclass
 class NodeStart(betterproto.Message):
     """Q024"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+
+
+@dataclass
+class NodeStartMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
+    data: "NodeStart" = betterproto.message_field(2)

 @dataclass
 class NodeFinished(betterproto.Message):
     """Q025"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    run_result: "RunResultMsg" = betterproto.message_field(2)
+
+
+@dataclass
+class NodeFinishedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    run_result: "RunResultMsg" = betterproto.message_field(4)
+    data: "NodeFinished" = betterproto.message_field(2)

 @dataclass
 class QueryCancelationUnsupported(betterproto.Message):
     """Q026"""

+    type: str = betterproto.string_field(1)
+
+
+@dataclass
+class QueryCancelationUnsupportedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    type: str = betterproto.string_field(2)
+    data: "QueryCancelationUnsupported" = betterproto.message_field(2)

 @dataclass
 class ConcurrencyLine(betterproto.Message):
     """Q027"""

+    num_threads: int = betterproto.int32_field(1)
+    target_name: str = betterproto.string_field(2)
+    node_count: int = betterproto.int32_field(3)
+
+
+@dataclass
+class ConcurrencyLineMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    num_threads: int = betterproto.int32_field(2)
-    target_name: str = betterproto.string_field(3)
-    node_count: int = betterproto.int32_field(4)
+    data: "ConcurrencyLine" = betterproto.message_field(2)

 @dataclass
 class WritingInjectedSQLForNode(betterproto.Message):
     """Q029"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+
+
+@dataclass
+class WritingInjectedSQLForNodeMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
+    data: "WritingInjectedSQLForNode" = betterproto.message_field(2)

 @dataclass
 class NodeCompiling(betterproto.Message):
     """Q030"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+
+
+@dataclass
+class NodeCompilingMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
+    data: "NodeCompiling" = betterproto.message_field(2)

 @dataclass
 class NodeExecuting(betterproto.Message):
     """Q031"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+
+
+@dataclass
+class NodeExecutingMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
+    data: "NodeExecuting" = betterproto.message_field(2)

 @dataclass
 class LogHookStartLine(betterproto.Message):
     """Q032"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    statement: str = betterproto.string_field(2)
+    index: int = betterproto.int32_field(3)
+    total: int = betterproto.int32_field(4)
+
+
+@dataclass
+class LogHookStartLineMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    statement: str = betterproto.string_field(3)
-    index: int = betterproto.int32_field(4)
-    total: int = betterproto.int32_field(5)
+    data: "LogHookStartLine" = betterproto.message_field(2)

 @dataclass
 class LogHookEndLine(betterproto.Message):
     """Q033"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    statement: str = betterproto.string_field(2)
+    status: str = betterproto.string_field(3)
+    index: int = betterproto.int32_field(4)
+    total: int = betterproto.int32_field(5)
+    execution_time: float = betterproto.float_field(6)
+
+
+@dataclass
+class LogHookEndLineMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    statement: str = betterproto.string_field(3)
-    status: str = betterproto.string_field(4)
-    index: int = betterproto.int32_field(5)
-    total: int = betterproto.int32_field(6)
-    execution_time: float = betterproto.float_field(7)
+    data: "LogHookEndLine" = betterproto.message_field(2)

 @dataclass
 class SkippingDetails(betterproto.Message):
     """Q034"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    resource_type: str = betterproto.string_field(2)
+    schema: str = betterproto.string_field(3)
+    node_name: str = betterproto.string_field(4)
+    index: int = betterproto.int32_field(5)
+    total: int = betterproto.int32_field(6)
+
+
+@dataclass
+class SkippingDetailsMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    resource_type: str = betterproto.string_field(3)
-    schema: str = betterproto.string_field(4)
-    node_name: str = betterproto.string_field(5)
-    index: int = betterproto.int32_field(6)
-    total: int = betterproto.int32_field(7)
+    data: "SkippingDetails" = betterproto.message_field(2)

 @dataclass
 class NothingToDo(betterproto.Message):
     """Q035"""

+    pass
+
+
+@dataclass
+class NothingToDoMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "NothingToDo" = betterproto.message_field(2)

 @dataclass
 class RunningOperationUncaughtError(betterproto.Message):
     """Q036"""

+    exc: str = betterproto.string_field(1)
+
+
+@dataclass
+class RunningOperationUncaughtErrorMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    exc: str = betterproto.string_field(2)
+    data: "RunningOperationUncaughtError" = betterproto.message_field(2)

 @dataclass
 class EndRunResult(betterproto.Message):
     """Q037"""

+    results: List["RunResultMsg"] = betterproto.message_field(1)
+    elapsed_time: float = betterproto.float_field(2)
+    generated_at: datetime = betterproto.message_field(3)
+    success: bool = betterproto.bool_field(4)
+
+
+@dataclass
+class EndRunResultMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    results: List["RunResultMsg"] = betterproto.message_field(2)
-    elapsed_time: float = betterproto.float_field(3)
-    generated_at: datetime = betterproto.message_field(4)
-    success: bool = betterproto.bool_field(5)
+    data: "EndRunResult" = betterproto.message_field(2)

 @dataclass
 class NoNodesSelected(betterproto.Message):
     """Q038"""

+    pass
+
+
+@dataclass
+class NoNodesSelectedMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
+    data: "NoNodesSelected" = betterproto.message_field(2)

 @dataclass
 class CatchableExceptionOnRun(betterproto.Message):
     """W002"""

+    node_info: "NodeInfo" = betterproto.message_field(1)
+    exc: str = betterproto.string_field(2)
+    exc_info: str = betterproto.string_field(3)
+
+
+@dataclass
+class CatchableExceptionOnRunMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    node_info: "NodeInfo" = betterproto.message_field(2)
-    exc: str = betterproto.string_field(3)
-    exc_info: str = betterproto.string_field(4)
+    data: "CatchableExceptionOnRun" = betterproto.message_field(2)

 @dataclass
 class InternalExceptionOnRun(betterproto.Message):
     """W003"""

+    build_path: str = betterproto.string_field(1)
+    exc: str = betterproto.string_field(2)
+
+
+@dataclass
+class InternalExceptionOnRunMsg(betterproto.Message):
     info: "EventInfo" = betterproto.message_field(1)
-    build_path: str = betterproto.string_field(2)
-    exc: str = betterproto.string_field(3)
+    data: "InternalExceptionOnRun" = betterproto.message_field(2)

 @dataclass
 class GenericExceptionOnRun(betterproto.Message):
     """W004"""

+    build_path: str = betterproto.string_field(1)
betterproto.string_field(1) + unique_id: str = betterproto.string_field(2) + exc: str = betterproto.string_field(3) + + +@dataclass +class GenericExceptionOnRunMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - build_path: str = betterproto.string_field(2) - unique_id: str = betterproto.string_field(3) - exc: str = betterproto.string_field(4) + data: "GenericExceptionOnRun" = betterproto.message_field(2) @dataclass class NodeConnectionReleaseError(betterproto.Message): """W005""" + node_name: str = betterproto.string_field(1) + exc: str = betterproto.string_field(2) + exc_info: str = betterproto.string_field(3) + + +@dataclass +class NodeConnectionReleaseErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_name: str = betterproto.string_field(2) - exc: str = betterproto.string_field(3) - exc_info: str = betterproto.string_field(4) + data: "NodeConnectionReleaseError" = betterproto.message_field(2) @dataclass class FoundStats(betterproto.Message): """W006""" + stat_line: str = betterproto.string_field(1) + + +@dataclass +class FoundStatsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - stat_line: str = betterproto.string_field(2) + data: "FoundStats" = betterproto.message_field(2) @dataclass class MainKeyboardInterrupt(betterproto.Message): """Z001""" + pass + + +@dataclass +class MainKeyboardInterruptMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "MainKeyboardInterrupt" = betterproto.message_field(2) @dataclass class MainEncounteredError(betterproto.Message): """Z002""" + exc: str = betterproto.string_field(1) + + +@dataclass +class MainEncounteredErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc: str = betterproto.string_field(2) + data: "MainEncounteredError" = betterproto.message_field(2) @dataclass class MainStackTrace(betterproto.Message): """Z003""" + stack_trace: str = betterproto.string_field(1) + + +@dataclass +class MainStackTraceMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - stack_trace: str = betterproto.string_field(2) + data: "MainStackTrace" = betterproto.message_field(2) @dataclass class SystemErrorRetrievingModTime(betterproto.Message): """Z004""" + path: str = betterproto.string_field(1) + + +@dataclass +class SystemErrorRetrievingModTimeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "SystemErrorRetrievingModTime" = betterproto.message_field(2) @dataclass class SystemCouldNotWrite(betterproto.Message): """Z005""" + path: str = betterproto.string_field(1) + reason: str = betterproto.string_field(2) + exc: str = betterproto.string_field(3) + + +@dataclass +class SystemCouldNotWriteMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) - reason: str = betterproto.string_field(3) - exc: str = betterproto.string_field(4) + data: "SystemCouldNotWrite" = betterproto.message_field(2) @dataclass class SystemExecutingCmd(betterproto.Message): """Z006""" + cmd: List[str] = betterproto.string_field(1) + + +@dataclass +class SystemExecutingCmdMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - cmd: List[str] = betterproto.string_field(2) + data: "SystemExecutingCmd" = betterproto.message_field(2) @dataclass -class SystemStdOutMsg(betterproto.Message): +class SystemStdOut(betterproto.Message): """Z007""" + bmsg: bytes = 
betterproto.bytes_field(1) + + +@dataclass +class SystemStdOutMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - bmsg: bytes = betterproto.bytes_field(2) + data: "SystemStdOut" = betterproto.message_field(2) @dataclass -class SystemStdErrMsg(betterproto.Message): +class SystemStdErr(betterproto.Message): """Z008""" + bmsg: bytes = betterproto.bytes_field(1) + + +@dataclass +class SystemStdErrMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - bmsg: bytes = betterproto.bytes_field(2) + data: "SystemStdErr" = betterproto.message_field(2) @dataclass class SystemReportReturnCode(betterproto.Message): """Z009""" + returncode: int = betterproto.int32_field(1) + + +@dataclass +class SystemReportReturnCodeMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - returncode: int = betterproto.int32_field(2) + data: "SystemReportReturnCode" = betterproto.message_field(2) @dataclass class TimingInfoCollected(betterproto.Message): """Z010""" + node_info: "NodeInfo" = betterproto.message_field(1) + timing_info: "TimingInfoMsg" = betterproto.message_field(2) + + +@dataclass +class TimingInfoCollectedMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - node_info: "NodeInfo" = betterproto.message_field(2) - timing_info: "TimingInfoMsg" = betterproto.message_field(3) + data: "TimingInfoCollected" = betterproto.message_field(2) @dataclass class LogDebugStackTrace(betterproto.Message): """Z011""" + exc_info: str = betterproto.string_field(1) + + +@dataclass +class LogDebugStackTraceMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc_info: str = betterproto.string_field(2) + data: "LogDebugStackTrace" = betterproto.message_field(2) @dataclass class CheckCleanPath(betterproto.Message): """Z012""" + path: str = betterproto.string_field(1) + + +@dataclass +class CheckCleanPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "CheckCleanPath" = betterproto.message_field(2) @dataclass class ConfirmCleanPath(betterproto.Message): """Z013""" + path: str = betterproto.string_field(1) + + +@dataclass +class ConfirmCleanPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "ConfirmCleanPath" = betterproto.message_field(2) @dataclass class ProtectedCleanPath(betterproto.Message): """Z014""" + path: str = betterproto.string_field(1) + + +@dataclass +class ProtectedCleanPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "ProtectedCleanPath" = betterproto.message_field(2) @dataclass class FinishedCleanPaths(betterproto.Message): """Z015""" + pass + + +@dataclass +class FinishedCleanPathsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "FinishedCleanPaths" = betterproto.message_field(2) @dataclass class OpenCommand(betterproto.Message): """Z016""" + open_cmd: str = betterproto.string_field(1) + profiles_dir: str = betterproto.string_field(2) + + +@dataclass +class OpenCommandMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - open_cmd: str = betterproto.string_field(2) - profiles_dir: str = betterproto.string_field(3) + data: "OpenCommand" = betterproto.message_field(2) @dataclass class EmptyLine(betterproto.Message): """Z017""" + pass + + +@dataclass +class EmptyLineMsg(betterproto.Message): info: "EventInfo" = 
betterproto.message_field(1) + data: "EmptyLine" = betterproto.message_field(2) @dataclass class ServingDocsPort(betterproto.Message): """Z018""" + address: str = betterproto.string_field(1) + port: int = betterproto.int32_field(2) + + +@dataclass +class ServingDocsPortMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - address: str = betterproto.string_field(2) - port: int = betterproto.int32_field(3) + data: "ServingDocsPort" = betterproto.message_field(2) @dataclass class ServingDocsAccessInfo(betterproto.Message): """Z019""" + port: str = betterproto.string_field(1) + + +@dataclass +class ServingDocsAccessInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - port: str = betterproto.string_field(2) + data: "ServingDocsAccessInfo" = betterproto.message_field(2) @dataclass class ServingDocsExitInfo(betterproto.Message): """Z020""" + pass + + +@dataclass +class ServingDocsExitInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "ServingDocsExitInfo" = betterproto.message_field(2) @dataclass class RunResultWarning(betterproto.Message): """Z021""" + resource_type: str = betterproto.string_field(1) + node_name: str = betterproto.string_field(2) + path: str = betterproto.string_field(3) + + +@dataclass +class RunResultWarningMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - resource_type: str = betterproto.string_field(2) - node_name: str = betterproto.string_field(3) - path: str = betterproto.string_field(4) + data: "RunResultWarning" = betterproto.message_field(2) @dataclass class RunResultFailure(betterproto.Message): """Z022""" + resource_type: str = betterproto.string_field(1) + node_name: str = betterproto.string_field(2) + path: str = betterproto.string_field(3) + + +@dataclass +class RunResultFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - resource_type: str = betterproto.string_field(2) - node_name: str = betterproto.string_field(3) - path: str = betterproto.string_field(4) + data: "RunResultFailure" = betterproto.message_field(2) @dataclass class StatsLine(betterproto.Message): """Z023""" - info: "EventInfo" = betterproto.message_field(1) stats: Dict[str, int] = betterproto.map_field( - 2, betterproto.TYPE_STRING, betterproto.TYPE_INT32 + 1, betterproto.TYPE_STRING, betterproto.TYPE_INT32 ) +@dataclass +class StatsLineMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "StatsLine" = betterproto.message_field(2) + + @dataclass class RunResultError(betterproto.Message): """Z024""" + msg: str = betterproto.string_field(1) + + +@dataclass +class RunResultErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "RunResultError" = betterproto.message_field(2) @dataclass class RunResultErrorNoMessage(betterproto.Message): """Z025""" + status: str = betterproto.string_field(1) + + +@dataclass +class RunResultErrorNoMessageMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - status: str = betterproto.string_field(2) + data: "RunResultErrorNoMessage" = betterproto.message_field(2) @dataclass class SQLCompiledPath(betterproto.Message): """Z026""" + path: str = betterproto.string_field(1) + + +@dataclass +class SQLCompiledPathMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - path: str = betterproto.string_field(2) + data: "SQLCompiledPath" = betterproto.message_field(2) @dataclass class 
CheckNodeTestFailure(betterproto.Message): """Z027""" + relation_name: str = betterproto.string_field(1) + + +@dataclass +class CheckNodeTestFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - relation_name: str = betterproto.string_field(2) + data: "CheckNodeTestFailure" = betterproto.message_field(2) @dataclass class FirstRunResultError(betterproto.Message): """Z028""" + msg: str = betterproto.string_field(1) + + +@dataclass +class FirstRunResultErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "FirstRunResultError" = betterproto.message_field(2) @dataclass class AfterFirstRunResultError(betterproto.Message): """Z029""" + msg: str = betterproto.string_field(1) + + +@dataclass +class AfterFirstRunResultErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "AfterFirstRunResultError" = betterproto.message_field(2) @dataclass class EndOfRunSummary(betterproto.Message): """Z030""" + num_errors: int = betterproto.int32_field(1) + num_warnings: int = betterproto.int32_field(2) + keyboard_interrupt: bool = betterproto.bool_field(3) + + +@dataclass +class EndOfRunSummaryMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - num_errors: int = betterproto.int32_field(2) - num_warnings: int = betterproto.int32_field(3) - keyboard_interrupt: bool = betterproto.bool_field(4) + data: "EndOfRunSummary" = betterproto.message_field(2) @dataclass class LogSkipBecauseError(betterproto.Message): """Z034""" + schema: str = betterproto.string_field(1) + relation: str = betterproto.string_field(2) + index: int = betterproto.int32_field(3) + total: int = betterproto.int32_field(4) + + +@dataclass +class LogSkipBecauseErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - schema: str = betterproto.string_field(2) - relation: str = betterproto.string_field(3) - index: int = betterproto.int32_field(4) - total: int = betterproto.int32_field(5) + data: "LogSkipBecauseError" = betterproto.message_field(2) @dataclass class EnsureGitInstalled(betterproto.Message): """Z036""" + pass + + +@dataclass +class EnsureGitInstalledMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "EnsureGitInstalled" = betterproto.message_field(2) @dataclass class DepsCreatingLocalSymlink(betterproto.Message): """Z037""" + pass + + +@dataclass +class DepsCreatingLocalSymlinkMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DepsCreatingLocalSymlink" = betterproto.message_field(2) @dataclass class DepsSymlinkNotAvailable(betterproto.Message): """Z038""" + pass + + +@dataclass +class DepsSymlinkNotAvailableMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DepsSymlinkNotAvailable" = betterproto.message_field(2) @dataclass class DisableTracking(betterproto.Message): """Z039""" + pass + + +@dataclass +class DisableTrackingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "DisableTracking" = betterproto.message_field(2) @dataclass class SendingEvent(betterproto.Message): """Z040""" + kwargs: str = betterproto.string_field(1) + + +@dataclass +class SendingEventMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - kwargs: str = betterproto.string_field(2) + data: "SendingEvent" = betterproto.message_field(2) @dataclass class SendEventFailure(betterproto.Message): 
"""Z041""" + pass + + +@dataclass +class SendEventFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "SendEventFailure" = betterproto.message_field(2) @dataclass class FlushEvents(betterproto.Message): """Z042""" + pass + + +@dataclass +class FlushEventsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "FlushEvents" = betterproto.message_field(2) @dataclass class FlushEventsFailure(betterproto.Message): """Z043""" + pass + + +@dataclass +class FlushEventsFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) + data: "FlushEventsFailure" = betterproto.message_field(2) @dataclass class TrackingInitializeFailure(betterproto.Message): """Z044""" + exc_info: str = betterproto.string_field(1) + + +@dataclass +class TrackingInitializeFailureMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - exc_info: str = betterproto.string_field(2) + data: "TrackingInitializeFailure" = betterproto.message_field(2) @dataclass class RunResultWarningMessage(betterproto.Message): """Z046""" + msg: str = betterproto.string_field(1) + + +@dataclass +class RunResultWarningMessageMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "RunResultWarningMessage" = betterproto.message_field(2) @dataclass class IntegrationTestInfo(betterproto.Message): """T001""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestInfo" = betterproto.message_field(2) @dataclass class IntegrationTestDebug(betterproto.Message): """T002""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestDebugMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestDebug" = betterproto.message_field(2) @dataclass class IntegrationTestWarn(betterproto.Message): """T003""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestWarnMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestWarn" = betterproto.message_field(2) @dataclass class IntegrationTestError(betterproto.Message): """T004""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestError" = betterproto.message_field(2) @dataclass class IntegrationTestException(betterproto.Message): """T005""" + msg: str = betterproto.string_field(1) + + +@dataclass +class IntegrationTestExceptionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "IntegrationTestException" = betterproto.message_field(2) @dataclass class UnitTestInfo(betterproto.Message): """T006""" + msg: str = betterproto.string_field(1) + + +@dataclass +class UnitTestInfoMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) + data: "UnitTestInfo" = betterproto.message_field(2) diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index ec10b906432..10c002460c1 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -69,607 +69,901 @@ 
message GenericMessage { // A001 message MainReportVersion { + string version = 1; + int32 log_version = 2; +} + +message MainReportVersionMsg { EventInfo info = 1; - string version = 2; - int32 log_version = 3; + MainReportVersion data = 2; } // A002 message MainReportArgs { + map<string, string> args = 1; +} + +message MainReportArgsMsg { EventInfo info = 1; - map<string, string> args = 2; + MainReportArgs data = 2; } // A003 message MainTrackingUserState { + string user_state = 1; +} + +message MainTrackingUserStateMsg { EventInfo info = 1; - string user_state = 2; + MainTrackingUserState data = 2; } // A004 message MergedFromState { + int32 num_merged = 1; + repeated string sample = 2; +} + +message MergedFromStateMsg { EventInfo info = 1; - int32 num_merged = 2; - repeated string sample = 3; + MergedFromState data = 2; } // A005 message MissingProfileTarget { + string profile_name = 1; + string target_name = 2; +} + +message MissingProfileTargetMsg { EventInfo info = 1; - string profile_name = 2; - string target_name = 3; + MissingProfileTarget data = 2; } // Skipped A006, A007 // A008 message InvalidVarsYAML { +} + +message InvalidVarsYAMLMsg { EventInfo info = 1; + InvalidVarsYAML data = 2; } // A009 message DbtProjectError { +} + +message DbtProjectErrorMsg { EventInfo info = 1; + DbtProjectError data = 2; } // A010 message DbtProjectErrorException { + string exc = 1; +} + +message DbtProjectErrorExceptionMsg { EventInfo info = 1; - string exc = 2; + DbtProjectErrorException data = 2; } // A011 message DbtProfileError { +} + +message DbtProfileErrorMsg { EventInfo info = 1; + DbtProfileError data = 2; } // A012 message DbtProfileErrorException { + string exc = 1; +} + +message DbtProfileErrorExceptionMsg { EventInfo info = 1; - string exc = 2; + DbtProfileErrorException data = 2; } // A013 message ProfileListTitle { +} + +message ProfileListTitleMsg { EventInfo info = 1; + ProfileListTitle data = 2; } // A014 message ListSingleProfile { + string profile = 1; +} + +message ListSingleProfileMsg { EventInfo info = 1; - string profile = 2; + ListSingleProfile data = 2; } // A015 message NoDefinedProfiles { +} + +message NoDefinedProfilesMsg { EventInfo info = 1; + NoDefinedProfiles data = 2; } // A016 message ProfileHelpMessage { +} + +message ProfileHelpMessageMsg { EventInfo info = 1; + ProfileHelpMessage data = 2; } // A017 message StarterProjectPath { + string dir = 1; +} + +message StarterProjectPathMsg { EventInfo info = 1; - string dir = 2; + StarterProjectPath data = 2; } // A018 message ConfigFolderDirectory { + string dir = 1; +} + +message ConfigFolderDirectoryMsg { EventInfo info = 1; - string dir = 2; + ConfigFolderDirectory data = 2; } // A019 message NoSampleProfileFound { + string adapter = 1; +} + +message NoSampleProfileFoundMsg { EventInfo info = 1; - string adapter = 2; + NoSampleProfileFound data = 2; } // A020 message ProfileWrittenWithSample { + string name = 1; + string path = 2; +} + +message ProfileWrittenWithSampleMsg { EventInfo info = 1; - string name = 2; - string path = 3; + ProfileWrittenWithSample data = 2; } // A021 message ProfileWrittenWithTargetTemplateYAML { + string name = 1; + string path = 2; +} + +message ProfileWrittenWithTargetTemplateYAMLMsg { EventInfo info = 1; - string name = 2; - string path = 3; + ProfileWrittenWithTargetTemplateYAML data = 2; } // A022 message ProfileWrittenWithProjectTemplateYAML { + string name = 1; + string path = 2; +} + +message ProfileWrittenWithProjectTemplateYAMLMsg { EventInfo info = 1; - string name = 2; - string path = 3; +
ProfileWrittenWithProjectTemplateYAML data = 2; } // A023 message SettingUpProfile { +} + +message SettingUpProfileMsg { EventInfo info = 1; + SettingUpProfile data = 2; } // A024 message InvalidProfileTemplateYAML { +} + +message InvalidProfileTemplateYAMLMsg { EventInfo info = 1; + InvalidProfileTemplateYAML data = 2; } // A025 message ProjectNameAlreadyExists { + string name = 1; +} + +message ProjectNameAlreadyExistsMsg { EventInfo info = 1; - string name = 2; + ProjectNameAlreadyExists data = 2; } // A026 message ProjectCreated { + string project_name = 1; + string docs_url = 2; + string slack_url = 3; +} + +message ProjectCreatedMsg { EventInfo info = 1; - string project_name = 2; - string docs_url = 3; - string slack_url = 4; + ProjectCreated data = 2; } // D - Deprecation // D001 message PackageRedirectDeprecation { + string old_name = 1; + string new_name = 2; +} + +message PackageRedirectDeprecationMsg { EventInfo info = 1; - string old_name = 2; - string new_name = 3; + PackageRedirectDeprecation data = 2; } // D002 message PackageInstallPathDeprecation { +} + +message PackageInstallPathDeprecationMsg { EventInfo info = 1; + PackageInstallPathDeprecation data = 2; } // D003 message ConfigSourcePathDeprecation { + string deprecated_path = 1; + string exp_path = 2; +} + +message ConfigSourcePathDeprecationMsg { EventInfo info = 1; - string deprecated_path = 2; - string exp_path = 3; + ConfigSourcePathDeprecation data = 2; } // D004 message ConfigDataPathDeprecation { + string deprecated_path = 1; + string exp_path = 2; +} + +message ConfigDataPathDeprecationMsg { EventInfo info = 1; - string deprecated_path = 2; - string exp_path = 3; + ConfigDataPathDeprecation data = 2; } //D005 message AdapterDeprecationWarning { + string old_name = 1; + string new_name = 2; +} + +message AdapterDeprecationWarningMsg { EventInfo info = 1; - string old_name = 2; - string new_name = 3; + AdapterDeprecationWarning data = 2; } //D006 message MetricAttributesRenamed { + string metric_name = 1; +} + +message MetricAttributesRenamedMsg { EventInfo info = 1; - string metric_name = 2; + MetricAttributesRenamed data = 2; } //D007 message ExposureNameDeprecation { + string exposure = 1; +} + +message ExposureNameDeprecationMsg { EventInfo info = 1; - string exposure = 2; + ExposureNameDeprecation data = 2; } // E - DB Adapter // E001 message AdapterEventDebug { + NodeInfo node_info = 1; + string name = 2; + string base_msg = 3; + repeated string args = 4; +} + +message AdapterEventDebugMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string base_msg = 4; - repeated string args = 5; + AdapterEventDebug data = 2; } // E002 message AdapterEventInfo { + NodeInfo node_info = 1; + string name = 2; + string base_msg = 3; + repeated string args = 4; +} + +message AdapterEventInfoMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string base_msg = 4; - repeated string args = 5; + AdapterEventInfo data = 2; } // E003 message AdapterEventWarning { + NodeInfo node_info = 1; + string name = 2; + string base_msg = 3; + repeated string args = 4; +} + +message AdapterEventWarningMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string base_msg = 4; - repeated string args = 5; + AdapterEventWarning data = 2; } // E004 message AdapterEventError { + NodeInfo node_info = 1; + string name = 2; + string base_msg = 3; + repeated string args = 4; + string exc_info = 5; +} + +message AdapterEventErrorMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string 
name = 3; - string base_msg = 4; - repeated string args = 5; - string exc_info = 6; + AdapterEventError data = 2; } // E005 message NewConnection { + NodeInfo node_info = 1; + string conn_type = 2; + string conn_name = 3; +} + +message NewConnectionMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_type = 3; - string conn_name = 4; + NewConnection data = 2; } // E006 message ConnectionReused { + string conn_name = 1; +} + +message ConnectionReusedMsg { EventInfo info = 1; - string conn_name = 2; + ConnectionReused data = 2; } // E007 message ConnectionLeftOpenInCleanup { + string conn_name = 1; +} + +message ConnectionLeftOpenInCleanupMsg { EventInfo info = 1; - string conn_name = 2; + ConnectionLeftOpenInCleanup data = 2; } // E008 message ConnectionClosedInCleanup { + string conn_name = 1; +} + +message ConnectionClosedInCleanupMsg { EventInfo info = 1; - string conn_name = 2; + ConnectionClosedInCleanup data = 2; } // E009 message RollbackFailed { + NodeInfo node_info = 1; + string conn_name = 2; + string exc_info = 3; +} + +message RollbackFailedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; - string exc_info = 4; + RollbackFailed data = 2; } // E010 message ConnectionClosed { + NodeInfo node_info = 1; + string conn_name = 2; +} + +message ConnectionClosedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; + ConnectionClosed data = 2; } // E011 message ConnectionLeftOpen { + NodeInfo node_info = 1; + string conn_name = 2; +} + +message ConnectionLeftOpenMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; + ConnectionLeftOpen data = 2; } // E012 message Rollback { + NodeInfo node_info = 1; + string conn_name = 2; +} + +message RollbackMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; + Rollback data = 2; } // E013 message CacheMiss { + string conn_name = 1; + string database = 2; + string schema = 3; +} + +message CacheMissMsg { EventInfo info = 1; - string conn_name = 2; - string database = 3; - string schema = 4; + CacheMiss data = 2; } // E014 message ListRelations { + string database = 1; + string schema = 2; + repeated ReferenceKeyMsg relations = 3; +} + +message ListRelationsMsg { EventInfo info = 1; - string database = 2; - string schema = 3; - repeated ReferenceKeyMsg relations = 4; + ListRelations data = 2; } // E015 message ConnectionUsed { + NodeInfo node_info = 1; + string conn_type = 2; + string conn_name = 3; +} + +message ConnectionUsedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_type = 3; - string conn_name = 4; + ConnectionUsed data = 2; } // E016 message SQLQuery { + NodeInfo node_info = 1; + string conn_name = 2; + string sql = 3; +} + +message SQLQueryMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; - string sql = 4; + SQLQuery data = 2; } // E017 message SQLQueryStatus { + NodeInfo node_info = 1; + string status = 2; + float elapsed = 3; +} + +message SQLQueryStatusMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; - float elapsed = 4; + SQLQueryStatus data = 2; } // E018 message SQLCommit { + NodeInfo node_info = 1; + string conn_name = 2; +} + +message SQLCommitMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string conn_name = 3; + SQLCommit data = 2; } // E019 message ColTypeChange { + string orig_type = 1; + string new_type = 2; + ReferenceKeyMsg table = 3; +} + +message ColTypeChangeMsg { EventInfo info = 1; - string orig_type = 2; - string new_type = 3; - ReferenceKeyMsg table = 4; + ColTypeChange data = 2; }
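Seen from the consuming side, each of these adapter events now round-trips as a two-field envelope. A standalone sketch against the betterproto (1.x) runtime this module is generated with; the EventInfo here is trimmed to two string fields for brevity, so it is illustrative rather than dbt's full header:

from dataclasses import dataclass
import betterproto

@dataclass
class EventInfo(betterproto.Message):
    name: str = betterproto.string_field(1)
    code: str = betterproto.string_field(2)

@dataclass
class ConnectionReused(betterproto.Message):
    conn_name: str = betterproto.string_field(1)

@dataclass
class ConnectionReusedMsg(betterproto.Message):
    info: "EventInfo" = betterproto.message_field(1)
    data: "ConnectionReused" = betterproto.message_field(2)

# Serialize the envelope, then parse it back and read the payload.
wire = bytes(
    ConnectionReusedMsg(
        info=EventInfo(name="ConnectionReused", code="E006"),
        data=ConnectionReused(conn_name="master"),
    )
)
decoded = ConnectionReusedMsg().parse(wire)
assert decoded.data.conn_name == "master"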
// E020 message SchemaCreation { + ReferenceKeyMsg relation = 1; +} + +message SchemaCreationMsg { EventInfo info = 1; - ReferenceKeyMsg relation = 2; + SchemaCreation data = 2; } // E021 message SchemaDrop { + ReferenceKeyMsg relation = 1; +} + +message SchemaDropMsg { EventInfo info = 1; - ReferenceKeyMsg relation = 2; + SchemaDrop data = 2; } // E022 message UncachedRelation { + ReferenceKeyMsg dep_key = 1; + ReferenceKeyMsg ref_key = 2; +} + +message UncachedRelationMsg { EventInfo info = 1; - ReferenceKeyMsg dep_key = 2; - ReferenceKeyMsg ref_key = 3; + UncachedRelation data = 2; } // E023 message AddLink { + ReferenceKeyMsg dep_key = 1; + ReferenceKeyMsg ref_key = 2; +} + +message AddLinkMsg { EventInfo info = 1; - ReferenceKeyMsg dep_key = 2; - ReferenceKeyMsg ref_key = 3; + AddLink data = 2; } // E024 message AddRelation { + ReferenceKeyMsg relation = 1; +} + +message AddRelationMsg { EventInfo info = 1; - ReferenceKeyMsg relation = 2; + AddRelation data = 2; } // E025 message DropMissingRelation { + ReferenceKeyMsg relation = 1; +} + +message DropMissingRelationMsg { EventInfo info = 1; - ReferenceKeyMsg relation = 2; + DropMissingRelation data = 2; } // E026 message DropCascade { + ReferenceKeyMsg dropped = 1; + repeated ReferenceKeyMsg consequences = 2; +} + +message DropCascadeMsg { EventInfo info = 1; - ReferenceKeyMsg dropped = 2; - repeated ReferenceKeyMsg consequences = 3; + DropCascade data = 2; } // E027 message DropRelation { + ReferenceKeyMsg dropped = 1; +} + +message DropRelationMsg { EventInfo info = 1; - ReferenceKeyMsg dropped = 2; + DropRelation data = 2; } // E028 message UpdateReference { + ReferenceKeyMsg old_key = 1; + ReferenceKeyMsg new_key = 2; + ReferenceKeyMsg cached_key = 3; +} + +message UpdateReferenceMsg { EventInfo info = 1; - ReferenceKeyMsg old_key = 2; - ReferenceKeyMsg new_key = 3; - ReferenceKeyMsg cached_key = 4; + UpdateReference data = 2; } // E029 message TemporaryRelation { + ReferenceKeyMsg key = 1; +} + +message TemporaryRelationMsg { EventInfo info = 1; - ReferenceKeyMsg key = 2; + TemporaryRelation data = 2; } // E030 message RenameSchema { + ReferenceKeyMsg old_key = 1; + ReferenceKeyMsg new_key = 2; +} + +message RenameSchemaMsg { EventInfo info = 1; - ReferenceKeyMsg old_key = 2; - ReferenceKeyMsg new_key = 3; + RenameSchema data = 2; } // E031 message DumpBeforeAddGraph { + map<string, ListOfStrings> dump = 1; +} + +message DumpBeforeAddGraphMsg { EventInfo info = 1; - map<string, ListOfStrings> dump = 2; + DumpBeforeAddGraph data = 2; } // E032 message DumpAfterAddGraph { + map<string, ListOfStrings> dump = 1; +} + +message DumpAfterAddGraphMsg { EventInfo info = 1; - map<string, ListOfStrings> dump = 2; + DumpAfterAddGraph data = 2; } // E033 message DumpBeforeRenameSchema { + map<string, ListOfStrings> dump = 1; +} + +message DumpBeforeRenameSchemaMsg { EventInfo info = 1; - map<string, ListOfStrings> dump = 2; + DumpBeforeRenameSchema data = 2; } // E034 message DumpAfterRenameSchema { + map<string, ListOfStrings> dump = 1; +} + +message DumpAfterRenameSchemaMsg { EventInfo info = 1; - map<string, ListOfStrings> dump = 2; + DumpAfterRenameSchema data = 2; } // E035 message AdapterImportError { + string exc = 1; +} + +message AdapterImportErrorMsg { EventInfo info = 1; - string exc = 2; + AdapterImportError data = 2; } // E036 message PluginLoadError { + string exc_info = 1; +} + +message PluginLoadErrorMsg { EventInfo info = 1; - string exc_info = 2; + PluginLoadError data = 2; } // E037 message NewConnectionOpening { + NodeInfo node_info = 1; + string connection_state = 2; +} + +message NewConnectionOpeningMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string connection_state = 3; + NewConnectionOpening data = 2; }
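One payoff of fixing EventInfo at field 1 of every *Msg: a reader that only understands a generic envelope can still decode the header and skip a payload type it does not know, which is presumably what the GenericMessage at the top of this file is for. A hypothetical sketch under that assumption (GenericEnvelope is not a dbt message, and the messages are trimmed):

from dataclasses import dataclass
import betterproto

@dataclass
class EventInfo(betterproto.Message):
    name: str = betterproto.string_field(1)

@dataclass
class NewConnectionOpening(betterproto.Message):
    connection_state: str = betterproto.string_field(2)

@dataclass
class NewConnectionOpeningMsg(betterproto.Message):
    info: "EventInfo" = betterproto.message_field(1)
    data: "NewConnectionOpening" = betterproto.message_field(2)

@dataclass
class GenericEnvelope(betterproto.Message):
    # Knows only the shared header at field 1.
    info: "EventInfo" = betterproto.message_field(1)

wire = bytes(NewConnectionOpeningMsg(
    info=EventInfo(name="NewConnectionOpening"),
    data=NewConnectionOpening(connection_state="open"),
))
header = GenericEnvelope().parse(wire)  # field 2 is unknown here and skipped
assert header.info.name == "NewConnectionOpening"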
// E038 message CodeExecution { + string conn_name = 1; + string code_content = 2; +} + +message CodeExecutionMsg { EventInfo info = 1; - string conn_name = 2; - string code_content = 3; + CodeExecution data = 2; } // E039 message CodeExecutionStatus { + string status = 1; + float elapsed = 2; +} + +message CodeExecutionStatusMsg { EventInfo info = 1; - string status = 2; - float elapsed = 3; + CodeExecutionStatus data = 2; } // E040 message CatalogGenerationError { + string exc = 1; +} + +message CatalogGenerationErrorMsg { EventInfo info = 1; - string exc = 2; + CatalogGenerationError data = 2; } // E041 message WriteCatalogFailure { + int32 num_exceptions = 1; +} + +message WriteCatalogFailureMsg { EventInfo info = 1; - int32 num_exceptions = 2; + WriteCatalogFailure data = 2; } // E042 message CatalogWritten { + string path = 1; +} + +message CatalogWrittenMsg { EventInfo info = 1; - string path = 2; + CatalogWritten data = 2; } // E043 message CannotGenerateDocs { +} + +message CannotGenerateDocsMsg { EventInfo info = 1; + CannotGenerateDocs data = 2; } // E044 message BuildingCatalog { +} + +message BuildingCatalogMsg { EventInfo info = 1; + BuildingCatalog data = 2; } // E045 message DatabaseErrorRunningHook { + string hook_type = 1; +} + +message DatabaseErrorRunningHookMsg { EventInfo info = 1; - string hook_type = 2; + DatabaseErrorRunningHook data = 2; } // E046 message HooksRunning { + int32 num_hooks = 1; + string hook_type = 2; +} + +message HooksRunningMsg { EventInfo info = 1; - int32 num_hooks = 2; - string hook_type = 3; + HooksRunning data = 2; } // E047 message HookFinished { + string stat_line = 1; + string execution = 2; + float execution_time = 3; +} + +message HookFinishedMsg { EventInfo info = 1; - string stat_line = 2; - string execution = 3; - float execution_time = 4; + HookFinished data = 2; } // I - Project parsing // I001 -message ParseCmdStart { - EventInfo info = 1; +message ParseCmdOut { + string msg = 1; } -// I002 -message ParseCmdCompiling { +message ParseCmdOutMsg { EventInfo info = 1; + ParseCmdOut data = 2; } -// I003 -message ParseCmdWritingManifest { - EventInfo info = 1; +// Skipping I002, I003, I004, I005, I006, I007, I008, I009, I010 + + +// I011 +message GenericTestFileParse { + string path = 1; } -// I004 -message ParseCmdDone { +message GenericTestFileParseMsg { EventInfo info = 1; + GenericTestFileParse data = 2; } -// I005 -message ManifestDependenciesLoaded { - EventInfo info = 1; +// I012 +message MacroFileParse { + string path = 1; } -// I006 -message ManifestLoaderCreated { +message MacroFileParseMsg { EventInfo info = 1; + MacroFileParse data = 2; } -// I007 -message ManifestLoaded { - EventInfo info = 1; +// Skipping I013 + +// I014 +message PartialParsingExceptionProcessingFile { + string file = 1; } -// I008 -message ManifestChecked { +message PartialParsingExceptionProcessingFileMsg { EventInfo info = 1; + PartialParsingExceptionProcessingFile data = 2; } -// I009 -message ManifestFlatGraphBuilt { - EventInfo info = 1; +// I016 +message PartialParsingException { + map<string, string> exc_info = 1; } -// I010 -message ParseCmdPerfInfoPath { +message PartialParsingExceptionMsg { EventInfo info = 1; - string path = 2; + PartialParsingException data = 2; } -// I011 -message GenericTestFileParse { - EventInfo info = 1; - string path = 2; +// I017 +message PartialParsingSkipParsing { } -// I012 -message MacroFileParse { - EventInfo info = 1; - string path = 2; -} - -// Skipping I013 - -// I014
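PartialParsingException (I016) above keeps its exc_info as a protobuf map, which betterproto exposes as a Dict on the Python side via map_field. A self-contained sketch of that one field, assuming string keys and values as in the reworked message (everything else is omitted):

from dataclasses import dataclass
from typing import Dict
import betterproto

@dataclass
class PartialParsingException(betterproto.Message):
    # map<string, string> exc_info = 1; rendered on the Python side
    exc_info: Dict[str, str] = betterproto.map_field(
        1, betterproto.TYPE_STRING, betterproto.TYPE_STRING
    )

evt = PartialParsingException(exc_info={"traceback": "<frame>"})
round_tripped = PartialParsingException().parse(bytes(evt))
assert round_tripped.exc_info == {"traceback": "<frame>"}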
-message PartialParsingExceptionProcessingFile { - EventInfo info = 1; - string file = 2; -} - -// I016 -message PartialParsingException { - EventInfo info = 1; - map exc_info = 2; -} - -// I017 -message PartialParsingSkipParsing { +message PartialParsingSkipParsingMsg { EventInfo info = 1; + PartialParsingSkipParsing data = 2; } @@ -678,8 +972,12 @@ message PartialParsingSkipParsing { // I024 message UnableToPartialParse { + string reason = 1; +} + +message UnableToPartialParseMsg { EventInfo info = 1; - string reason = 2; + UnableToPartialParse data = 2; } // Skipped I025, I026, I027 @@ -687,15 +985,23 @@ message UnableToPartialParse { // I028 message PartialParsingNotEnabled { +} + +message PartialParsingNotEnabledMsg { EventInfo info = 1; + PartialParsingNotEnabled data = 2; } // I029 message ParsedFileLoadFailed { + string path = 1; + string exc = 2; + string exc_info = 3; +} + +message ParsedFileLoadFailedMsg { EventInfo info = 1; - string path = 2; - string exc = 3; - string exc_info = 4; + ParsedFileLoadFailed data = 2; } // Skipping I030 @@ -703,406 +1009,646 @@ message ParsedFileLoadFailed { // I031 message StaticParserCausedJinjaRendering { + string path = 1; +} + +message StaticParserCausedJinjaRenderingMsg { EventInfo info = 1; - string path = 2; + StaticParserCausedJinjaRendering data = 2; } // I032 message UsingExperimentalParser { + string path = 1; +} + +message UsingExperimentalParserMsg { EventInfo info = 1; - string path = 2; + UsingExperimentalParser data = 2; } // I033 message SampleFullJinjaRendering { + string path = 1; +} + +message SampleFullJinjaRenderingMsg { EventInfo info = 1; - string path = 2; + SampleFullJinjaRendering data = 2; } // I034 message StaticParserFallbackJinjaRendering { + string path = 1; +} + +message StaticParserFallbackJinjaRenderingMsg { EventInfo info = 1; - string path = 2; + StaticParserFallbackJinjaRendering data = 2; } // I035 message StaticParsingMacroOverrideDetected { + string path = 1; +} + +message StaticParsingMacroOverrideDetectedMsg { EventInfo info = 1; - string path = 2; + StaticParsingMacroOverrideDetected data = 2; } // I036 message StaticParserSuccess { + string path = 1; +} + +message StaticParserSuccessMsg { EventInfo info = 1; - string path = 2; + StaticParserSuccess data = 2; } // I037 message StaticParserFailure { + string path = 1; +} + +message StaticParserFailureMsg { EventInfo info = 1; - string path = 2; + StaticParserFailure data = 2; } // I038 message ExperimentalParserSuccess { + string path = 1; +} + +message ExperimentalParserSuccessMsg { EventInfo info = 1; - string path = 2; + ExperimentalParserSuccess data = 2; } // I039 message ExperimentalParserFailure { + string path = 1; +} + +message ExperimentalParserFailureMsg { EventInfo info = 1; - string path = 2; + ExperimentalParserFailure data = 2; } // I040 message PartialParsingEnabled { + int32 deleted = 1; + int32 added = 2; + int32 changed = 3; +} + +message PartialParsingEnabledMsg { EventInfo info = 1; - int32 deleted = 2; - int32 added = 3; - int32 changed = 4; + PartialParsingEnabled data = 2; } // I041 message PartialParsingFile { + string file_id = 1; + string operation = 2; +} + +message PartialParsingFileMsg { EventInfo info = 1; - string file_id = 2; - string operation = 3; + PartialParsingFile data = 2; } // Skipped I042, I043, I044, I045, I046, I047, I048, I049 // I050 message InvalidDisabledTargetInTestNode { + string resource_type_title = 1; + string unique_id = 2; + string original_file_path = 3; + string target_kind = 4; + string 
target_name = 5; + string target_package = 6; +} + +message InvalidDisabledTargetInTestNodeMsg { EventInfo info = 1; - string resource_type_title = 2; - string unique_id = 3; - string original_file_path = 4; - string target_kind = 5; - string target_name = 6; - string target_package = 7; + InvalidDisabledTargetInTestNode data = 2; } // I051 message UnusedResourceConfigPath { + repeated string unused_config_paths = 1; +} + +message UnusedResourceConfigPathMsg { EventInfo info = 1; - repeated string unused_config_paths = 2; + UnusedResourceConfigPath data = 2; } // I052 message SeedIncreased { + string package_name = 1; + string name = 2; +} + +message SeedIncreasedMsg { EventInfo info = 1; - string package_name = 2; - string name = 3; + SeedIncreased data = 2; } // I053 message SeedExceedsLimitSamePath { + string package_name = 1; + string name = 2; +} + +message SeedExceedsLimitSamePathMsg { EventInfo info = 1; - string package_name = 2; - string name = 3; + SeedExceedsLimitSamePath data = 2; } // I054 message SeedExceedsLimitAndPathChanged { + string package_name = 1; + string name = 2; +} + +message SeedExceedsLimitAndPathChangedMsg { EventInfo info = 1; - string package_name = 2; - string name = 3; + SeedExceedsLimitAndPathChanged data = 2; } // I055 message SeedExceedsLimitChecksumChanged { + string package_name = 1; + string name = 2; + string checksum_name = 3; +} + +message SeedExceedsLimitChecksumChangedMsg { EventInfo info = 1; - string package_name = 2; - string name = 3; - string checksum_name = 4; + SeedExceedsLimitChecksumChanged data = 2; } // I056 message UnusedTables { + repeated string unused_tables = 1; +} + +message UnusedTablesMsg { EventInfo info = 1; - repeated string unused_tables = 2; + UnusedTables data = 2; } // I057 message WrongResourceSchemaFile { + string patch_name = 1; + string resource_type = 2; + string plural_resource_type = 3; + string yaml_key = 4; + string file_path = 5; +} + +message WrongResourceSchemaFileMsg { EventInfo info = 1; - string patch_name = 2; - string resource_type = 3; - string plural_resource_type = 4; - string yaml_key = 5; - string file_path = 6; + WrongResourceSchemaFile data = 2; } // I058 message NoNodeForYamlKey { + string patch_name = 1; + string yaml_key = 2; + string file_path = 3; +} + +message NoNodeForYamlKeyMsg { EventInfo info = 1; - string patch_name = 2; - string yaml_key = 3; - string file_path = 4; + NoNodeForYamlKey data = 2; } // I059 -message MacroPatchNotFound { +message MacroNotFoundForPatch { + string patch_name = 1; +} + +message MacroNotFoundForPatchMsg { EventInfo info = 1; - string patch_name = 2; + MacroNotFoundForPatch data = 2; } // I060 message NodeNotFoundOrDisabled { + string original_file_path = 1; + string unique_id = 2; + string resource_type_title = 3; + string target_name = 4; + string target_kind = 5; + string target_package = 6; + string disabled = 7; +} + +message NodeNotFoundOrDisabledMsg { EventInfo info = 1; - string original_file_path = 2; - string unique_id = 3; - string resource_type_title = 4; - string target_name = 5; - string target_kind = 6; - string target_package = 7; - string disabled = 8; + NodeNotFoundOrDisabled data = 2; } // I061 message JinjaLogWarning { + NodeInfo node_info = 1; + string msg = 2; +} + +message JinjaLogWarningMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string msg = 3; + JinjaLogWarning data = 2; } // M - Deps generation // M001 message GitSparseCheckoutSubdirectory { + string subdir = 1; +} + +message GitSparseCheckoutSubdirectoryMsg { EventInfo info 
= 1; - string subdir = 2; + GitSparseCheckoutSubdirectory data = 2; } // M002 message GitProgressCheckoutRevision { + string revision = 1; +} + +message GitProgressCheckoutRevisionMsg { EventInfo info = 1; - string revision = 2; + GitProgressCheckoutRevision data = 2; } // M003 message GitProgressUpdatingExistingDependency { + string dir = 1; +} + +message GitProgressUpdatingExistingDependencyMsg { EventInfo info = 1; - string dir = 2; + GitProgressUpdatingExistingDependency data = 2; } // M004 message GitProgressPullingNewDependency { + string dir = 1; +} + +message GitProgressPullingNewDependencyMsg { EventInfo info = 1; - string dir = 2; + GitProgressPullingNewDependency data = 2; } // M005 message GitNothingToDo { + string sha = 1; +} + +message GitNothingToDoMsg { EventInfo info = 1; - string sha = 2; + GitNothingToDo data = 2; } // M006 message GitProgressUpdatedCheckoutRange { + string start_sha = 1; + string end_sha = 2; +} + +message GitProgressUpdatedCheckoutRangeMsg { EventInfo info = 1; - string start_sha = 2; - string end_sha = 3; + GitProgressUpdatedCheckoutRange data = 2; } // M007 message GitProgressCheckedOutAt { + string end_sha = 1; +} + +message GitProgressCheckedOutAtMsg { EventInfo info = 1; - string end_sha = 2; + GitProgressCheckedOutAt data = 2; } // M008 message RegistryProgressGETRequest { + string url = 1; +} + +message RegistryProgressGETRequestMsg { EventInfo info = 1; - string url = 2; + RegistryProgressGETRequest data = 2; } // M009 message RegistryProgressGETResponse { + string url = 1; + int32 resp_code = 2; +} + +message RegistryProgressGETResponseMsg { EventInfo info = 1; - string url = 2; - int32 resp_code = 3; + RegistryProgressGETResponse data = 2; } // M010 message SelectorReportInvalidSelector { + string valid_selectors = 1; + string spec_method = 2; + string raw_spec = 3; +} + +message SelectorReportInvalidSelectorMsg { EventInfo info = 1; - string valid_selectors = 2; - string spec_method = 3; - string raw_spec = 4; + SelectorReportInvalidSelector data = 2; } // M011 message JinjaLogInfo { + NodeInfo node_info = 1; + string msg = 2; +} + +message JinjaLogInfoMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string msg = 3; + JinjaLogInfo data = 2; } // M012 message JinjaLogDebug { + NodeInfo node_info = 1; + string msg = 2; +} + +message JinjaLogDebugMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string msg = 3; + JinjaLogDebug data = 2; } // M013 message DepsNoPackagesFound { +} + +message DepsNoPackagesFoundMsg { EventInfo info = 1; + DepsNoPackagesFound data = 2; } // M014 message DepsStartPackageInstall { + string package_name = 1; +} + +message DepsStartPackageInstallMsg { EventInfo info = 1; - string package_name = 2; + DepsStartPackageInstall data = 2; } // M015 message DepsInstallInfo { + string version_name = 1; +} + +message DepsInstallInfoMsg { EventInfo info = 1; - string version_name = 2; + DepsInstallInfo data = 2; } // M016 message DepsUpdateAvailable { + string version_latest = 1; +} + +message DepsUpdateAvailableMsg { EventInfo info = 1; - string version_latest = 2; + DepsUpdateAvailable data = 2; } // M017 message DepsUpToDate { +} + +message DepsUpToDateMsg { EventInfo info = 1; + DepsUpToDate data = 2; } // M018 message DepsListSubdirectory { + string subdirectory = 1; +} + +message DepsListSubdirectoryMsg { EventInfo info = 1; - string subdirectory = 2; + DepsListSubdirectory data = 2; } // M019 message DepsNotifyUpdatesAvailable { + ListOfStrings packages = 1; +} + +message DepsNotifyUpdatesAvailableMsg { EventInfo 
info = 1; - ListOfStrings packages = 2; + DepsNotifyUpdatesAvailable data = 2; } // M020 message RetryExternalCall { + int32 attempt = 1; + int32 max = 2; +} + +message RetryExternalCallMsg { EventInfo info = 1; - int32 attempt = 2; - int32 max = 3; + RetryExternalCall data = 2; } // M021 message RecordRetryException { + string exc = 1; +} + +message RecordRetryExceptionMsg { EventInfo info = 1; - string exc = 2; + RecordRetryException data = 2; } // M022 message RegistryIndexProgressGETRequest { + string url = 1; +} + +message RegistryIndexProgressGETRequestMsg { EventInfo info = 1; - string url = 2; + RegistryIndexProgressGETRequest data = 2; } // M023 message RegistryIndexProgressGETResponse { + string url = 1; + int32 resp_code = 2; +} + +message RegistryIndexProgressGETResponseMsg { EventInfo info = 1; - string url = 2; - int32 resp_code = 3; + RegistryIndexProgressGETResponse data = 2; } // M024 message RegistryResponseUnexpectedType { + string response = 1; +} + +message RegistryResponseUnexpectedTypeMsg { EventInfo info = 1; - string response = 2; + RegistryResponseUnexpectedType data = 2; } // M025 message RegistryResponseMissingTopKeys { + string response = 1; +} + +message RegistryResponseMissingTopKeysMsg { EventInfo info = 1; - string response = 2; + RegistryResponseMissingTopKeys data = 2; } // M026 message RegistryResponseMissingNestedKeys { + string response = 1; +} + +message RegistryResponseMissingNestedKeysMsg { EventInfo info = 1; - string response = 2; + RegistryResponseMissingNestedKeys data = 2; } // m027 message RegistryResponseExtraNestedKeys { + string response = 1; +} + +message RegistryResponseExtraNestedKeysMsg { EventInfo info = 1; - string response = 2; + RegistryResponseExtraNestedKeys data = 2; } // M028 message DepsSetDownloadDirectory { + string path = 1; +} + +message DepsSetDownloadDirectoryMsg { EventInfo info = 1; - string path = 2; + DepsSetDownloadDirectory data = 2; } // M029 message DepsUnpinned { + string revision = 1; + string git = 2; +} + +message DepsUnpinnedMsg { EventInfo info = 1; - string revision = 2; - string git = 3; + DepsUnpinned data = 2; } // M030 message NoNodesForSelectionCriteria { + string spec_raw = 1; +} + +message NoNodesForSelectionCriteriaMsg { EventInfo info = 1; - string spec_raw = 2; + NoNodesForSelectionCriteria data = 2; } // Q - Node execution // Q001 message RunningOperationCaughtError { + string exc = 1; +} + +message RunningOperationCaughtErrorMsg { EventInfo info = 1; - string exc = 2; + RunningOperationCaughtError data = 2; } // Q002 message CompileComplete { +} + +message CompileCompleteMsg { EventInfo info = 1; + CompileComplete data = 2; } // Q003 message FreshnessCheckComplete { +} + +message FreshnessCheckCompleteMsg { EventInfo info = 1; + FreshnessCheckComplete data = 2; } // Q004 message SeedHeader { + string header = 1; +} + +message SeedHeaderMsg { EventInfo info = 1; - string header = 2; + SeedHeader data = 2; } // Q005 message SeedHeaderSeparator { + int32 len_header = 1; +} + +message SeedHeaderSeparatorMsg { EventInfo info = 1; - int32 len_header = 2; + SeedHeaderSeparator data = 2; } // Q006 message SQLRunnerException { + string exc = 1; + string exc_info = 2; +} + +message SQLRunnerExceptionMsg { EventInfo info = 1; - string exc = 2; - string exc_info = 3; + SQLRunnerException data = 2; } // Q007 message LogTestResult { + NodeInfo node_info = 1; + string name = 2; + string status = 3; + int32 index = 4; + int32 num_models = 5; + float execution_time = 6; + int32 num_failures = 7; +} + +message 
LogTestResultMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string name = 3; - string status = 4; - int32 index = 5; - int32 num_models = 6; - float execution_time = 7; - int32 num_failures = 8; + LogTestResult data = 2; } @@ -1111,63 +1657,83 @@ message LogTestResult { // Q011 message LogStartLine { + NodeInfo node_info = 1; + string description = 2; + int32 index = 3; + int32 total = 4; +} + +message LogStartLineMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - int32 index = 4; - int32 total = 5; + LogStartLine data = 2; } // Q012 message LogModelResult { + NodeInfo node_info = 1; + string description = 2; + string status = 3; + int32 index = 4; + int32 total = 5; + int32 execution_time = 6; +} + +message LogModelResultMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - int32 execution_time = 7; + LogModelResult data = 2; } // skipped Q013, Q014 // Q015 message LogSnapshotResult { + NodeInfo node_info = 1; + string description = 2; + string status = 3; + int32 index = 4; + int32 total = 5; + float execution_time = 6; + map cfg = 7; +} + +message LogSnapshotResultMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string description = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; - map cfg = 8; + LogSnapshotResult data = 2; } // Q016 message LogSeedResult { + NodeInfo node_info = 1; + string status = 2; + string result_message = 3; + int32 index = 4; + int32 total = 5; + float execution_time = 6; + string schema = 7; + string relation = 8; +} + +message LogSeedResultMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string status = 3; - string result_message = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; - string schema = 8; - string relation = 9; + LogSeedResult data = 2; } // Skipped Q017 // Q018 message LogFreshnessResult { + string status = 1; + NodeInfo node_info = 2; + int32 index = 3; + int32 total = 4; + float execution_time = 5; + string source_name = 6; + string table_name = 7; +} + +message LogFreshnessResultMsg { EventInfo info = 1; - string status = 2; - NodeInfo node_info = 3; - int32 index = 4; - int32 total = 5; - float execution_time = 6; - string source_name = 7; - string table_name = 8; + LogFreshnessResult data = 2; } @@ -1176,117 +1742,181 @@ message LogFreshnessResult { // Q022 message LogCancelLine { + string conn_name = 1; +} + +message LogCancelLineMsg { EventInfo info = 1; - string conn_name = 2; + LogCancelLine data = 2; } // Q023 message DefaultSelector { + string name = 1; +} + +message DefaultSelectorMsg { EventInfo info = 1; - string name = 2; + DefaultSelector data = 2; } // Q024 message NodeStart { + NodeInfo node_info = 1; +} + +message NodeStartMsg { EventInfo info = 1; - NodeInfo node_info = 2; + NodeStart data = 2; } // Q025 message NodeFinished { + NodeInfo node_info = 1; + RunResultMsg run_result = 2; +} + +message NodeFinishedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - RunResultMsg run_result = 4; + NodeFinished data = 2; } // Q026 message QueryCancelationUnsupported { + string type = 1; +} + +message QueryCancelationUnsupportedMsg { EventInfo info = 1; - string type = 2; + QueryCancelationUnsupported data = 2; } // Q027 message ConcurrencyLine { + int32 num_threads = 1; + string target_name = 2; + int32 node_count = 3; +} + +message ConcurrencyLineMsg { EventInfo info = 1; - int32 num_threads = 2; - string target_name = 3; - int32 node_count = 
4; + ConcurrencyLine data = 2; } // Skipped Q028 // Q029 message WritingInjectedSQLForNode { + NodeInfo node_info = 1; +} + +message WritingInjectedSQLForNodeMsg { EventInfo info = 1; - NodeInfo node_info = 2; + WritingInjectedSQLForNode data = 2; } // Q030 message NodeCompiling { + NodeInfo node_info = 1; +} + +message NodeCompilingMsg { EventInfo info = 1; - NodeInfo node_info = 2; + NodeCompiling data = 2; } // Q031 message NodeExecuting { + NodeInfo node_info = 1; +} + +message NodeExecutingMsg { EventInfo info = 1; - NodeInfo node_info = 2; + NodeExecuting data = 2; } // Q032 message LogHookStartLine { + NodeInfo node_info = 1; + string statement = 2; + int32 index = 3; + int32 total = 4; +} + +message LogHookStartLineMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string statement = 3; - int32 index = 4; - int32 total = 5; + LogHookStartLine data = 2; } // Q033 message LogHookEndLine { + NodeInfo node_info = 1; + string statement = 2; + string status = 3; + int32 index = 4; + int32 total = 5; + float execution_time = 6; +} + +message LogHookEndLineMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string statement = 3; - string status = 4; - int32 index = 5; - int32 total = 6; - float execution_time = 7; + LogHookEndLine data = 2; } // Q034 message SkippingDetails { + NodeInfo node_info = 1; + string resource_type = 2; + string schema = 3; + string node_name = 4; + int32 index = 5; + int32 total = 6; +} + +message SkippingDetailsMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string resource_type = 3; - string schema = 4; - string node_name = 5; - int32 index = 6; - int32 total = 7; + SkippingDetails data = 2; } // Q035 message NothingToDo { +} + +message NothingToDoMsg { EventInfo info = 1; + NothingToDo data = 2; } // Q036 message RunningOperationUncaughtError { + string exc = 1; +} + +message RunningOperationUncaughtErrorMsg { EventInfo info = 1; - string exc = 2; + RunningOperationUncaughtError data = 2; } // Q037 message EndRunResult { + repeated RunResultMsg results = 1; + float elapsed_time = 2; + google.protobuf.Timestamp generated_at = 3; + bool success = 4; +} + +message EndRunResultMsg { EventInfo info = 1; - repeated RunResultMsg results = 2; - float elapsed_time = 3; - google.protobuf.Timestamp generated_at = 4; - bool success = 5; + EndRunResult data = 2; } // Q038 message NoNodesSelected { +} + +message NoNodesSelectedMsg { EventInfo info = 1; + NoNodesSelected data = 2; } // W - Node testing @@ -1295,330 +1925,538 @@ message NoNodesSelected { // W002 message CatchableExceptionOnRun { + NodeInfo node_info = 1; + string exc = 2; + string exc_info = 3; +} + +message CatchableExceptionOnRunMsg { EventInfo info = 1; - NodeInfo node_info = 2; - string exc = 3; - string exc_info = 4; + CatchableExceptionOnRun data = 2; } // W003 message InternalExceptionOnRun { + string build_path = 1; + string exc = 2; +} + +message InternalExceptionOnRunMsg { EventInfo info = 1; - string build_path = 2; - string exc = 3; + InternalExceptionOnRun data = 2; } // W004 message GenericExceptionOnRun { + string build_path = 1; + string unique_id = 2; + string exc = 3; +} + +message GenericExceptionOnRunMsg { EventInfo info = 1; - string build_path = 2; - string unique_id = 3; - string exc = 4; + GenericExceptionOnRun data = 2; } // W005 message NodeConnectionReleaseError { + string node_name = 1; + string exc = 2; + string exc_info = 3; +} + +message NodeConnectionReleaseErrorMsg { EventInfo info = 1; - string node_name = 2; - string exc = 3; - string exc_info = 4; + 
NodeConnectionReleaseError data = 2; } // W006 message FoundStats { + string stat_line = 1; +} + +message FoundStatsMsg { EventInfo info = 1; - string stat_line = 2; + FoundStats data = 2; } // Z - Misc // Z001 message MainKeyboardInterrupt { +} + +message MainKeyboardInterruptMsg { EventInfo info = 1; + MainKeyboardInterrupt data = 2; } // Z002 message MainEncounteredError { + string exc = 1; +} + +message MainEncounteredErrorMsg { EventInfo info = 1; - string exc = 2; + MainEncounteredError data = 2; } // Z003 message MainStackTrace { + string stack_trace = 1; +} + +message MainStackTraceMsg { EventInfo info = 1; - string stack_trace = 2; + MainStackTrace data = 2; } // Z004 message SystemErrorRetrievingModTime { + string path = 1; +} + +message SystemErrorRetrievingModTimeMsg { EventInfo info = 1; - string path = 2; + SystemErrorRetrievingModTime data = 2; } // Z005 message SystemCouldNotWrite { + string path = 1; + string reason = 2; + string exc = 3; +} + +message SystemCouldNotWriteMsg { EventInfo info = 1; - string path = 2; - string reason = 3; - string exc = 4; + SystemCouldNotWrite data = 2; } // Z006 message SystemExecutingCmd { + repeated string cmd = 1; +} + +message SystemExecutingCmdMsg { EventInfo info = 1; - repeated string cmd = 2; + SystemExecutingCmd data = 2; } // Z007 +message SystemStdOut{ + bytes bmsg = 1; +} + message SystemStdOutMsg { EventInfo info = 1; - bytes bmsg = 2; + SystemStdOut data = 2; } // Z008 +message SystemStdErr { + bytes bmsg = 1; +} + message SystemStdErrMsg { EventInfo info = 1; - bytes bmsg = 2; + SystemStdErr data = 2; } // Z009 message SystemReportReturnCode { + int32 returncode = 1; +} + +message SystemReportReturnCodeMsg { EventInfo info = 1; - int32 returncode = 2; + SystemReportReturnCode data = 2; } // Z010 message TimingInfoCollected { + NodeInfo node_info = 1; + TimingInfoMsg timing_info = 2; +} + +message TimingInfoCollectedMsg { EventInfo info = 1; - NodeInfo node_info = 2; - TimingInfoMsg timing_info = 3; + TimingInfoCollected data = 2; } // Z011 message LogDebugStackTrace { + string exc_info = 1; +} + +message LogDebugStackTraceMsg { EventInfo info = 1; - string exc_info = 2; + LogDebugStackTrace data = 2; } // Z012 message CheckCleanPath { + string path = 1; +} + +message CheckCleanPathMsg { EventInfo info = 1; - string path = 2; + CheckCleanPath data = 2; } // Z013 message ConfirmCleanPath { + string path = 1; +} + +message ConfirmCleanPathMsg { EventInfo info = 1; - string path = 2; + ConfirmCleanPath data = 2; } // Z014 message ProtectedCleanPath { + string path = 1; +} + +message ProtectedCleanPathMsg { EventInfo info = 1; - string path = 2; + ProtectedCleanPath data = 2; } // Z015 message FinishedCleanPaths { +} + +message FinishedCleanPathsMsg { EventInfo info = 1; + FinishedCleanPaths data = 2; } // Z016 message OpenCommand { + string open_cmd = 1; + string profiles_dir = 2; +} + +message OpenCommandMsg { EventInfo info = 1; - string open_cmd = 2; - string profiles_dir = 3; + OpenCommand data = 2; } // Z017 message EmptyLine { +} + +message EmptyLineMsg { EventInfo info = 1; + EmptyLine data = 2; } // Z018 message ServingDocsPort { + string address = 1; + int32 port = 2; +} + +message ServingDocsPortMsg { EventInfo info = 1; - string address = 2; - int32 port = 3; + ServingDocsPort data = 2; } // Z019 message ServingDocsAccessInfo { + string port = 1; +} + +message ServingDocsAccessInfoMsg { EventInfo info = 1; - string port = 2; + ServingDocsAccessInfo data = 2; } // Z020 message ServingDocsExitInfo { +} + +message 
ServingDocsExitInfoMsg { EventInfo info = 1; + ServingDocsExitInfo data = 2; } // Z021 message RunResultWarning { + string resource_type = 1; + string node_name = 2; + string path = 3; +} + +message RunResultWarningMsg { EventInfo info = 1; - string resource_type = 2; - string node_name = 3; - string path = 4; + RunResultWarning data = 2; } // Z022 message RunResultFailure { + string resource_type = 1; + string node_name = 2; + string path = 3; +} + +message RunResultFailureMsg { EventInfo info = 1; - string resource_type = 2; - string node_name = 3; - string path = 4; + RunResultFailure data = 2; } // Z023 message StatsLine { + map stats = 1; +} + +message StatsLineMsg { EventInfo info = 1; - map stats = 2; + StatsLine data = 2; } // Z024 message RunResultError { + string msg = 1; +} + +message RunResultErrorMsg { EventInfo info = 1; - string msg = 2; + RunResultError data = 2; } // Z025 message RunResultErrorNoMessage { + string status = 1; +} + +message RunResultErrorNoMessageMsg { EventInfo info = 1; - string status = 2; + RunResultErrorNoMessage data = 2; } // Z026 message SQLCompiledPath { + string path = 1; +} + +message SQLCompiledPathMsg { EventInfo info = 1; - string path = 2; + SQLCompiledPath data = 2; } // Z027 message CheckNodeTestFailure { + string relation_name = 1; +} + +message CheckNodeTestFailureMsg { EventInfo info = 1; - string relation_name = 2; + CheckNodeTestFailure data = 2; } // Z028 message FirstRunResultError { + string msg = 1; +} + +message FirstRunResultErrorMsg { EventInfo info = 1; - string msg = 2; + FirstRunResultError data = 2; } // Z029 message AfterFirstRunResultError { + string msg = 1; +} + +message AfterFirstRunResultErrorMsg { EventInfo info = 1; - string msg = 2; + AfterFirstRunResultError data = 2; } // Z030 message EndOfRunSummary { + int32 num_errors = 1; + int32 num_warnings = 2; + bool keyboard_interrupt = 3; +} + +message EndOfRunSummaryMsg { EventInfo info = 1; - int32 num_errors = 2; - int32 num_warnings = 3; - bool keyboard_interrupt = 4; + EndOfRunSummary data = 2; } // Skipped Z031, Z032, Z033 // Z034 message LogSkipBecauseError { + string schema = 1; + string relation = 2; + int32 index = 3; + int32 total = 4; +} + +message LogSkipBecauseErrorMsg { EventInfo info = 1; - string schema = 2; - string relation = 3; - int32 index = 4; - int32 total = 5; + LogSkipBecauseError data = 2; } // Z036 message EnsureGitInstalled { +} + +message EnsureGitInstalledMsg { EventInfo info = 1; + EnsureGitInstalled data = 2; } // Z037 message DepsCreatingLocalSymlink { +} + +message DepsCreatingLocalSymlinkMsg { EventInfo info = 1; + DepsCreatingLocalSymlink data = 2; } // Z038 message DepsSymlinkNotAvailable { +} + +message DepsSymlinkNotAvailableMsg { EventInfo info = 1; + DepsSymlinkNotAvailable data = 2; } // Z039 message DisableTracking { +} + +message DisableTrackingMsg { EventInfo info = 1; + DisableTracking data = 2; } // Z040 message SendingEvent { + string kwargs = 1; +} + +message SendingEventMsg { EventInfo info = 1; - string kwargs = 2; + SendingEvent data = 2; } // Z041 message SendEventFailure { +} + +message SendEventFailureMsg { EventInfo info = 1; + SendEventFailure data = 2; } // Z042 message FlushEvents { +} + +message FlushEventsMsg { EventInfo info = 1; + FlushEvents data = 2; } // Z043 message FlushEventsFailure { +} + +message FlushEventsFailureMsg { EventInfo info = 1; + FlushEventsFailure data = 2; } // Z044 message TrackingInitializeFailure { + string exc_info = 1; +} + +message TrackingInitializeFailureMsg { EventInfo info = 
1; - string exc_info = 2; + TrackingInitializeFailure data = 2; } // Skipped Z045 // Z046 message RunResultWarningMessage { + string msg = 1; +} + +message RunResultWarningMessageMsg { EventInfo info = 1; - string msg = 2; + RunResultWarningMessage data = 2; } // T - Integration tests // T001 message IntegrationTestInfo { + string msg = 1; +} + +message IntegrationTestInfoMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestInfo data = 2; } // T002 message IntegrationTestDebug { + string msg = 1; +} + +message IntegrationTestDebugMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestDebug data = 2; } // T003 message IntegrationTestWarn { + string msg = 1; +} + +message IntegrationTestWarnMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestWarn data = 2; } // T004 message IntegrationTestError { + string msg = 1; +} + +message IntegrationTestErrorMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestError data = 2; } // T005 message IntegrationTestException { + string msg = 1; +} + +message IntegrationTestExceptionMsg { EventInfo info = 1; - string msg = 2; + IntegrationTestException data = 2; } // T006 message UnitTestInfo { + string msg = 1; +} + +message UnitTestInfoMsg { EventInfo info = 1; - string msg = 2; + UnitTestInfo data = 2; } diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 6a597184cd7..e837dc78387 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -11,6 +11,7 @@ Cache, AdapterEventStringFunctor, EventStringFunctor, + EventLevel, ) from dbt.events.format import format_fancy_output_line, pluralize @@ -755,7 +756,7 @@ def code(self): return "E036" def message(self): - pass + return f"{self.exc_info}" @dataclass @@ -867,93 +868,15 @@ def message(self) -> str: @dataclass -class ParseCmdStart(InfoLevel, pt.ParseCmdStart): +class ParseCmdOut(InfoLevel, pt.ParseCmdOut): def code(self): return "I001" def message(self) -> str: - return "Start parsing." - - -@dataclass -class ParseCmdCompiling(InfoLevel, pt.ParseCmdCompiling): - def code(self): - return "I002" - - def message(self) -> str: - return "Compiling." - - -@dataclass -class ParseCmdWritingManifest(InfoLevel, pt.ParseCmdWritingManifest): - def code(self): - return "I003" - - def message(self) -> str: - return "Writing manifest." - - -@dataclass -class ParseCmdDone(InfoLevel, pt.ParseCmdDone): - def code(self): - return "I004" - - def message(self) -> str: - return "Done." 
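# (The restructuring applied throughout types.proto above is uniform: each
# event's fields move into a plain payload message, and a new "<Name>Msg"
# envelope pairs the shared EventInfo header with that payload as `data`.
# A minimal sketch of the two-pass decode this enables, mirroring the
# updated tests/unit/test_proto_events.py later in this patch; it assumes
# only the betterproto-generated classes in dbt.events.proto_types.)

from dbt.events import proto_types as pt

def decode_event(serialized: bytes):
    # First pass: parse just the shared EventInfo header to learn which
    # concrete event this is.
    generic = pt.GenericMessage().parse(serialized)
    # Second pass: re-parse with the matching "<Name>Msg" envelope, which
    # also populates the event-specific `data` payload.
    message_class = getattr(pt, f"{generic.info.name}Msg")
    return message_class().parse(serialized)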
- - -@dataclass -class ManifestDependenciesLoaded(InfoLevel, pt.ManifestDependenciesLoaded): - def code(self): - return "I005" - - def message(self) -> str: - return "Dependencies loaded" - - -@dataclass -class ManifestLoaderCreated(InfoLevel, pt.ManifestLoaderCreated): - def code(self): - return "I006" - - def message(self) -> str: - return "ManifestLoader created" - - -@dataclass -class ManifestLoaded(InfoLevel, pt.ManifestLoaded): - def code(self): - return "I007" - - def message(self) -> str: - return "Manifest loaded" - - -@dataclass -class ManifestChecked(InfoLevel, pt.ManifestChecked): - def code(self): - return "I008" - - def message(self) -> str: - return "Manifest checked" - - -@dataclass -class ManifestFlatGraphBuilt(InfoLevel, pt.ManifestFlatGraphBuilt): - def code(self): - return "I009" - - def message(self) -> str: - return "Flat graph built" + return self.msg -@dataclass -class ParseCmdPerfInfoPath(InfoLevel, pt.ParseCmdPerfInfoPath): - def code(self): - return "I010" - - def message(self) -> str: - return f"Performance info: {self.path}" +# Skipping I002, I003, I004, I005, I006, I007, I008, I009, I010 @dataclass @@ -1290,7 +1213,7 @@ def message(self) -> str: @dataclass -class MacroPatchNotFound(WarnLevel, pt.MacroPatchNotFound): +class MacroNotFoundForPatch(WarnLevel, pt.MacroNotFoundForPatch): def code(self): return "I059" @@ -1723,17 +1646,16 @@ def message(self) -> str: @classmethod def status_to_level(cls, status): # The statuses come from TestStatus - # TODO should this return EventLevel enum instead? level_lookup = { - "fail": "error", - "pass": "info", - "warn": "warn", - "error": "error", + "fail": EventLevel.ERROR, + "pass": EventLevel.INFO, + "warn": EventLevel.WARN, + "error": EventLevel.ERROR, } if status in level_lookup: return level_lookup[status] else: - return "info" + return EventLevel.INFO # Skipped Q008, Q009, Q010 @@ -1855,15 +1777,15 @@ def status_to_level(cls, status): # The statuses come from FreshnessStatus # TODO should this return EventLevel enum instead? 
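# (Same migration as LogTestResult.status_to_level above: the lookup values
# below become EventLevel members rather than bare strings, so the result
# can be passed straight through as fire_event(..., level=...); see the
# freshness.py hunk further down.)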
level_lookup = { - "runtime error": "error", - "pass": "info", - "warn": "warn", - "error": "error", + "runtime error": EventLevel.ERROR, + "pass": EventLevel.INFO, + "warn": EventLevel.WARN, + "error": EventLevel.ERROR, } if status in level_lookup: return level_lookup[status] else: - return "info" + return EventLevel.INFO # Skipped Q019, Q020, Q021 @@ -2164,7 +2086,7 @@ def message(self) -> str: @dataclass -class SystemStdOutMsg(DebugLevel, pt.SystemStdOutMsg): +class SystemStdOut(DebugLevel, pt.SystemStdOut): def code(self): return "Z007" @@ -2173,7 +2095,7 @@ def message(self) -> str: @dataclass -class SystemStdErrMsg(DebugLevel, pt.SystemStdErrMsg): +class SystemStdErr(DebugLevel, pt.SystemStdErr): def code(self): return "Z008" diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 5e81c83fdfb..32bfbb559a1 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -68,7 +68,7 @@ YamlParseListFailure, ) from dbt.events.functions import warn_or_error -from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroPatchNotFound +from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroNotFoundForPatch from dbt.node_types import NodeType from dbt.parser.base import SimpleParser from dbt.parser.search import FileBlock @@ -957,7 +957,7 @@ def parse_patch(self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef) unique_id = f"macro.{patch.package_name}.{patch.name}" macro = self.manifest.macros.get(unique_id) if not macro: - warn_or_error(MacroPatchNotFound(patch_name=patch.name)) + warn_or_error(MacroNotFoundForPatch(patch_name=patch.name)) return if macro.patch_path: package_name, existing_file_path = macro.patch_path.split("://") diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index 704368cf24f..c9c8e5051fa 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -16,7 +16,7 @@ FreshnessStatus, ) from dbt.exceptions import RuntimeException, InternalException -from dbt.events.functions import fire_event, info +from dbt.events.functions import fire_event from dbt.events.types import ( FreshnessCheckComplete, LogStartLine, @@ -56,7 +56,6 @@ def after_execute(self, result): level = LogFreshnessResult.status_to_level(str(result.status)) fire_event( LogFreshnessResult( - info=info(level=level), status=result.status, source_name=source_name, table_name=table_name, @@ -64,7 +63,8 @@ def after_execute(self, result): total=self.num_nodes, execution_time=result.execution_time, node_info=self.node.node_info, - ) + ), + level=level, ) def error_result(self, node, message, start_time, timing_info): diff --git a/core/dbt/task/parse.py b/core/dbt/task/parse.py index 5460bf0f3d0..8ce2b9c5b2c 100644 --- a/core/dbt/task/parse.py +++ b/core/dbt/task/parse.py @@ -11,18 +11,7 @@ from dbt.parser.manifest import Manifest, ManifestLoader, _check_manifest from dbt.logger import DbtProcessState from dbt.clients.system import write_file -from dbt.events.types import ( - ManifestDependenciesLoaded, - ManifestLoaderCreated, - ManifestLoaded, - ManifestChecked, - ManifestFlatGraphBuilt, - ParseCmdStart, - ParseCmdCompiling, - ParseCmdWritingManifest, - ParseCmdDone, - ParseCmdPerfInfoPath, -) +from dbt.events.types import ParseCmdOut from dbt.events.functions import fire_event from dbt.graph import Graph import time @@ -50,7 +39,7 @@ def write_manifest(self): def write_perf_info(self): path = os.path.join(self.config.target_path, PERF_INFO_FILE_NAME) write_file(path, json.dumps(self.loader._perf_info, 
cls=dbt.utils.JSONEncoder, indent=4)) - fire_event(ParseCmdPerfInfoPath(path=path)) + fire_event(ParseCmdOut(msg=f"Performance info: {path}")) # This method takes code that normally exists in other files # and pulls it in here, to simplify logging and make the @@ -68,20 +57,20 @@ def get_full_manifest(self): with PARSING_STATE: start_load_all = time.perf_counter() projects = root_config.load_dependencies() - fire_event(ManifestDependenciesLoaded()) + fire_event(ParseCmdOut(msg="Dependencies loaded")) loader = ManifestLoader(root_config, projects, macro_hook) - fire_event(ManifestLoaderCreated()) + fire_event(ParseCmdOut(msg="ManifestLoader created")) manifest = loader.load() - fire_event(ManifestLoaded()) + fire_event(ParseCmdOut(msg="Manifest loaded")) _check_manifest(manifest, root_config) - fire_event(ManifestChecked()) + fire_event(ParseCmdOut(msg="Manifest checked")) manifest.build_flat_graph() - fire_event(ManifestFlatGraphBuilt()) + fire_event(ParseCmdOut(msg="Flat graph built")) loader._perf_info.load_all_elapsed = time.perf_counter() - start_load_all self.loader = loader self.manifest = manifest - fire_event(ManifestLoaded()) + fire_event(ParseCmdOut(msg="Manifest finished loading")) def compile_manifest(self): adapter = get_adapter(self.config) @@ -89,14 +78,14 @@ def compile_manifest(self): self.graph = compiler.compile(self.manifest) def run(self): - fire_event(ParseCmdStart()) + fire_event(ParseCmdOut(msg="Start parsing.")) self.get_full_manifest() if self.args.compile: - fire_event(ParseCmdCompiling()) + fire_event(ParseCmdOut(msg="Compiling.")) self.compile_manifest() if self.args.write_manifest: - fire_event(ParseCmdWritingManifest()) + fire_event(ParseCmdOut(msg="Writing manifest.")) self.write_manifest() self.write_perf_info() - fire_event(ParseCmdDone()) + fire_event(ParseCmdOut(msg="Done.")) diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index bc8f9a2de75..2d800d24b09 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -27,7 +27,7 @@ RuntimeException, ValidationException, ) -from dbt.events.functions import fire_event, get_invocation_id, info +from dbt.events.functions import fire_event, get_invocation_id from dbt.events.types import ( DatabaseErrorRunningHook, EmptyLine, @@ -38,6 +38,7 @@ LogHookEndLine, LogHookStartLine, ) +from dbt.events.base_types import EventLevel from dbt.logger import ( TextOnly, HookMetadata, @@ -186,10 +187,10 @@ def print_result_line(self, result): description = self.describe_node() if result.status == NodeStatus.Error: status = result.status - level = "error" + level = EventLevel.ERROR else: status = result.message - level = "info" + level = EventLevel.INFO fire_event( LogModelResult( description=description, @@ -198,8 +199,8 @@ def print_result_line(self, result): total=self.num_nodes, execution_time=result.execution_time, node_info=self.node.node_info, - info=info(level=level), - ) + ), + level=level, ) def before_execute(self): diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py index 5c922a5ba90..564a55b1577 100644 --- a/core/dbt/task/seed.py +++ b/core/dbt/task/seed.py @@ -9,7 +9,7 @@ from dbt.exceptions import InternalException from dbt.graph import ResourceTypeSelector from dbt.logger import TextOnly -from dbt.events.functions import fire_event, info +from dbt.events.functions import fire_event from dbt.events.types import ( SeedHeader, SeedHeaderSeparator, @@ -17,6 +17,7 @@ LogSeedResult, LogStartLine, ) +from dbt.events.base_types import EventLevel from dbt.node_types import NodeType from 
dbt.contracts.results import NodeStatus @@ -46,10 +47,9 @@ def compile(self, manifest): def print_result_line(self, result): model = result.node - level = "error" if result.status == NodeStatus.Error else "info" + level = EventLevel.ERROR if result.status == NodeStatus.Error else EventLevel.INFO fire_event( LogSeedResult( - info=info(level=level), status=result.status, result_message=result.message, index=self.node_index, @@ -58,7 +58,8 @@ def print_result_line(self, result): schema=self.node.schema, relation=model.alias, node_info=model.node_info, - ) + ), + level=level, ) diff --git a/core/dbt/task/snapshot.py b/core/dbt/task/snapshot.py index 44ccbd88361..8de99864b96 100644 --- a/core/dbt/task/snapshot.py +++ b/core/dbt/task/snapshot.py @@ -1,7 +1,8 @@ from .run import ModelRunner, RunTask from dbt.exceptions import InternalException -from dbt.events.functions import fire_event, info +from dbt.events.functions import fire_event +from dbt.events.base_types import EventLevel from dbt.events.types import LogSnapshotResult from dbt.graph import ResourceTypeSelector from dbt.node_types import NodeType @@ -15,10 +16,9 @@ def describe_node(self): def print_result_line(self, result): model = result.node cfg = model.config.to_dict(omit_none=True) - level = "error" if result.status == NodeStatus.Error else "info" + level = EventLevel.ERROR if result.status == NodeStatus.Error else EventLevel.INFO fire_event( LogSnapshotResult( - info=info(level=level), status=result.status, description=self.get_node_representation(), cfg=cfg, @@ -26,7 +26,8 @@ def print_result_line(self, result): total=self.num_nodes, execution_time=result.execution_time, node_info=model.node_info, - ) + ), + level=level, ) diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index 26d6d46f028..fccdc894ff1 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -16,7 +16,7 @@ from dbt.contracts.results import TestStatus, PrimitiveDict, RunResult from dbt.context.providers import generate_runtime_model_context from dbt.clients.jinja import MacroGenerator -from dbt.events.functions import fire_event, info +from dbt.events.functions import fire_event from dbt.events.types import ( LogTestResult, LogStartLine, @@ -68,14 +68,14 @@ def print_result_line(self, result): fire_event( LogTestResult( name=model.name, - info=info(level=LogTestResult.status_to_level(str(result.status))), status=str(result.status), index=self.node_index, num_models=self.num_nodes, execution_time=result.execution_time, node_info=model.node_info, num_failures=result.failures, - ) + ), + level=LogTestResult.status_to_level(str(result.status)), ) def print_start_line(self): diff --git a/test/unit/test_config.py b/test/unit/test_config.py index 2f4c7b45ca1..9cdc248b7ed 100644 --- a/test/unit/test_config.py +++ b/test/unit/test_config.py @@ -1150,8 +1150,8 @@ def test__warn_for_unused_resource_config_paths(self): project.warn_for_unused_resource_config_paths(self.used, []) warn_or_error_patch.assert_called_once() event = warn_or_error_patch.call_args[0][0] - assert event.info.name == 'UnusedResourceConfigPath' - msg = event.info.msg + assert type(event).__name__ == 'UnusedResourceConfigPath' + msg = event.message() expected_msg = "- models.my_test_project.baz" assert expected_msg in msg diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py index 0497d5da02a..7532302784f 100644 --- a/test/unit/test_graph_selector_methods.py +++ b/test/unit/test_graph_selector_methods.py @@ -973,8 +973,8 @@ def 
test_select_state_changed_seed_checksum_path_to_path(manifest, previous_stat assert not search_manifest_using_method(manifest, method, 'modified') warn_or_error_patch.assert_called_once() event = warn_or_error_patch.call_args[0][0] - assert event.info.name == 'SeedExceedsLimitSamePath' - msg = event.info.msg + assert type(event).__name__ == 'SeedExceedsLimitSamePath' + msg = event.message() assert msg.startswith('Found a seed (pkg.seed) >1MB in size') with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') @@ -990,8 +990,8 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state manifest, method, 'modified') == {'seed'} warn_or_error_patch.assert_called_once() event = warn_or_error_patch.call_args[0][0] - assert event.info.name == 'SeedIncreased' - msg = event.info.msg + assert type(event).__name__ == 'SeedIncreased' + msg = event.message() assert msg.startswith('Found a seed (pkg.seed) >1MB in size') with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch: assert not search_manifest_using_method(manifest, method, 'new') diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py index b0feea50809..863c40f1e7d 100644 --- a/tests/functional/logging/test_logging.py +++ b/tests/functional/logging/test_logging.py @@ -34,6 +34,7 @@ def test_basic(project, logs_dir): if "[debug]" in log_line: continue log_dct = json.loads(log_line) + log_data = log_dct["data"] log_event = log_dct['info']['name'] if log_event == "NodeStart": node_start = True @@ -41,11 +42,11 @@ def test_basic(project, logs_dir): node_finished = True if node_start and not node_finished: if log_event == 'NodeExecuting': - assert "node_info" in log_dct + assert "node_info" in log_data if log_event == "JinjaLogDebug": - assert "node_info" in log_dct + assert "node_info" in log_data if log_event == "SQLQuery": - assert "node_info" in log_dct + assert "node_info" in log_data if log_event == "TimingInfoCollected": - assert "node_info" in log_dct - assert "timing_info" in log_dct + assert "node_info" in log_data + assert "timing_info" in log_data diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index e37d26ad552..97cd917598a 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -1,7 +1,8 @@ # flake8: noqa from dbt.events.test_types import UnitTestInfo from dbt.events import AdapterLogger -from dbt.events.functions import event_to_json, LOG_VERSION, event_to_dict +from dbt.events.functions import msg_to_json, LOG_VERSION, msg_to_dict +from dbt.events.base_types import msg_from_base_event from dbt.events.types import * from dbt.events.test_types import * @@ -102,43 +103,6 @@ def test_event_codes(self): all_codes.add(code) -def MockNode(): - return ModelNode( - alias="model_one", - name="model_one", - database="dbt", - schema="analytics", - resource_type=NodeType.Model, - unique_id="model.root.model_one", - fqn=["root", "model_one"], - package_name="root", - original_file_path="model_one.sql", - root_path="/usr/src/app", - refs=[], - sources=[], - depends_on=DependsOn(), - config=NodeConfig.from_dict( - { - "enabled": True, - "materialized": "view", - "persist_docs": {}, - "post-hook": [], - "pre-hook": [], - "vars": {}, - "quoting": {}, - "column_types": {}, - "tags": [], - } - ), - tags=[], - path="model_one.sql", - raw_code="", - description="", - columns={}, - checksum=FileHash.from_contents(""), - ) - - sample_values = 
[ # A - pre-project loading MainReportVersion(version=""), @@ -243,16 +207,7 @@ def MockNode(): HookFinished(stat_line="", execution="", execution_time=0), # I - Project parsing ====================== - ParseCmdStart(), - ParseCmdCompiling(), - ParseCmdWritingManifest(), - ParseCmdDone(), - ManifestDependenciesLoaded(), - ManifestLoaderCreated(), - ManifestLoaded(), - ManifestChecked(), - ManifestFlatGraphBuilt(), - ParseCmdPerfInfoPath(path=""), + ParseCmdOut(msg="testing"), GenericTestFileParse(path=""), MacroFileParse(path=""), PartialParsingExceptionProcessingFile(file=""), @@ -289,7 +244,7 @@ def MockNode(): UnusedTables(unused_tables=[]), WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type=""), NoNodeForYamlKey(patch_name="", yaml_key="", file_path=""), - MacroPatchNotFound(patch_name=""), + MacroNotFoundForPatch(patch_name=""), NodeNotFoundOrDisabled( original_file_path="", unique_id="", @@ -429,8 +384,8 @@ def MockNode(): SystemErrorRetrievingModTime(path=""), SystemCouldNotWrite(path="", reason="", exc=""), SystemExecutingCmd(cmd=[""]), - SystemStdOutMsg(bmsg=b""), - SystemStdErrMsg(bmsg=b""), + SystemStdOut(bmsg=b""), + SystemStdErr(bmsg=b""), SystemReportReturnCode(returncode=0), TimingInfoCollected(), LogDebugStackTrace(), @@ -499,9 +454,10 @@ def test_all_serializable(self): # if we have everything we need to test, try to serialize everything for event in sample_values: - event_dict = event_to_dict(event) + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) try: - event_json = event_to_json(event) + msg_json = msg_to_json(msg) except Exception as e: raise Exception(f"{event} is not serializable to json. Originating exception: {e}") diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py index d5b070c41e2..68accd74896 100644 --- a/tests/unit/test_proto_events.py +++ b/tests/unit/test_proto_events.py @@ -1,4 +1,3 @@ -import sys from dbt.events.types import ( MainReportVersion, MainReportArgs, @@ -8,8 +7,9 @@ LogStartLine, LogTestResult, ) -from dbt.events.functions import event_to_dict, LOG_VERSION, reset_metadata_vars, info -from dbt.events import proto_types as pl +from dbt.events.functions import msg_to_dict, LOG_VERSION, reset_metadata_vars +from dbt.events import proto_types as pt +from dbt.events.base_types import msg_from_base_event, EventLevel from dbt.version import installed @@ -20,63 +20,70 @@ def test_events(): # A001 event event = MainReportVersion(version=str(installed), log_version=LOG_VERSION) - event_dict = event_to_dict(event) - event_json = event.to_json() - serialized = bytes(event) + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() + serialized = bytes(msg) assert "Running with dbt=" in str(serialized) - assert set(event_dict.keys()) == {"version", "info", "log_version"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "A001" + assert set(msg_dict.keys()) == {"info", "data"} + assert set(msg_dict["data"].keys()) == {"version", "log_version"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "A001" # Extract EventInfo from serialized message - generic_event = pl.GenericMessage().parse(serialized) + generic_event = pt.GenericMessage().parse(serialized) assert generic_event.info.code == "A001" # get the message class for the real message from the generic message - message_class = getattr(sys.modules["dbt.events.proto_types"], generic_event.info.name) - 
new_event = message_class().parse(serialized) - assert new_event.info.code == event.info.code - assert new_event.version == event.version + message_class = getattr(pt, f"{generic_event.info.name}Msg") + new_msg = message_class().parse(serialized) + assert new_msg.info.code == msg.info.code + assert new_msg.data.version == msg.data.version # A002 event event = MainReportArgs(args={"one": "1", "two": "2"}) - event_dict = event_to_dict(event) - event_json = event.to_json() + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() - assert set(event_dict.keys()) == {"info", "args"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "A002" + assert set(msg_dict.keys()) == {"info", "data"} + assert set(msg_dict["data"].keys()) == {"args"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "A002" def test_exception_events(): event = RollbackFailed(conn_name="test", exc_info="something failed") - event_dict = event_to_dict(event) - event_json = event.to_json() - assert set(event_dict.keys()) == {"info", "conn_name", "exc_info"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "E009" + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() + assert set(msg_dict.keys()) == {"info", "data"} + assert set(msg_dict["data"].keys()) == {"conn_name", "exc_info"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "E009" event = PluginLoadError(exc_info="something failed") - event_dict = event_to_dict(event) - event_json = event.to_json() - assert set(event_dict.keys()) == {"info", "exc_info"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "E036" - # This event has no "msg"/"message" - assert event.info.msg is None + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() + assert set(msg_dict["data"].keys()) == {"exc_info"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "E036" + assert msg.info.msg == "something failed" # Z002 event event = MainEncounteredError(exc="Rollback failed") - event_dict = event_to_dict(event) - event_json = event.to_json() + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + msg_json = msg.to_json() - assert set(event_dict.keys()) == {"info", "exc"} - assert set(event_dict["info"].keys()) == info_keys - assert event_json - assert event.info.code == "Z002" + assert set(msg_dict["data"].keys()) == {"exc"} + assert set(msg_dict["info"].keys()) == info_keys + assert msg_json + assert msg.info.code == "Z002" def test_node_info_events(): @@ -94,7 +101,7 @@ def test_node_info_events(): description="some description", index=123, total=111, - node_info=pl.NodeInfo(**node_info), + node_info=pt.NodeInfo(**node_info), ) assert event assert event.node_info.node_path == "some_path" @@ -107,18 +114,19 @@ def test_extra_dict_on_event(monkeypatch): reset_metadata_vars() event = MainReportVersion(version=str(installed), log_version=LOG_VERSION) - event_dict = event_to_dict(event) - assert set(event_dict["info"].keys()) == info_keys - assert event.info.extra == {"env_key": "env_value"} - serialized = bytes(event) + msg = msg_from_base_event(event) + msg_dict = msg_to_dict(msg) + assert set(msg_dict["info"].keys()) == info_keys + assert msg.info.extra == {"env_key": "env_value"} + serialized = 
bytes(msg) # Extract EventInfo from serialized message - generic_event = pl.GenericMessage().parse(serialized) + generic_event = pt.GenericMessage().parse(serialized) assert generic_event.info.code == "A001" # get the message class for the real message from the generic message - message_class = getattr(sys.modules["dbt.events.proto_types"], generic_event.info.name) - new_event = message_class().parse(serialized) - assert new_event.info.extra == event.info.extra + message_class = getattr(pt, f"{generic_event.info.name}Msg") + new_msg = message_class().parse(serialized) + assert new_msg.info.extra == msg.info.extra # clean up reset_metadata_vars() @@ -127,11 +135,11 @@ def test_extra_dict_on_event(monkeypatch): def test_dynamic_level_events(): event = LogTestResult( name="model_name", - info=info(level=LogTestResult.status_to_level("pass")), status="pass", index=1, num_models=3, num_failures=0 ) - assert event - assert event.info.level == "info" + msg = msg_from_base_event(event, level=EventLevel.INFO) + assert msg + assert msg.info.level == "info" From 02c20477b922ffb3c457429d1d4a3f2c6c31f661 Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Mon, 9 Jan 2023 12:09:12 -0600 Subject: [PATCH 088/156] add deprecation decorator (#6540) * add derecation decorator * fix tests * updated changelog and deprecation reason --- .../Breaking Changes-20221205-141937.yaml | 3 +- core/dbt/context/exceptions_jinja.py | 4 +- core/dbt/events/proto_types.py | 11 + core/dbt/events/types.proto | 9 + core/dbt/events/types.py | 16 + core/dbt/exceptions.py | 357 +++++++++-- core/dbt/internal_deprecations.py | 26 + core/dbt/task/run.py | 4 +- core/dbt/task/test.py | 2 +- tests/unit/test_deprecations.py | 602 ++++++++++++++++++ tests/unit/test_events.py | 1 + 11 files changed, 984 insertions(+), 51 deletions(-) create mode 100644 core/dbt/internal_deprecations.py create mode 100644 tests/unit/test_deprecations.py diff --git a/.changes/unreleased/Breaking Changes-20221205-141937.yaml b/.changes/unreleased/Breaking Changes-20221205-141937.yaml index be840b20a99..5f2a780d661 100644 --- a/.changes/unreleased/Breaking Changes-20221205-141937.yaml +++ b/.changes/unreleased/Breaking Changes-20221205-141937.yaml @@ -5,5 +5,4 @@ body: Cleaned up exceptions to directly raise in code. 
Removed use of all excep time: 2022-12-05T14:19:37.863032-06:00 custom: Author: emmyoop - Issue: "6339" - PR: "6347" + Issue: 6339 6393 diff --git a/core/dbt/context/exceptions_jinja.py b/core/dbt/context/exceptions_jinja.py index 5663b4701e0..a1f49e416fb 100644 --- a/core/dbt/context/exceptions_jinja.py +++ b/core/dbt/context/exceptions_jinja.py @@ -36,7 +36,9 @@ def missing_config(model, name) -> NoReturn: def missing_materialization(model, adapter_type) -> NoReturn: - raise MissingMaterialization(model=model, adapter_type=adapter_type) + raise MissingMaterialization( + materialization=model.config.materialized, adapter_type=adapter_type + ) def missing_relation(relation, model=None) -> NoReturn: diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 124139b1db8..d972a98155e 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -504,6 +504,17 @@ class ExposureNameDeprecationMsg(betterproto.Message): data: "ExposureNameDeprecation" = betterproto.message_field(2) +@dataclass +class FunctionDeprecated(betterproto.Message): + """D008""" + + info: "EventInfo" = betterproto.message_field(1) + function_name: str = betterproto.string_field(2) + reason: str = betterproto.string_field(3) + suggested_action: str = betterproto.string_field(4) + version: str = betterproto.string_field(5) + + @dataclass class AdapterEventDebug(betterproto.Message): """E001""" diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 10c002460c1..b8c7c42a01f 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -384,6 +384,15 @@ message ExposureNameDeprecationMsg { ExposureNameDeprecation data = 2; } +//D008 +message FunctionDeprecated { + EventInfo info = 1; + string function_name = 2; + string reason = 3; + string suggested_action = 4; + string version = 5; +} + // E - DB Adapter // E001 diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index e837dc78387..de8a9cf2c99 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -416,6 +416,22 @@ def message(self): return line_wrap_message(warning_tag(f"Deprecated functionality\n\n{description}")) +@dataclass +class FunctionDeprecated(WarnLevel, pt.FunctionDeprecated): + def code(self): + return "D008" + + def message(self): + extra_reason = "" + if self.reason: + extra_reason = f"\n{self.reason}" + msg = ( + f"`{self.function_name}` is deprecated and will be removed in dbt-core version {self.version}\n\n" + f"Adapter maintainers can resolve this deprecation by {self.suggested_action}. {extra_reason}" + ) + return warning_tag(msg) + + # ======================================================= # E - DB Adapter # ======================================================= diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index 515ec86054b..7d8326cd352 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -3,8 +3,8 @@ import re from typing import Any, Dict, List, Mapping, NoReturn, Optional, Union -# from dbt.contracts.graph import ManifestNode # or ParsedNode? 
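# (A rough illustration of what the new D008 event produces, derived from
# the FunctionDeprecated.message() implementation in types.py above; the
# argument values shown are the DEPRECATION_VERSION / SUGGESTED_ACTION /
# REASON constants this patch adds to exceptions.py below.)

from dbt.events.functions import warn_or_error
from dbt.events.types import FunctionDeprecated

warn_or_error(
    FunctionDeprecated(
        function_name="missing_config",
        reason="See https://github.com/dbt-labs/dbt-core/issues/6393 for more details",
        suggested_action="using `raise MissingConfig` directly instead",
        version="1.5.0",
    )
)
# Logs roughly:
#   [WARNING]: `missing_config` is deprecated and will be removed in
#   dbt-core version 1.5.0
#
#   Adapter maintainers can resolve this deprecation by using
#   `raise MissingConfig` directly instead.
#   See https://github.com/dbt-labs/dbt-core/issues/6393 for more details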
from dbt.dataclass_schema import ValidationError +from dbt.internal_deprecations import deprecated from dbt.events.functions import warn_or_error from dbt.events.helpers import env_secrets, scrub_secrets from dbt.events.types import JinjaLogWarning @@ -692,6 +692,18 @@ def get_message(self) -> str: return msg +class BadSpecError(InternalException): + def __init__(self, repo, revision, error): + self.repo = repo + self.revision = revision + self.stderr = scrub_secrets(error.stderr.strip(), env_secrets()) + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}" + return msg + + class GitCloningError(InternalException): def __init__(self, repo: str, revision: str, error: CommandResultError): self.repo = repo @@ -711,16 +723,8 @@ def get_message(self) -> str: return scrub_secrets(msg, env_secrets()) -class GitCheckoutError(InternalException): - def __init__(self, repo: str, revision: str, error: CommandResultError): - self.repo = repo - self.revision = revision - self.stderr = error.stderr.strip() - super().__init__(msg=self.get_message()) - - def get_message(self) -> str: - msg = f"Error checking out spec='{self.revision}' for repo {self.repo}\n{self.stderr}" - return scrub_secrets(msg, env_secrets()) +class GitCheckoutError(BadSpecError): + pass class InvalidMaterializationArg(CompilationException): @@ -734,6 +738,21 @@ def get_message(self) -> str: return msg +class OperationException(CompilationException): + def __init__(self, operation_name): + self.operation_name = operation_name + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = ( + f"dbt encountered an error when attempting to create a {self.operation_name}. " + "If this error persists, please create an issue at: \n\n" + "https://github.com/dbt-labs/dbt-core" + ) + + return msg + + class SymbolicLinkError(CompilationException): def __init__(self): super().__init__(msg=self.get_message()) @@ -1535,14 +1554,13 @@ def __init__(self, relation): class MaterializationNotAvailable(CompilationException): - def __init__(self, model, adapter_type: str): - self.model = model + def __init__(self, materialization, adapter_type: str): + self.materialization = materialization self.adapter_type = adapter_type super().__init__(msg=self.get_message()) def get_message(self) -> str: - materialization = self.model.get_materialization() - msg = f"Materialization '{materialization}' is not available for {self.adapter_type}!" + msg = f"Materialization '{self.materialization}' is not available for {self.adapter_type}!" 
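        # (The constructor now takes the materialization name directly
        # instead of a whole model object; callers resolve it themselves via
        # model.config.materialized or model.get_materialization(), as the
        # updated call sites in exceptions_jinja.py, run.py, and test.py
        # elsewhere in this patch show.)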
return msg @@ -1772,6 +1790,17 @@ def get_message(self) -> str: # contracts level +class UnrecognizedCredentialType(CompilationException): + def __init__(self, typename: str, supported_types: List): + self.typename = typename + self.supported_types = supported_types + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format( + self.typename, ", ".join('"{}"'.format(t) for t in self.supported_types) + ) + return msg class DuplicateMacroInPackage(CompilationException): @@ -1823,6 +1852,29 @@ def get_message(self) -> str: # jinja exceptions +class PatchTargetNotFound(CompilationException): + def __init__(self, patches: Dict): + self.patches = patches + super().__init__(msg=self.get_message()) + + def get_message(self) -> str: + patch_list = "\n\t".join( + f"model {p.name} (referenced in path {p.original_file_path})" + for p in self.patches.values() + ) + msg = f"dbt could not find models for the following patches:\n\t{patch_list}" + return msg + + +class MacroNotFound(CompilationException): + def __init__(self, node, target_macro_id: str): + self.node = node + self.target_macro_id = target_macro_id + msg = f"'{self.node.unique_id}' references macro '{self.target_macro_id}' which is not defined!" + + super().__init__(msg=msg) + + class MissingConfig(CompilationException): def __init__(self, unique_id: str, name: str): self.unique_id = unique_id @@ -1834,20 +1886,19 @@ def __init__(self, unique_id: str, name: str): class MissingMaterialization(CompilationException): - def __init__(self, model, adapter_type): - self.model = model + def __init__(self, materialization, adapter_type): + self.materialization = materialization self.adapter_type = adapter_type super().__init__(msg=self.get_message()) def get_message(self) -> str: - materialization = self.model.get_materialization() valid_types = "'default'" if self.adapter_type != "default": valid_types = f"'default' and '{self.adapter_type}'" - msg = f"No materialization '{materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})" + msg = f"No materialization '{self.materialization}' was found for adapter {self.adapter_type}! (searched types {valid_types})" return msg @@ -2139,139 +2190,293 @@ def get_message(self) -> str: # They will be removed in 1 (or 2?) versions. Issue to be created to ensure it happens. 
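# (What follows is the bulk of this patch: each legacy helper keeps its
# original behavior but gains a @deprecated wrapper, defined in the new
# core/dbt/internal_deprecations.py further down, which fires a
# FunctionDeprecated warning before delegating. A small sketch of the
# resulting behavior, modeled on the new tests/unit/test_deprecations.py at
# the end of this patch:)

import argparse
import dbt.exceptions

# Minimal stand-in for a node, built the same way the unit tests do.
model = argparse.Namespace()
model.unique_id = "model.pkg.example"

try:
    # Emits the D008 FunctionDeprecated warning, then raises as before.
    dbt.exceptions.missing_config(model, "materialized")
except dbt.exceptions.MissingConfig:
    pass

# functools.wraps preserves the original function under __wrapped__:
assert hasattr(dbt.exceptions.missing_config, "__wrapped__")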
# TODO: add deprecation to functions +DEPRECATION_VERSION = "1.5.0" +SUGGESTED_ACTION = "using `raise {exception}` directly instead" +REASON = "See https://github.com/dbt-labs/dbt-core/issues/6393 for more details" + + +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="JinjaLogWarning"), + reason=REASON, +) def warn(msg, node=None): warn_or_error(JinjaLogWarning(msg=msg, node_info=get_node_info())) return "" +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MissingConfig"), + reason=REASON, +) def missing_config(model, name) -> NoReturn: raise MissingConfig(unique_id=model.unique_id, name=name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MissingMaterialization"), + reason=REASON, +) def missing_materialization(model, adapter_type) -> NoReturn: - raise MissingMaterialization(model=model, adapter_type=adapter_type) + materialization = model.config.materialized + raise MissingMaterialization(materialization=materialization, adapter_type=adapter_type) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MissingRelation"), + reason=REASON, +) def missing_relation(relation, model=None) -> NoReturn: raise MissingRelation(relation, model) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousAlias"), + reason=REASON, +) def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: raise AmbiguousAlias(node_1, node_2, duped_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousCatalogMatch"), + reason=REASON, +) def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: raise AmbiguousCatalogMatch(unique_id, match_1, match_2) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="CacheInconsistency"), + reason=REASON, +) def raise_cache_inconsistent(message) -> NoReturn: raise CacheInconsistency(message) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DataclassNotDict"), + reason=REASON, +) def raise_dataclass_not_dict(obj) -> NoReturn: raise DataclassNotDict(obj) -# note: this is called all over the code in addition to in jinja +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="CompilationException"), + reason=REASON, +) def raise_compiler_error(msg, node=None) -> NoReturn: raise CompilationException(msg, node) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DatabaseException"), + reason=REASON, +) def raise_database_error(msg, node=None) -> NoReturn: raise DatabaseException(msg, node) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DependencyNotFound"), + reason=REASON, +) def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: raise DependencyNotFound(node, node_description, required_pkg) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DependencyException"), + reason=REASON, +) def raise_dependency_error(msg) -> NoReturn: raise DependencyException(scrub_secrets(msg, env_secrets())) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicatePatchPath"), + reason=REASON, +) def 
raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: raise DuplicatePatchPath(patch_1, existing_patch_path) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateResourceName"), + reason=REASON, +) def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: raise DuplicateResourceName(node_1, node_2) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="InvalidPropertyYML"), + reason=REASON, +) def raise_invalid_property_yml_version(path, issue) -> NoReturn: raise InvalidPropertyYML(path, issue) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="NotImplementedException"), + reason=REASON, +) def raise_not_implemented(msg) -> NoReturn: raise NotImplementedException(msg) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RelationWrongType"), + reason=REASON, +) def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: raise RelationWrongType(relation, expected_type, model) # these were implemented in core so deprecating here by calling the new exception directly + + +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateAlias"), + reason=REASON, +) def raise_duplicate_alias( kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str ) -> NoReturn: raise DuplicateAlias(kwargs, aliases, canonical_key) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateSourcePatchName"), + reason=REASON, +) def raise_duplicate_source_patch_name(patch_1, patch_2): raise DuplicateSourcePatchName(patch_1, patch_2) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroPatchName"), + reason=REASON, +) def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): raise DuplicateMacroPatchName(patch_1, existing_patch_path) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroName"), + reason=REASON, +) def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: raise DuplicateMacroName(node_1, node_2, namespace) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="ApproximateMatch"), + reason=REASON, +) def approximate_relation_match(target, relation): raise ApproximateMatch(target, relation) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResults"), + reason=REASON, +) def get_relation_returned_multiple_results(kwargs, matches): raise RelationReturnedMultipleResults(kwargs, matches) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="OperationException"), + reason=REASON, +) def system_error(operation_name): - # Note: This was converted for core to use SymbolicLinkError because it's the only way it was used. Maintaining flexibility here for now. - msg = ( - f"dbt encountered an error when attempting to {operation_name}. 
" - "If this error persists, please create an issue at: \n\n" - "https://github.com/dbt-labs/dbt-core" - ) - raise CompilationException(msg) + raise OperationException(operation_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="InvalidMaterializationArg"), + reason=REASON, +) def invalid_materialization_argument(name, argument): raise InvalidMaterializationArg(name, argument) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="BadSpecException"), + reason=REASON, +) def bad_package_spec(repo, spec, error_message): - msg = f"Error checking out spec='{spec}' for repo {repo}\n{error_message}" - raise InternalException(scrub_secrets(msg, env_secrets())) + raise BadSpecError(spec, repo, error_message) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="CommandResultError"), + reason=REASON, +) def raise_git_cloning_error(error: CommandResultError) -> NoReturn: - error.cmd = list(scrub_secrets(str(error.cmd), env_secrets())) raise error +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="GitCloningProblem"), + reason=REASON, +) def raise_git_cloning_problem(repo) -> NoReturn: raise GitCloningProblem(repo) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MacroInvalidDispatchArg"), + reason=REASON, +) def macro_invalid_dispatch_arg(macro_name) -> NoReturn: raise MacroInvalidDispatchArg(macro_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="GraphDependencyNotFound"), + reason=REASON, +) def dependency_not_found(node, dependency): raise GraphDependencyNotFound(node, dependency) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="TargetNotFound"), + reason=REASON, +) def target_not_found( node, target_name: str, @@ -2288,6 +2493,11 @@ def target_not_found( ) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DocTargetNotFound"), + reason=REASON, +) def doc_target_not_found( model, target_doc_name: str, target_doc_package: Optional[str] ) -> NoReturn: @@ -2296,26 +2506,56 @@ def doc_target_not_found( ) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="InvalidDocArgs"), + reason=REASON, +) def doc_invalid_args(model, args) -> NoReturn: raise InvalidDocArgs(node=model, args=args) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RefBadContext"), + reason=REASON, +) def ref_bad_context(model, args) -> NoReturn: raise RefBadContext(node=model, args=args) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MetricInvalidArgs"), + reason=REASON, +) def metric_invalid_args(model, args) -> NoReturn: raise MetricInvalidArgs(node=model, args=args) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RefInvalidArgs"), + reason=REASON, +) def ref_invalid_args(model, args) -> NoReturn: raise RefInvalidArgs(node=model, args=args) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="InvalidBoolean"), + reason=REASON, +) def invalid_bool_error(got_value, macro_name) -> NoReturn: raise InvalidBoolean(return_value=got_value, macro_name=macro_name) +@deprecated( + version=DEPRECATION_VERSION, + 
suggested_action=SUGGESTED_ACTION.format(exception="InvalidMacroArgType"), + reason=REASON, +) def invalid_type_error(method_name, arg_name, got_value, expected_type) -> NoReturn: """Raise a CompilationException when an adapter method available to macros has changed. @@ -2323,45 +2563,70 @@ def invalid_type_error(method_name, arg_name, got_value, expected_type) -> NoRet raise InvalidMacroArgType(method_name, arg_name, got_value, expected_type) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="DisallowSecretEnvVar"), + reason=REASON, +) def disallow_secret_env_var(env_var_name) -> NoReturn: """Raise an error when a secret env var is referenced outside allowed rendering contexts""" raise DisallowSecretEnvVar(env_var_name) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="ParsingException"), + reason=REASON, +) def raise_parsing_error(msg, node=None) -> NoReturn: raise ParsingException(msg, node) -# These are the exceptions functions that were not called within dbt-core but will remain here but deprecated to give a chance to rework -# TODO: is this valid? Should I create a special exception class for this? +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="CompilationException"), + reason=REASON, +) def raise_unrecognized_credentials_type(typename, supported_types): - msg = 'Unrecognized credentials type "{}" - supported types are ({})'.format( - typename, ", ".join('"{}"'.format(t) for t in supported_types) - ) - raise CompilationException(msg) + raise UnrecognizedCredentialType(typename, supported_types) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="CompilationException"), + reason=REASON, +) def raise_patch_targets_not_found(patches): - patch_list = "\n\t".join( - f"model {p.name} (referenced in path {p.original_file_path})" for p in patches.values() - ) - msg = f"dbt could not find models for the following patches:\n\t{patch_list}" - raise CompilationException(msg) + raise PatchTargetNotFound(patches) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResults"), + reason=REASON, +) def multiple_matching_relations(kwargs, matches): raise RelationReturnedMultipleResults(kwargs, matches) -# while this isn't in our code I wouldn't be surpised it's in adapter code +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MaterializationNotAvailable"), + reason=REASON, +) def materialization_not_available(model, adapter_type): - raise MaterializationNotAvailable(model, adapter_type) + materialization = model.config.materialized + raise MaterializationNotAvailable(materialization=materialization, adapter_type=adapter_type) +@deprecated( + version=DEPRECATION_VERSION, + suggested_action=SUGGESTED_ACTION.format(exception="MacroNotFound"), + reason=REASON, +) def macro_not_found(model, target_macro_id): - msg = f"'{model.unique_id}' references macro '{target_macro_id}' which is not defined!" - raise CompilationException(msg=msg, node=model) + raise MacroNotFound(node=model, target_macro_id=target_macro_id) # adapters use this to format messages. 
it should be deprecated but live on for now diff --git a/core/dbt/internal_deprecations.py b/core/dbt/internal_deprecations.py new file mode 100644 index 00000000000..e6154329ca7 --- /dev/null +++ b/core/dbt/internal_deprecations.py @@ -0,0 +1,26 @@ +import functools +from typing import Optional + +from dbt.events.functions import warn_or_error +from dbt.events.types import FunctionDeprecated + + +def deprecated(suggested_action: str, version: str, reason: Optional[str]): + def inner(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + function_name = func.__name__ + + warn_or_error( + FunctionDeprecated( + function_name=function_name, + suggested_action=suggested_action, + version=version, + reason=reason, + ) + ) # TODO: pass in event? + return func(*args, **kwargs) + + return wrapped + + return inner diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 2d800d24b09..ff468f4dd41 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -253,7 +253,9 @@ def execute(self, model, manifest): ) if materialization_macro is None: - raise MissingMaterialization(model=model, adapter_type=self.adapter.type()) + raise MissingMaterialization( + materialization=model.get_materialization(), adapter_type=self.adapter.type() + ) if "config" not in context: raise InternalException( diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index fccdc894ff1..b55eed940ac 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -101,7 +101,7 @@ def execute_test( ) if materialization_macro is None: - raise MissingMaterialization(model=test, adapter_type=self.adapter.type()) + raise MissingMaterialization(materialization=test.get_materialization(), adapter_type=self.adapter.type()) if "config" not in context: raise InternalException( diff --git a/tests/unit/test_deprecations.py b/tests/unit/test_deprecations.py new file mode 100644 index 00000000000..df7a43c867a --- /dev/null +++ b/tests/unit/test_deprecations.py @@ -0,0 +1,602 @@ +import argparse +import pytest + +from dbt.internal_deprecations import deprecated +import dbt.exceptions +from dbt.node_types import NodeType + + +@deprecated(reason="just because", version="1.23.0", suggested_action="Make some updates") +def to_be_decorated(): + return 5 + + +# simpletest that the return value is not modified +def test_deprecated(): + assert(hasattr(to_be_decorated, '__wrapped__')) + assert(to_be_decorated() == 5) + + +class TestDeprecatedFunctions: + def is_deprecated(self, func): + assert(hasattr(func, '__wrapped__')) + # TODO: add in log check + + def test_warn(self): + self.is_deprecated(dbt.exceptions.warn) + + +class TestDeprecatedExceptionFunctions: + def runFunc(self, func, *args): + return func(*args) + + def is_deprecated(self, func): + assert(hasattr(func, '__wrapped__')) + # TODO: add in log check + + def test_missing_config(self): + func = dbt.exceptions.missing_config + exception = dbt.exceptions.MissingConfig + model = argparse.Namespace() + model.unique_id = '' + name = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(model, name) + + def test_missing_materialization(self): + func = dbt.exceptions.missing_materialization + exception = dbt.exceptions.MissingMaterialization + model = argparse.Namespace() + model.config = argparse.Namespace() + model.config.materialized = '' + adapter_type = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(model, adapter_type) + + def 
test_missing_relation(self): + func = dbt.exceptions.missing_relation + exception = dbt.exceptions.MissingRelation + relation = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(relation) + + def test_raise_ambiguous_alias(self): + func = dbt.exceptions.raise_ambiguous_alias + exception = dbt.exceptions.AmbiguousAlias + node_1 = argparse.Namespace() + node_1.unique_id = "" + node_1.original_file_path = "" + node_2 = argparse.Namespace() + node_2.unique_id = "" + node_2.original_file_path = "" + duped_name = "string" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(node_1, node_2, duped_name) + + def test_raise_ambiguous_catalog_match(self): + func = dbt.exceptions.raise_ambiguous_catalog_match + exception = dbt.exceptions.AmbiguousCatalogMatch + unique_id = "" + match_1 = {"metadata": {"schema": ""}} + match_2 = {"metadata": {"schema": ""}} + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(unique_id, match_1, match_2) + + def test_raise_cache_inconsistent(self): + func = dbt.exceptions.raise_cache_inconsistent + exception = dbt.exceptions.CacheInconsistency + msg = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(msg) + + def test_raise_dataclass_not_dict(self): + func = dbt.exceptions.raise_dataclass_not_dict + exception = dbt.exceptions.DataclassNotDict + obj = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(obj) + + def test_raise_compiler_error(self): + func = dbt.exceptions.raise_compiler_error + exception = dbt.exceptions.CompilationException + msg = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(msg) + + def test_raise_database_error(self): + func = dbt.exceptions.raise_database_error + exception = dbt.exceptions.DatabaseException + msg = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(msg) + + def test_raise_dep_not_found(self): + func = dbt.exceptions.raise_dep_not_found + exception = dbt.exceptions.DependencyNotFound + node = "" + node_description = "" + required_pkg = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(node, node_description, required_pkg) + + def test_raise_dependency_error(self): + func = dbt.exceptions.raise_dependency_error + exception = dbt.exceptions.DependencyException + msg = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(msg) + + def test_raise_duplicate_patch_name(self): + func = dbt.exceptions.raise_duplicate_patch_name + exception = dbt.exceptions.DuplicatePatchPath + patch_1 = argparse.Namespace() + patch_1.name = "" + patch_1.original_file_path = "" + existing_patch_path = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(patch_1, existing_patch_path) + + def test_raise_duplicate_resource_name(self): + func = dbt.exceptions.raise_duplicate_resource_name + exception = dbt.exceptions.DuplicateResourceName + node_1 = argparse.Namespace() + node_1.name = "" + node_1.resource_type = NodeType('model') + node_1.column_name = "" + node_1.unique_id = "" + node_1.original_file_path = "" + node_2 = argparse.Namespace() + node_2.name = "" 
+ node_2.resource_type = "" + node_2.unique_id = "" + node_2.original_file_path = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(node_1, node_2) + + def test_raise_invalid_property_yml_version(self): + func = dbt.exceptions.raise_invalid_property_yml_version + exception = dbt.exceptions.InvalidPropertyYML + path = "" + issue = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(path, issue) + + def test_raise_not_implemented(self): + func = dbt.exceptions.raise_not_implemented + exception = dbt.exceptions.NotImplementedException + msg = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(msg) + + def test_relation_wrong_type(self): + func = dbt.exceptions.relation_wrong_type + exception = dbt.exceptions.RelationWrongType + + relation = argparse.Namespace() + relation.type = "" + expected_type = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(relation, expected_type) + + def test_raise_duplicate_alias(self): + func = dbt.exceptions.raise_duplicate_alias + exception = dbt.exceptions.DuplicateAlias + kwargs = {"": ""} + aliases = {"": ""} + canonical_key = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(kwargs, aliases, canonical_key) + + def test_raise_duplicate_source_patch_name(self): + func = dbt.exceptions.raise_duplicate_source_patch_name + exception = dbt.exceptions.DuplicateSourcePatchName + patch_1 = argparse.Namespace() + patch_1.name = "" + patch_1.path = "" + patch_1.overrides = "" + patch_2 = argparse.Namespace() + patch_2.path = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(patch_1, patch_2) + + def test_raise_duplicate_macro_patch_name(self): + func = dbt.exceptions.raise_duplicate_macro_patch_name + exception = dbt.exceptions.DuplicateMacroPatchName + patch_1 = argparse.Namespace() + patch_1.package_name = "" + patch_1.name = "" + patch_1.original_file_path = "" + existing_patch_path = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(patch_1, existing_patch_path) + + def test_raise_duplicate_macro_name(self): + func = dbt.exceptions.raise_duplicate_macro_name + exception = dbt.exceptions.DuplicateMacroName + node_1 = argparse.Namespace() + node_1.name = "" + node_1.package_name = "" + node_1.original_file_path = "" + node_1.unique_id = "" + node_2 = argparse.Namespace() + node_2.package_name = "" + node_2.unique_id = "" + node_2.original_file_path = "" + namespace = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(node_1, node_2, namespace) + + def test_approximate_relation_match(self): + func = dbt.exceptions.approximate_relation_match + exception = dbt.exceptions.ApproximateMatch + target = "" + relation = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(target, relation) + + def test_get_relation_returned_multiple_results(self): + func = dbt.exceptions.get_relation_returned_multiple_results + exception = dbt.exceptions.RelationReturnedMultipleResults + kwargs = {} + matches = [] + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(kwargs, matches) + + 
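Every test above exercises the same contract: each deprecated module-level helper must raise its replacement exception class, and must carry a __wrapped__ attribute. That second assertion implies the helpers are produced by a decorator built on functools.wraps, since functools.wraps is what sets __wrapped__ on a wrapper. A minimal sketch of such a decorator (hypothetical names; dbt's actual decorator presumably also fires the FunctionDeprecated event seen elsewhere in this series):

import functools

def deprecated(exception):
    # Turn a legacy helper into a shim that raises the new exception type.
    def decorator(func):
        @functools.wraps(func)  # functools.wraps sets wrapper.__wrapped__ = func
        def wrapper(*args, **kwargs):
            raise exception(*args, **kwargs)
        return wrapper
    return decorator

Applying @deprecated(SomeNewException) to a legacy function makes both assertions pass: pytest.raises sees the new exception, and hasattr(func, '__wrapped__') is true.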
def test_system_error(self): + func = dbt.exceptions.system_error + exception = dbt.exceptions.OperationException + operation_name = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(operation_name) + + def test_invalid_materialization_argument(self): + func = dbt.exceptions.invalid_materialization_argument + exception = dbt.exceptions.InvalidMaterializationArg + name = "" + argument = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(name, argument) + + def test_bad_package_spec(self): + func = dbt.exceptions.bad_package_spec + exception = dbt.exceptions.BadSpecError + repo = "" + spec = "" + error = argparse.Namespace() + error.stderr = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(repo, spec, error) + + # def test_raise_git_cloning_error(self): + # func = dbt.exceptions.raise_git_cloning_error + # exception = dbt.exceptions.CommandResultError + + # error = dbt.exceptions.CommandResultError + # error.cwd = "" + # error.cmd = [""] + # error.returncode = 1 + # error.stdout = "" + # error.stderr = "" + + # self.is_deprecated(func) + + # assert(hasattr(func, '__wrapped__')) + # with pytest.raises(exception): + # func(error) + + def test_raise_git_cloning_problem(self): + func = dbt.exceptions.raise_git_cloning_problem + exception = dbt.exceptions.GitCloningProblem + repo = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(repo) + + def test_macro_invalid_dispatch_arg(self): + func = dbt.exceptions.macro_invalid_dispatch_arg + exception = dbt.exceptions.MacroInvalidDispatchArg + macro_name = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(macro_name) + + def test_dependency_not_found(self): + func = dbt.exceptions.dependency_not_found + exception = dbt.exceptions.GraphDependencyNotFound + node = argparse.Namespace() + node.unique_id = "" + dependency = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(node, dependency) + + def test_target_not_found(self): + func = dbt.exceptions.target_not_found + exception = dbt.exceptions.TargetNotFound + node = argparse.Namespace() + node.unique_id = "" + node.original_file_path = "" + node.resource_type = "" + target_name = "" + target_kind = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(node, target_name, target_kind) + + def test_doc_target_not_found(self): + func = dbt.exceptions.doc_target_not_found + exception = dbt.exceptions.DocTargetNotFound + model = argparse.Namespace() + model.unique_id = "" + target_doc_name = "" + target_doc_package = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(model, target_doc_name, target_doc_package) + + def test_ref_bad_context(self): + func = dbt.exceptions.ref_bad_context + exception = dbt.exceptions.RefBadContext + model = argparse.Namespace() + model.name = "" + args = [] + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(model, args) + + def test_metric_invalid_args(self): + func = dbt.exceptions.metric_invalid_args + exception = dbt.exceptions.MetricInvalidArgs + model = argparse.Namespace() + model.unique_id = "" + args = [] + + 
self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(model, args) + + def test_ref_invalid_args(self): + func = dbt.exceptions.ref_invalid_args + exception = dbt.exceptions.RefInvalidArgs + model = argparse.Namespace() + model.unique_id = "" + args = [] + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(model, args) + + def test_invalid_bool_error(self): + func = dbt.exceptions.invalid_bool_error + exception = dbt.exceptions.InvalidBoolean + return_value = "" + macro_name = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(return_value, macro_name) + + def test_invalid_type_error(self): + func = dbt.exceptions.invalid_type_error + exception = dbt.exceptions.InvalidMacroArgType + method_name = "" + arg_name = "" + got_value = "" + expected_type = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(method_name, arg_name, got_value, expected_type) + + def test_disallow_secret_env_var(self): + func = dbt.exceptions.disallow_secret_env_var + exception = dbt.exceptions.DisallowSecretEnvVar + env_var_name = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(env_var_name) + + def test_raise_parsing_error(self): + func = dbt.exceptions.raise_parsing_error + exception = dbt.exceptions.ParsingException + msg = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(msg) + + def test_raise_unrecognized_credentials_type(self): + func = dbt.exceptions.raise_unrecognized_credentials_type + exception = dbt.exceptions.UnrecognizedCredentialType + typename = "" + supported_types = [] + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(typename, supported_types) + + def test_raise_patch_targets_not_found(self): + func = dbt.exceptions.raise_patch_targets_not_found + exception = dbt.exceptions.PatchTargetNotFound + node = argparse.Namespace() + node.name = "" + node.original_file_path = "" + patches = {"patch": node} + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(patches) + + def test_multiple_matching_relations(self): + func = dbt.exceptions.multiple_matching_relations + exception = dbt.exceptions.RelationReturnedMultipleResults + kwargs = {} + matches = [] + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(kwargs, matches) + + def test_materialization_not_available(self): + func = dbt.exceptions.materialization_not_available + exception = dbt.exceptions.MaterializationNotAvailable + model = argparse.Namespace() + model.config = argparse.Namespace() + model.config.materialized = "" + adapter_type = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(model, adapter_type) + + def test_macro_not_found(self): + func = dbt.exceptions.macro_not_found + exception = dbt.exceptions.MacroNotFound + model = argparse.Namespace() + model.unique_id = "" + target_macro_id = "" + + self.is_deprecated(func) + + assert(hasattr(func, '__wrapped__')) + with pytest.raises(exception): + func(model, target_macro_id) diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 97cd917598a..17af8f94369 100644 --- 
a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -138,6 +138,7 @@ def test_event_codes(self): AdapterDeprecationWarning(old_name="", new_name=""), MetricAttributesRenamed(metric_name=""), ExposureNameDeprecation(exposure=""), + FunctionDeprecated(function_name="", reason="", suggested_action="", version=""), # E - DB Adapter ====================== AdapterEventDebug(), From ab3f8dcbfd433a23edbf6da60c879452a5250b6f Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Mon, 9 Jan 2023 11:07:34 -0800 Subject: [PATCH 089/156] Ct 1517/convert 060 persist docs (#6409) * convert the test and fix an error due to a dead code seed * Get rid of old test * Remove unfortunately added files. Don't use that * Co-authored-by: Mila Page --- .../models-column-missing/missing_column.sql | 2 - .../models-column-missing/schema.yaml | 8 -- .../models/my_fun_docs.md | 10 -- .../models/no_docs_model.sql | 1 - .../models/table_model.sql | 2 - .../models/view_model.sql | 2 - .../060_persist_docs_tests/seeds/seed.csv | 3 - .../functional/persist_docs_tests/fixtures.py | 51 ++++++++++ .../persist_docs_tests}/test_persist_docs.py | 96 ++++++++++++------- 9 files changed, 114 insertions(+), 61 deletions(-) delete mode 100644 test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql delete mode 100644 test/integration/060_persist_docs_tests/models-column-missing/schema.yaml delete mode 100644 test/integration/060_persist_docs_tests/models/my_fun_docs.md delete mode 100644 test/integration/060_persist_docs_tests/models/no_docs_model.sql delete mode 100644 test/integration/060_persist_docs_tests/models/table_model.sql delete mode 100644 test/integration/060_persist_docs_tests/models/view_model.sql delete mode 100644 test/integration/060_persist_docs_tests/seeds/seed.csv rename test/integration/060_persist_docs_tests/models/schema.yml => tests/functional/persist_docs_tests/fixtures.py (68%) rename {test/integration/060_persist_docs_tests => tests/functional/persist_docs_tests}/test_persist_docs.py (65%) diff --git a/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql b/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql deleted file mode 100644 index 642b0f14a19..00000000000 --- a/test/integration/060_persist_docs_tests/models-column-missing/missing_column.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='table') }} -select 1 as id, 'Ed' as name diff --git a/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml b/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml deleted file mode 100644 index aa7b4f88820..00000000000 --- a/test/integration/060_persist_docs_tests/models-column-missing/schema.yaml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -models: - - name: missing_column - columns: - - name: id - description: "test id column description" - - name: column_that_does_not_exist - description: "comment that cannot be created" diff --git a/test/integration/060_persist_docs_tests/models/my_fun_docs.md b/test/integration/060_persist_docs_tests/models/my_fun_docs.md deleted file mode 100644 index f3c0fbf55ec..00000000000 --- a/test/integration/060_persist_docs_tests/models/my_fun_docs.md +++ /dev/null @@ -1,10 +0,0 @@ -{% docs my_fun_doc %} -name Column description "with double quotes" -and with 'single quotes' as welll as other; -'''abc123''' -reserved -- characters --- -/* comment */ -Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting - 
-{% enddocs %} diff --git a/test/integration/060_persist_docs_tests/models/no_docs_model.sql b/test/integration/060_persist_docs_tests/models/no_docs_model.sql deleted file mode 100644 index e39a7a1566f..00000000000 --- a/test/integration/060_persist_docs_tests/models/no_docs_model.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as id, 'Alice' as name diff --git a/test/integration/060_persist_docs_tests/models/table_model.sql b/test/integration/060_persist_docs_tests/models/table_model.sql deleted file mode 100644 index c0e93c3f307..00000000000 --- a/test/integration/060_persist_docs_tests/models/table_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='table') }} -select 1 as id, 'Joe' as name diff --git a/test/integration/060_persist_docs_tests/models/view_model.sql b/test/integration/060_persist_docs_tests/models/view_model.sql deleted file mode 100644 index a6f96a16d5d..00000000000 --- a/test/integration/060_persist_docs_tests/models/view_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='view') }} -select 2 as id, 'Bob' as name diff --git a/test/integration/060_persist_docs_tests/seeds/seed.csv b/test/integration/060_persist_docs_tests/seeds/seed.csv deleted file mode 100644 index 1a728c8ab74..00000000000 --- a/test/integration/060_persist_docs_tests/seeds/seed.csv +++ /dev/null @@ -1,3 +0,0 @@ -id,name -1,Alice -2,Bob diff --git a/test/integration/060_persist_docs_tests/models/schema.yml b/tests/functional/persist_docs_tests/fixtures.py similarity index 68% rename from test/integration/060_persist_docs_tests/models/schema.yml rename to tests/functional/persist_docs_tests/fixtures.py index 5a909162456..c596f5219cf 100644 --- a/test/integration/060_persist_docs_tests/models/schema.yml +++ b/tests/functional/persist_docs_tests/fixtures.py @@ -1,3 +1,4 @@ +_PROPERTIES__SCHEMA_YML = """ version: 2 models: @@ -68,3 +69,53 @@ description: | Some stuff here and then a call to {{ doc('my_fun_doc')}} +""" + +_MODELS__VIEW = """ +{{ config(materialized='view') }} +select 2 as id, 'Bob' as name +""" + +_MODELS__NO_DOCS_MODEL = """ +select 1 as id, 'Alice' as name +""" + +_DOCS__MY_FUN_DOCS = """ +{% docs my_fun_doc %} +name Column description "with double quotes" +and with 'single quotes' as welll as other; +'''abc123''' +reserved -- characters +-- +/* comment */ +Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting + +{% enddocs %} +""" + +_MODELS__TABLE = """ +{{ config(materialized='table') }} +select 1 as id, 'Joe' as name +""" + + +_MODELS__MISSING_COLUMN = """ +{{ config(materialized='table') }} +select 1 as id, 'Ed' as name +""" + +_PROPERITES__SCHEMA_MISSING_COL = """ +version: 2 +models: + - name: missing_column + columns: + - name: id + description: "test id column description" + - name: column_that_does_not_exist + description: "comment that cannot be created" +""" + +_SEEDS__SEED = """id,name +1,Alice +2,Bob +""" diff --git a/test/integration/060_persist_docs_tests/test_persist_docs.py b/tests/functional/persist_docs_tests/test_persist_docs.py similarity index 65% rename from test/integration/060_persist_docs_tests/test_persist_docs.py rename to tests/functional/persist_docs_tests/test_persist_docs.py index 89fecf6383e..7d337edd7cc 100644 --- a/test/integration/060_persist_docs_tests/test_persist_docs.py +++ b/tests/functional/persist_docs_tests/test_persist_docs.py @@ -1,17 +1,47 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os - import json - - -class BasePersistDocsTest(DBTIntegrationTest): - @property - def schema(self): 
- return "persist_docs_060" - - @property +import os +import pytest + +from dbt.tests.util import ( + run_dbt, +) + +from tests.functional.persist_docs_tests.fixtures import ( + _DOCS__MY_FUN_DOCS, + _MODELS__MISSING_COLUMN, + _MODELS__NO_DOCS_MODEL, + _MODELS__TABLE, + _MODELS__VIEW, + _PROPERITES__SCHEMA_MISSING_COL, + _PROPERTIES__SCHEMA_YML, + _SEEDS__SEED, +) + + +class BasePersistDocsTest: + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project): + run_dbt(["seed"]) + run_dbt() + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": _SEEDS__SEED} + + @pytest.fixture(scope="class") def models(self): - return "models" + return { + "no_docs_model.sql": _MODELS__NO_DOCS_MODEL, + "table_model.sql": _MODELS__TABLE, + "view_model.sql": _MODELS__VIEW, + } + + @pytest.fixture(scope="class") + def properties(self): + return { + "my_fun_docs.md": _DOCS__MY_FUN_DOCS, + "schema.yml": _PROPERTIES__SCHEMA_YML, + } def _assert_common_comments(self, *comments): for comment in comments: @@ -40,8 +70,12 @@ def _assert_has_table_comments(self, table_node): table_comment, table_id_comment, table_name_comment ) - def _assert_has_view_comments(self, view_node, has_node_comments=True, - has_column_comments=True): + def _assert_has_view_comments( + self, + view_node, + has_node_comments=True, + has_column_comments=True + ): view_comment = view_node['metadata']['comment'] if has_node_comments: assert view_comment.startswith('View model description') @@ -61,10 +95,9 @@ def _assert_has_view_comments(self, view_node, has_node_comments=True, class TestPersistDocs(BasePersistDocsTest): - @property - def project_config(self): + @pytest.fixture(scope="class") + def project_config_update(self): return { - 'config-version': 2, 'models': { 'test': { '+persist_docs': { @@ -75,13 +108,12 @@ def project_config(self): } } - def run_has_comments_pglike(self): - self.run_dbt() - self.run_dbt(['docs', 'generate']) + def test_has_comments_pglike(self, project): + run_dbt(['docs', 'generate']) with open('target/catalog.json') as fp: catalog_data = json.load(fp) assert 'nodes' in catalog_data - assert len(catalog_data['nodes']) == 3 + assert len(catalog_data['nodes']) == 4 table_node = catalog_data['nodes']['model.test.table_model'] view_node = self._assert_has_table_comments(table_node) @@ -91,15 +123,11 @@ def run_has_comments_pglike(self): no_docs_node = catalog_data['nodes']['model.test.no_docs_model'] self._assert_has_view_comments(no_docs_node, False, False) - @use_profile('postgres') - def test_postgres_comments(self): - self.run_has_comments_pglike() class TestPersistDocsColumnMissing(BasePersistDocsTest): - @property - def project_config(self): + @pytest.fixture(scope="class") + def project_config_update(self): return { - 'config-version': 2, 'models': { 'test': { '+persist_docs': { @@ -109,14 +137,16 @@ def project_config(self): } } - @property + @pytest.fixture(scope="class") def models(self): - return 'models-column-missing' + return {"missing_column.sql": _MODELS__MISSING_COLUMN} + + @pytest.fixture(scope="class") + def properties(self): + return {"schema.yml": _PROPERITES__SCHEMA_MISSING_COL} - @use_profile('postgres') - def test_postgres_missing_column(self): - self.run_dbt() - self.run_dbt(['docs', 'generate']) + def test_postgres_missing_column(self, project): + run_dbt(['docs', 'generate']) with open('target/catalog.json') as fp: catalog_data = json.load(fp) assert 'nodes' in catalog_data From 34fa7034661f43a9f89de303c3e6c71f88394835 Mon Sep 17 00:00:00 2001 From: Emily 
Rockman Date: Mon, 9 Jan 2023 15:56:42 -0600 Subject: [PATCH 090/156] fix log msg format (#6557) --- core/dbt/events/proto_types.py | 7 ++++++- core/dbt/events/types.proto | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index d972a98155e..93dd1cb3639 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -508,13 +508,18 @@ class ExposureNameDeprecationMsg(betterproto.Message): class FunctionDeprecated(betterproto.Message): """D008""" - info: "EventInfo" = betterproto.message_field(1) function_name: str = betterproto.string_field(2) reason: str = betterproto.string_field(3) suggested_action: str = betterproto.string_field(4) version: str = betterproto.string_field(5) +@dataclass +class FunctionDeprecatedMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "FunctionDeprecated" = betterproto.message_field(2) + + @dataclass class AdapterEventDebug(betterproto.Message): """E001""" diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index b8c7c42a01f..16e71d4baa3 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -386,13 +386,17 @@ message ExposureNameDeprecationMsg { //D008 message FunctionDeprecated { - EventInfo info = 1; string function_name = 2; string reason = 3; string suggested_action = 4; string version = 5; } +message FunctionDeprecatedMsg { + EventInfo info = 1; + FunctionDeprecated data = 2; +} + // E - DB Adapter // E001 From 294def205fa7a7d5523782f0ec5768aa77558072 Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Mon, 9 Jan 2023 18:15:59 -0500 Subject: [PATCH 091/156] Fix event level regression (#6556) * CT-6501: Make certain disabled test events debug-level again to match historical behavior. * CT-1740: Add changelog entry. --- .../unreleased/Fixes-20230109-161254.yaml | 7 ++++++ core/dbt/events/types.py | 2 +- core/dbt/parser/manifest.py | 23 +++++++++++-------- 3 files changed, 22 insertions(+), 10 deletions(-) create mode 100644 .changes/unreleased/Fixes-20230109-161254.yaml diff --git a/.changes/unreleased/Fixes-20230109-161254.yaml b/.changes/unreleased/Fixes-20230109-161254.yaml new file mode 100644 index 00000000000..2ccd417b107 --- /dev/null +++ b/.changes/unreleased/Fixes-20230109-161254.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Restore historical behavior of certain disabled test messages, so that they + are at the less obtrusive debug level, rather than the warning level. 
+time: 2023-01-09T16:12:54.064875-05:00 +custom: + Author: peterallenwebb + Issue: "6501" diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index de8a9cf2c99..e2d108419ca 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -1092,7 +1092,7 @@ def message(self) -> str: @dataclass -class InvalidDisabledTargetInTestNode(WarnLevel, pt.InvalidDisabledTargetInTestNode): +class InvalidDisabledTargetInTestNode(DebugLevel, pt.InvalidDisabledTargetInTestNode): def code(self): return "I050" diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 108b73e06f4..988c4539c9e 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -7,6 +7,7 @@ from typing import Dict, Optional, Mapping, Callable, Any, List, Type, Union, Tuple from itertools import chain import time +from dbt.events.base_types import EventLevel import dbt.exceptions import dbt.tracking @@ -961,19 +962,20 @@ def invalid_target_fail_unless_test( target_kind: str, target_package: Optional[str] = None, disabled: Optional[bool] = None, + should_warn_if_disabled: bool = True, ): if node.resource_type == NodeType.Test: if disabled: - fire_event( - InvalidDisabledTargetInTestNode( - resource_type_title=node.resource_type.title(), - unique_id=node.unique_id, - original_file_path=node.original_file_path, - target_kind=target_kind, - target_name=target_name, - target_package=target_package if target_package else "", - ) + event = InvalidDisabledTargetInTestNode( + resource_type_title=node.resource_type.title(), + unique_id=node.unique_id, + original_file_path=node.original_file_path, + target_kind=target_kind, + target_name=target_name, + target_package=target_package if target_package else "", ) + + fire_event(event, EventLevel.WARN if should_warn_if_disabled else None) else: warn_or_error( NodeNotFoundOrDisabled( @@ -1132,6 +1134,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur target_kind="node", target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), + should_warn_if_disabled=False, ) continue @@ -1175,6 +1178,7 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: M target_kind="node", target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), + should_warn_if_disabled=False, ) continue @@ -1270,6 +1274,7 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif target_kind="node", target_package=target_model_package, disabled=(isinstance(target_model, Disabled)), + should_warn_if_disabled=False, ) continue From 2b0f6597a4290c8bda3db2d5e6f360a944959c15 Mon Sep 17 00:00:00 2001 From: Kshitij Aranke Date: Mon, 9 Jan 2023 15:23:02 -0800 Subject: [PATCH 092/156] [CT-1694] Deprecate event tracking tests (#6538) --- .../Under the Hood-20230106-114412.yaml | 6 + .../model-compilation-error/bad_ref.sql | 2 - .../models/example.sql | 2 - .../models/example_2.sql | 4 - .../models/model_error.sql | 2 - .../models/schema.yml | 12 - .../models/snapshottable.sql | 4 - .../seeds/example_seed.csv | 2 - .../033_event_tracking_tests/snapshots/a.sql | 4 - .../033_event_tracking_tests/test_events.py | 986 ------------------ 10 files changed, 6 insertions(+), 1018 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230106-114412.yaml delete mode 100644 test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql delete mode 100644 test/integration/033_event_tracking_tests/models/example.sql delete mode 100644 
test/integration/033_event_tracking_tests/models/example_2.sql delete mode 100644 test/integration/033_event_tracking_tests/models/model_error.sql delete mode 100644 test/integration/033_event_tracking_tests/models/schema.yml delete mode 100644 test/integration/033_event_tracking_tests/models/snapshottable.sql delete mode 100644 test/integration/033_event_tracking_tests/seeds/example_seed.csv delete mode 100644 test/integration/033_event_tracking_tests/snapshots/a.sql delete mode 100644 test/integration/033_event_tracking_tests/test_events.py diff --git a/.changes/unreleased/Under the Hood-20230106-114412.yaml b/.changes/unreleased/Under the Hood-20230106-114412.yaml new file mode 100644 index 00000000000..e6f7e46f930 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230106-114412.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: '[CT-1694] Deprecate event tracking tests' +time: 2023-01-06T11:44:12.210821-08:00 +custom: + Author: aranke + Issue: "6467" diff --git a/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql b/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql deleted file mode 100644 index 06dd3b0d29c..00000000000 --- a/test/integration/033_event_tracking_tests/model-compilation-error/bad_ref.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from {{ ref('does_not_exist') }} diff --git a/test/integration/033_event_tracking_tests/models/example.sql b/test/integration/033_event_tracking_tests/models/example.sql deleted file mode 100644 index 2cd691ea7b4..00000000000 --- a/test/integration/033_event_tracking_tests/models/example.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select 1 as id diff --git a/test/integration/033_event_tracking_tests/models/example_2.sql b/test/integration/033_event_tracking_tests/models/example_2.sql deleted file mode 100644 index 6e892d91c47..00000000000 --- a/test/integration/033_event_tracking_tests/models/example_2.sql +++ /dev/null @@ -1,4 +0,0 @@ - -select * from {{ ref('example') }} -union all -select * from {{ ref('example') }} diff --git a/test/integration/033_event_tracking_tests/models/model_error.sql b/test/integration/033_event_tracking_tests/models/model_error.sql deleted file mode 100644 index 45c65306faf..00000000000 --- a/test/integration/033_event_tracking_tests/models/model_error.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select * from a_thing_that_does_not_exist diff --git a/test/integration/033_event_tracking_tests/models/schema.yml b/test/integration/033_event_tracking_tests/models/schema.yml deleted file mode 100644 index 5ac3436dc22..00000000000 --- a/test/integration/033_event_tracking_tests/models/schema.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 2 -models: -- name: example - columns: - - name: id - tests: - - unique -- name: example_2 - columns: - - name: id - tests: - - unique diff --git a/test/integration/033_event_tracking_tests/models/snapshottable.sql b/test/integration/033_event_tracking_tests/models/snapshottable.sql deleted file mode 100644 index 3c9a65a221a..00000000000 --- a/test/integration/033_event_tracking_tests/models/snapshottable.sql +++ /dev/null @@ -1,4 +0,0 @@ - -select - 1 as id, - '2018-07-15T00:00:00Z'::timestamp as updated_at diff --git a/test/integration/033_event_tracking_tests/seeds/example_seed.csv b/test/integration/033_event_tracking_tests/seeds/example_seed.csv deleted file mode 100644 index bfde6bfa0b8..00000000000 --- a/test/integration/033_event_tracking_tests/seeds/example_seed.csv +++ /dev/null @@ -1,2 +0,0 @@ -a,b,c -1,2,3 diff --git 
a/test/integration/033_event_tracking_tests/snapshots/a.sql b/test/integration/033_event_tracking_tests/snapshots/a.sql deleted file mode 100644 index dd90278e560..00000000000 --- a/test/integration/033_event_tracking_tests/snapshots/a.sql +++ /dev/null @@ -1,4 +0,0 @@ -{% snapshot snapshotted %} - {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}} - select * from {{ schema }}.snapshottable -{% endsnapshot %} diff --git a/test/integration/033_event_tracking_tests/test_events.py b/test/integration/033_event_tracking_tests/test_events.py deleted file mode 100644 index 1bcbbcec688..00000000000 --- a/test/integration/033_event_tracking_tests/test_events.py +++ /dev/null @@ -1,986 +0,0 @@ -# NOTE: turning off event tracking tests! [#3631](https://github.com/dbt-labs/dbt-core/issues/3631) -# from test.integration.base import DBTIntegrationTest, use_profile -# import hashlib -# import os - -# from unittest.mock import call, ANY, patch - -# import dbt.exceptions -# import dbt.version -# import dbt.tracking -# import dbt.utils - - -# # immutably creates a new array with the value inserted at the index -# def inserted(value, index, arr): -# x = [] -# for i in range(0, len(arr)): -# if i == index: -# x.append(value) -# x.append(arr[i]) -# else: -# x.append(arr[i]) -# return x - -# class TestEventTracking(DBTIntegrationTest): -# maxDiff = None - -# @property -# def profile_config(self): -# return { -# 'config': { -# 'send_anonymous_usage_stats': True -# } -# } - -# @property -# def schema(self): -# return "event_tracking_033" - -# @staticmethod -# def dir(path): -# return path.lstrip("/") - -# @property -# def models(self): -# return self.dir("models") - -# # TODO : Handle the subject. Should be the same every time! -# # TODO : Regex match a uuid for user_id, invocation_id? 
- -# @patch('dbt.tracking.tracker.track_struct_event') -# def run_event_test( -# self, -# cmd, -# expected_calls, -# expected_contexts, -# track_fn, -# expect_pass=True, -# expect_raise=False -# ): -# self.run_dbt(["deps"]) -# track_fn.reset_mock() - -# project_id = hashlib.md5( -# self.config.project_name.encode('utf-8')).hexdigest() -# version = str(dbt.version.get_installed_version()) - -# if expect_raise: -# with self.assertRaises(BaseException): -# self.run_dbt(cmd, expect_pass=expect_pass) -# else: -# self.run_dbt(cmd, expect_pass=expect_pass) - -# user_id = dbt.tracking.active_user.id -# invocation_id = dbt.tracking.active_user.invocation_id - -# self.assertTrue(len(user_id) > 0) -# self.assertTrue(len(invocation_id) > 0) - -# track_fn.assert_has_calls(expected_calls) - -# ordered_contexts = [] - -# for (args, kwargs) in track_fn.call_args_list: -# ordered_contexts.append( -# [context.__dict__ for context in kwargs['context']] -# ) - -# populated_contexts = [] - -# for context in expected_contexts: -# if callable(context): -# populated_contexts.append(context( -# project_id, user_id, invocation_id, version)) -# else: -# populated_contexts.append(context) - -# return ordered_contexts == populated_contexts - -# def load_context(self): - -# def populate(project_id, user_id, invocation_id, version): -# return [{ -# 'schema': 'iglu:com.dbt/load_all_timing/jsonschema/1-0-3', -# 'data': { -# 'invocation_id': invocation_id, -# 'project_id': project_id, -# 'parsed_path_count': ANY, -# 'path_count': ANY, -# 'is_partial_parse_enabled': ANY, -# 'is_static_analysis_enabled': ANY, -# 'static_analysis_path_count': ANY, -# 'static_analysis_parsed_path_count': ANY, -# 'load_all_elapsed': ANY, -# 'read_files_elapsed': ANY, -# 'load_macros_elapsed': ANY, -# 'parse_project_elapsed': ANY, -# 'patch_sources_elapsed': ANY, -# 'process_manifest_elapsed': ANY, -# }, -# }] -# return populate - -# def resource_counts_context(self): -# return [ -# { -# 'schema': 'iglu:com.dbt/resource_counts/jsonschema/1-0-0', -# 'data': { -# 'models': ANY, -# 'tests': ANY, -# 'snapshots': ANY, -# 'analyses': ANY, -# 'macros': ANY, -# 'operations': ANY, -# 'seeds': ANY, -# 'sources': ANY, -# 'exposures': ANY, -# } -# } -# ] - -# def build_context( -# self, -# command, -# progress, -# result_type=None, -# adapter_type='postgres' -# ): - -# def populate( -# project_id, -# user_id, -# invocation_id, -# version -# ): -# return [ -# { -# 'schema': 'iglu:com.dbt/invocation/jsonschema/1-0-1', -# 'data': { -# 'project_id': project_id, -# 'user_id': user_id, -# 'invocation_id': invocation_id, -# 'command': command, -# 'options': None, # TODO : Add options to compile cmd! 
-# 'version': version, - -# 'run_type': 'regular', -# 'adapter_type': adapter_type, -# 'progress': progress, - -# 'result_type': result_type, -# 'result': None, -# } -# }, -# { -# 'schema': 'iglu:com.dbt/platform/jsonschema/1-0-0', -# 'data': ANY -# }, -# { -# 'schema': 'iglu:com.dbt/invocation_env/jsonschema/1-0-0', -# 'data': ANY -# } -# ] - -# return populate - -# def run_context( -# self, -# materialization, -# hashed_contents, -# model_id, -# index, -# total, -# status, -# ): -# timing = [] -# error = False - -# if status != 'ERROR': -# timing = [ANY, ANY] -# else: -# error = True - -# def populate(project_id, user_id, invocation_id, version): -# return [{ -# 'schema': 'iglu:com.dbt/run_model/jsonschema/1-0-1', -# 'data': { -# 'invocation_id': invocation_id, - -# 'model_materialization': materialization, - -# 'execution_time': ANY, -# 'hashed_contents': hashed_contents, -# 'model_id': model_id, - -# 'index': index, -# 'total': total, - -# 'run_status': status, -# 'run_error': error, -# 'run_skipped': False, - -# 'timing': timing, -# }, -# }] - -# return populate - - -# class TestEventTrackingSuccess(TestEventTracking): -# @property -# def packages_config(self): -# return { -# 'packages': [ -# { -# 'git': 'https://github.com/dbt-labs/dbt-integration-project', -# 'revision': 'dbt/1.0.0', -# }, -# ], -# } - -# @property -# def project_config(self): -# return { -# 'config-version': 2, -# "seed-paths": [self.dir("data")], -# "test-paths": [self.dir("test")], -# 'seeds': { -# 'quote_columns': False, -# } -# } - -# @use_profile("postgres") -# def test__postgres_event_tracking_compile(self): -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('compile', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.build_context('compile', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["compile", "--vars", "sensitive_thing: abc"], -# expected_calls_A, -# expected_contexts -# ) - -# test_result_B = self.run_event_test( -# ["compile", "--vars", "sensitive_thing: abc"], -# expected_calls_B, -# expected_contexts -# ) - -# self.assertTrue(test_result_A or test_result_B) - -# @use_profile("postgres") -# def test__postgres_event_tracking_deps(self): -# package_context = [ -# { -# 'schema': 'iglu:com.dbt/invocation/jsonschema/1-0-1', -# 'data': { -# 'project_id': '098f6bcd4621d373cade4e832627b4f6', -# 'user_id': ANY, -# 'invocation_id': ANY, -# 'version': ANY, -# 'command': 'deps', -# 'run_type': 'regular', -# 'options': None, -# 'adapter_type': 'postgres' -# } -# }, -# { -# 'schema': 'iglu:com.dbt/package_install/jsonschema/1-0-0', -# 'data': { -# 'name': 'c5552991412d1cd86e5c20a87f3518d5', -# 'source': 'git', -# 'version': '6deb95629194572d44ca26c4bc25b573' -# } -# } -# ] - -# expected_calls = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='package', -# label=ANY, -# property_='install', -# context=ANY -# ), -# call( -# category='dbt', 
-# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_contexts = [ -# self.build_context('deps', 'start'), -# package_context, -# self.build_context('deps', 'end', result_type='ok') -# ] - -# test_result = self.run_event_test(["deps"], expected_calls, expected_contexts) -# self.assertTrue(test_result) - -# @use_profile("postgres") -# def test__postgres_event_tracking_seed(self): -# def seed_context(project_id, user_id, invocation_id, version): -# return [{ -# 'schema': 'iglu:com.dbt/run_model/jsonschema/1-0-1', -# 'data': { -# 'invocation_id': invocation_id, - -# 'model_materialization': 'seed', - -# 'execution_time': ANY, -# 'hashed_contents': 'd41d8cd98f00b204e9800998ecf8427e', -# 'model_id': '39bc2cd707d99bd3e600d2faaafad7ae', - -# 'index': 1, -# 'total': 1, - -# 'run_status': 'SUCCESS', -# 'run_error': False, -# 'run_skipped': False, - -# 'timing': [ANY, ANY], -# }, -# }] - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('seed', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# seed_context, -# self.build_context('seed', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test(["seed"], expected_calls_A, expected_contexts) -# test_result_B = self.run_event_test(["seed"], expected_calls_B, expected_contexts) - -# self.assertTrue(test_result_A or test_result_B) - -# @use_profile("postgres") -# def test__postgres_event_tracking_models(self): -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# hashed = '20ff78afb16c8b3b8f83861b1d3b99bd' -# # this hashed contents field changes on azure postgres tests, I believe -# # due to newlines again -# if os.name == 'nt': -# hashed = '52cf9d1db8f0a18ca64ef64681399746' - -# expected_contexts = [ -# self.build_context('run', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.run_context( -# hashed_contents='1e5789d34cddfbd5da47d7713aa9191c', -# model_id='4fbacae0e1b69924b22964b457148fb8', -# index=1, -# total=2, -# status='SUCCESS', -# materialization='view' -# ), -# self.run_context( -# hashed_contents=hashed, -# model_id='57994a805249953b31b738b1af7a1eeb', -# index=2, -# total=2, -# status='SUCCESS', -# materialization='view' -# ), -# self.build_context('run', 'end', 
result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["run", "--model", "example", "example_2"], -# expected_calls_A, -# expected_contexts -# ) - -# test_result_B = self.run_event_test( -# ["run", "--model", "example", "example_2"], -# expected_calls_A, -# expected_contexts -# ) - -# self.assertTrue(test_result_A or test_result_B) - -# @use_profile("postgres") -# def test__postgres_event_tracking_model_error(self): -# # cmd = ["run", "--model", "model_error"] -# # self.run_event_test(cmd, event_run_model_error, expect_pass=False) - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('run', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.run_context( -# hashed_contents='4419e809ce0995d99026299e54266037', -# model_id='576c3d4489593f00fad42b97c278641e', -# index=1, -# total=1, -# status='ERROR', -# materialization='view' -# ), -# self.build_context('run', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["run", "--model", "model_error"], -# expected_calls_A, -# expected_contexts, -# expect_pass=False -# ) - -# test_result_B = self.run_event_test( -# ["run", "--model", "model_error"], -# expected_calls_B, -# expected_contexts, -# expect_pass=False -# ) - -# self.assertTrue(test_result_A or test_result_B) - -# @use_profile("postgres") -# def test__postgres_event_tracking_tests(self): -# # TODO: dbt does not track events for tests, but it should! 
-# self.run_dbt(["deps"]) -# self.run_dbt(["run", "--model", "example", "example_2"]) - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('test', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.build_context('test', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["test"], -# expected_calls_A, -# expected_contexts, -# expect_pass=False -# ) - -# test_result_B = self.run_event_test( -# ["test"], -# expected_calls_A, -# expected_contexts, -# expect_pass=False -# ) - -# self.assertTrue(test_result_A or test_result_B) - - -# class TestEventTrackingCompilationError(TestEventTracking): -# @property -# def project_config(self): -# return { -# 'config-version': 2, -# "model-paths": [self.dir("model-compilation-error")], -# } - -# @use_profile("postgres") -# def test__postgres_event_tracking_with_compilation_error(self): -# expected_calls = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_contexts = [ -# self.build_context('compile', 'start'), -# self.build_context('compile', 'end', result_type='error') -# ] - -# test_result = self.run_event_test( -# ["compile"], -# expected_calls, -# expected_contexts, -# expect_pass=False, -# expect_raise=True -# ) - -# self.assertTrue(test_result) - - -# class TestEventTrackingUnableToConnect(TestEventTracking): - -# @property -# def profile_config(self): -# return { -# 'config': { -# 'send_anonymous_usage_stats': True -# }, -# 'test': { -# 'outputs': { -# 'default2': { -# 'type': 'postgres', -# 'threads': 4, -# 'host': self.database_host, -# 'port': 5432, -# 'user': 'root', -# 'pass': 'password', -# 'dbname': 'dbt', -# 'schema': self.unique_schema() -# }, -# 'noaccess': { -# 'type': 'postgres', -# 'threads': 4, -# 'host': self.database_host, -# 'port': 5432, -# 'user': 'BAD', -# 'pass': 'bad_password', -# 'dbname': 'dbt', -# 'schema': self.unique_schema() -# } -# }, -# 'target': 'default2' -# } -# } - -# @use_profile("postgres") -# def test__postgres_event_tracking_unable_to_connect(self): -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('run', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.build_context('run', 'end', result_type='error') -# ] - -# test_result_A = self.run_event_test( -# ["run", "--target", "noaccess", "--models", "example"], -# 
expected_calls_A, -# expected_contexts, -# expect_pass=False -# ) - -# test_result_B = self.run_event_test( -# ["run", "--target", "noaccess", "--models", "example"], -# expected_calls_B, -# expected_contexts, -# expect_pass=False -# ) - -# self.assertTrue(test_result_A or test_result_B) - - -# class TestEventTrackingSnapshot(TestEventTracking): -# @property -# def project_config(self): -# return { -# 'config-version': 2, -# "snapshot-paths": ['snapshots'] -# } - -# @use_profile("postgres") -# def test__postgres_event_tracking_snapshot(self): -# self.run_dbt(["run", "--models", "snapshottable"]) - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='run_model', -# label=ANY, -# context=ANY -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# # the model here has a raw_code that contains the schema, which changes -# expected_contexts = [ -# self.build_context('snapshot', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.run_context( -# hashed_contents=ANY, -# model_id='820793a4def8d8a38d109a9709374849', -# index=1, -# total=1, -# status='SUCCESS', -# materialization='snapshot' -# ), -# self.build_context('snapshot', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["snapshot"], -# expected_calls_A, -# expected_contexts -# ) - -# test_result_B = self.run_event_test( -# ["snapshot"], -# expected_calls_B, -# expected_contexts -# ) - -# self.assertTrue(test_result_A or test_result_B) - - -# class TestEventTrackingCatalogGenerate(TestEventTracking): -# @use_profile("postgres") -# def test__postgres_event_tracking_catalog_generate(self): -# # create a model for the catalog -# self.run_dbt(["run", "--models", "example"]) - -# expected_calls_A = [ -# call( -# category='dbt', -# action='invocation', -# label='start', -# context=ANY -# ), -# call( -# category='dbt', -# action='load_project', -# label=ANY, -# context=ANY, -# ), -# call( -# category='dbt', -# action='resource_counts', -# label=ANY, -# context=ANY, -# ), -# call( -# category='dbt', -# action='invocation', -# label='end', -# context=ANY -# ), -# ] - -# expected_calls_B = inserted( -# call( -# category='dbt', -# action='experimental_parser', -# label=ANY, -# context=ANY -# ), -# 3, -# expected_calls_A -# ) - -# expected_contexts = [ -# self.build_context('generate', 'start'), -# self.load_context(), -# self.resource_counts_context(), -# self.build_context('generate', 'end', result_type='ok') -# ] - -# test_result_A = self.run_event_test( -# ["docs", "generate"], -# expected_calls_A, -# expected_contexts -# ) - -# test_result_B = self.run_event_test( -# ["docs", "generate"], -# expected_calls_B, -# expected_contexts -# ) - -# self.assertTrue(test_result_A or test_result_B) From 89cc89dfdf76b2cb2054daa56eaed317441ec000 Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Tue, 10 Jan 2023 14:57:39 +0100 Subject: [PATCH 093/156] Fix changelog entry for 5989/6432 (#6560) --- .changes/1.4.0/Fixes-20221213-112620.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.changes/1.4.0/Fixes-20221213-112620.yaml b/.changes/1.4.0/Fixes-20221213-112620.yaml index a2220f9a920..fabe5d1af2d 100644 --- a/.changes/1.4.0/Fixes-20221213-112620.yaml +++ b/.changes/1.4.0/Fixes-20221213-112620.yaml @@ -3,4 +3,4 @@ body: '[CT-1284] Change Python model default materialization to table' time: 2022-12-13T11:26:20.550017-08:00 custom: Author: aranke - Issue: "6345" + Issue: "5989" From 9eb82c64974478d94f0a10fecb70191b06ab78ef Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Tue, 10 Jan 2023 11:13:50 -0700 Subject: [PATCH 094/156] Remove duplicated changelog entry for 5521 (#6563) --- .changes/1.4.0-b1.md | 1 - .changes/1.4.0/Features-20220914-095625.yaml | 6 ------ CHANGELOG.md | 1 - 3 files changed, 8 deletions(-) delete mode 100644 .changes/1.4.0/Features-20220914-095625.yaml diff --git a/.changes/1.4.0-b1.md b/.changes/1.4.0-b1.md index b2a0e96827c..d9d8537e98a 100644 --- a/.changes/1.4.0-b1.md +++ b/.changes/1.4.0-b1.md @@ -5,7 +5,6 @@ - Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) - Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) - Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521)) - Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) - extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) - This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) diff --git a/.changes/1.4.0/Features-20220914-095625.yaml b/.changes/1.4.0/Features-20220914-095625.yaml deleted file mode 100644 index d46b1bfa8d8..00000000000 --- a/.changes/1.4.0/Features-20220914-095625.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Migrate dbt-utils current_timestamp macros into core + adapters -time: 2022-09-14T09:56:25.97818-07:00 -custom: - Author: colin-rogers-dbt - Issue: "5521" diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a91696f68b..5925cb9492d 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,6 @@ - Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) - Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. 
([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) - Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- Migrate dbt-utils current_timestamp macros into core + adapters ([#5521](https://github.com/dbt-labs/dbt-core/issues/5521)) - Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) - extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) - This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) From 4d6352db14c81a13e375a3183dff77346803ded9 Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Tue, 10 Jan 2023 13:14:24 -0500 Subject: [PATCH 095/156] CT-1645: Bump mashumaro version to receive regression fix, and add unit test to confirm fix. (#6564) --- .changes/unreleased/Fixes-20230110-124132.yaml | 7 +++++++ core/setup.py | 2 +- tests/unit/test_events.py | 10 ++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Fixes-20230110-124132.yaml diff --git a/.changes/unreleased/Fixes-20230110-124132.yaml b/.changes/unreleased/Fixes-20230110-124132.yaml new file mode 100644 index 00000000000..cc484367fe0 --- /dev/null +++ b/.changes/unreleased/Fixes-20230110-124132.yaml @@ -0,0 +1,7 @@ +kind: Fixes +body: Bump mashumaro version to get regression fix and add unit test to verify that + fix. +time: 2023-01-10T12:41:32.339631-05:00 +custom: + Author: peterallenwebb + Issue: "6428" diff --git a/core/setup.py b/core/setup.py index c2c04458ace..5378f0e6065 100644 --- a/core/setup.py +++ b/core/setup.py @@ -54,7 +54,7 @@ "hologram>=0.0.14,<=0.0.15", "isodate>=0.6,<0.7", "logbook>=1.5,<1.6", - "mashumaro[msgpack]==3.2", + "mashumaro[msgpack]==3.3.1", "minimal-snowplow-tracker==0.0.2", "networkx>=2.3,<2.8.1;python_version<'3.8'", "networkx>=2.3,<3;python_version>='3.8'", diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 17af8f94369..aa1579da9d1 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -5,6 +5,7 @@ from dbt.events.base_types import msg_from_base_event from dbt.events.types import * from dbt.events.test_types import * +from dbt.contracts.results import TimingInfo from dbt.events.base_types import ( BaseEvent, @@ -464,3 +465,12 @@ def test_all_serializable(self): T = TypeVar("T") + + +def test_date_serialization(): + ti = TimingInfo("test") + ti.begin() + ti.end() + ti_dict = ti.to_dict() + assert ti_dict["started_at"].endswith("Z") + assert ti_dict["completed_at"].endswith("Z") From b9fdfd9e3641e38ff4c96d3b20c5773e1ff50d54 Mon Sep 17 00:00:00 2001 From: Tim Mastny Date: Tue, 10 Jan 2023 12:42:51 -0600 Subject: [PATCH 096/156] Adds the meta field to node_info in logs (#6493) * add meta attribute to nodeinfo for events * also add meta to dataclass * add to unit test to ensure meta is added * adding functional test to check that meta is passed to nodeinfo during logging * changelog * remove unused import * add tests with non-string keys * renaming test dict keys * add non-string value * resolve failing test * test additional non-string values * fix flake8 * Stringify meta dict in node_info Co-authored-by: Gerda Shank --- .../unreleased/Features-20221230-104820.yaml | 7 +++ core/dbt/contracts/graph/nodes.py | 4 ++ core/dbt/events/proto_types.py | 1 +
core/dbt/events/types.proto | 1 + core/dbt/utils.py | 7 +++ tests/functional/logging/test_meta_logging.py | 43 +++++++++++++++++++ tests/unit/test_proto_events.py | 3 ++ 7 files changed, 66 insertions(+) create mode 100644 .changes/unreleased/Features-20221230-104820.yaml create mode 100644 tests/functional/logging/test_meta_logging.py diff --git a/.changes/unreleased/Features-20221230-104820.yaml b/.changes/unreleased/Features-20221230-104820.yaml new file mode 100644 index 00000000000..51dc7ca85f0 --- /dev/null +++ b/.changes/unreleased/Features-20221230-104820.yaml @@ -0,0 +1,7 @@ +kind: Features +body: The meta configuration field is now included in the node_info property of structured + logs. +time: 2022-12-30T10:48:20.486416-06:00 +custom: + Author: tmastny + Issue: "6216" diff --git a/core/dbt/contracts/graph/nodes.py b/core/dbt/contracts/graph/nodes.py index 730e2286ccd..a299f5e9b12 100644 --- a/core/dbt/contracts/graph/nodes.py +++ b/core/dbt/contracts/graph/nodes.py @@ -46,6 +46,7 @@ from dbt.events.contextvars import set_contextvars from dbt import flags from dbt.node_types import ModelLanguage, NodeType +from dbt.utils import cast_dict_to_dict_of_strings from .model_config import ( @@ -206,6 +207,8 @@ class NodeInfoMixin: @property def node_info(self): + meta = getattr(self, "meta", {}) + meta_stringified = cast_dict_to_dict_of_strings(meta) node_info = { "node_path": getattr(self, "path", None), "node_name": getattr(self, "name", None), @@ -215,6 +218,7 @@ def node_info(self): "node_status": str(self._event_status.get("node_status")), "node_started_at": self._event_status.get("started_at"), "node_finished_at": self._event_status.get("finished_at"), + "meta": meta_stringified, } node_info_msg = NodeInfo(**node_info) return node_info_msg diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 93dd1cb3639..db61362f428 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -47,6 +47,7 @@ class NodeInfo(betterproto.Message): node_status: str = betterproto.string_field(6) node_started_at: str = betterproto.string_field(7) node_finished_at: str = betterproto.string_field(8) + meta: str = betterproto.string_field(9) @dataclass diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 16e71d4baa3..a03e70fd364 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -35,6 +35,7 @@ message NodeInfo { string node_status = 6; string node_started_at = 7; string node_finished_at = 8; + map<string, string> meta = 9; } // RunResult diff --git a/core/dbt/utils.py b/core/dbt/utils.py index 987371b6b02..6afe9d1e26d 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -683,3 +683,10 @@ def cast_to_int(integer: Optional[int]) -> int: return 0 else: return integer + + +def cast_dict_to_dict_of_strings(dct): + new_dct = {} + for k, v in dct.items(): + new_dct[str(k)] = str(v) + return new_dct
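For illustration, a minimal standalone sketch of what the new cast_dict_to_dict_of_strings helper above does to a meta dict before it is attached to node_info (plain Python, no dbt imports; the sample dict is invented for the example):

    import json

    def cast_dict_to_dict_of_strings(dct):
        # both keys and values are coerced with str(), so non-string keys
        # (the int 1 below) and nested values become plain strings
        new_dct = {}
        for k, v in dct.items():
            new_dct[str(k)] = str(v)
        return new_dct

    meta = {"owners": ["team1", "team2"], 1: "non-string-key", "key": 1}
    print(json.dumps(cast_dict_to_dict_of_strings(meta)))
    # {"owners": "['team1', 'team2']", "1": "non-string-key", "key": "1"}

The same coercion explains the expected values in the functional test that follows: model2's list-valued meta arrives in the log line as the string "['team1', 'team2']".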
diff --git a/tests/functional/logging/test_meta_logging.py b/tests/functional/logging/test_meta_logging.py new file mode 100644 index 00000000000..76c261fe901 --- /dev/null +++ b/tests/functional/logging/test_meta_logging.py @@ -0,0 +1,43 @@ +import pytest +from dbt.tests.util import run_dbt, read_file +import json + +model1 = 'select 1 as fun' +model2 = '{{ config(meta={"owners": ["team1", "team2"]})}} select 1 as fun' +model3 = '{{ config(meta={"key": 1})}} select 1 as fun' + +@pytest.fixture(scope="class") # noqa +def models(): + return {"model1.sql": model1, "model2.sql": model2, "model3.sql": model3} + + +# This test checks that various events contain node_info, +# which is supplied by the log_contextvars context manager +def test_meta(project, logs_dir): + run_dbt(["--log-format=json", "run"]) + + # get log file + log_file = read_file(logs_dir, "dbt.log") + assert log_file + + for log_line in log_file.split('\n'): + # skip empty lines + if len(log_line) == 0: + continue + # The adapter logging also shows up, so skip non-json lines + if "[debug]" in log_line: + continue + + log_dct = json.loads(log_line) + if "node_info" not in log_dct["data"]: + continue + + print(f"--- log_dct: {log_dct}") + node_info = log_dct["data"]["node_info"] + node_path = node_info['node_path'] + if node_path == "model1.sql": + assert node_info['meta'] == {} + elif node_path == "model2.sql": + assert node_info['meta'] == {"owners": "['team1', 'team2']"} + elif node_path == "model3.sql": + assert node_info['meta'] == {"key": "1"} diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py index 68accd74896..2b25cd4985c 100644 --- a/tests/unit/test_proto_events.py +++ b/tests/unit/test_proto_events.py @@ -87,6 +87,7 @@ def test_exception_events(): def test_node_info_events(): + meta_dict = {"string-key1": ["value1", 2], "string-key2": {"nested-dict-key": "value2"}, 1: "value-from-non-string-key", "string-key3": 1, "string-key4": ["string1", 1, "string2", 2]} node_info = { "node_path": "some_path", "node_name": "some_name", @@ -96,6 +97,7 @@ "node_status": "started", "node_started_at": "some_time", "node_finished_at": "another_time", + "meta": meta_dict, } event = LogStartLine( description="some description", @@ -105,6 +107,7 @@ ) assert event assert event.node_info.node_path == "some_path" + assert event.node_info.meta == meta_dict def test_extra_dict_on_event(monkeypatch): From b13b0e949232ed7fefb626ab0fe6499979621482 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Tue, 10 Jan 2023 12:35:06 -0700 Subject: [PATCH 097/156] Re-factor list of YAML keys for hooks to late-render (#6435) * Re-factor list of YAML keys for hooks to late-render * Add `pre_` and `post_hook` to list of late-rendered hooks * Check for non-empty set intersection Co-authored-by: Kshitij Aranke * Test functional synonymy of `*_hook` with `*-hook` Test that `pre_hook`/`post_hook` are functionally synonymous with `pre-hook`/`post-hook` for model project config * Undo bugfix to validate the new test fails * Revert "Undo bugfix to validate the new test fails" This reverts commit e83a2be2eb6f0ef4d95ffcfaf77ea9287eb0fbca.
Co-authored-by: Kshitij Aranke --- .../unreleased/Fixes-20221213-092655.yaml | 6 +++++ core/dbt/config/renderer.py | 3 ++- tests/functional/hooks/test_model_hooks.py | 23 +++++++++++++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Fixes-20221213-092655.yaml diff --git a/.changes/unreleased/Fixes-20221213-092655.yaml b/.changes/unreleased/Fixes-20221213-092655.yaml new file mode 100644 index 00000000000..b187daf9ad8 --- /dev/null +++ b/.changes/unreleased/Fixes-20221213-092655.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Late-rendering for `pre_` and `post_hook`s in `dbt_project.yml` +time: 2022-12-13T09:26:55.11397-07:00 +custom: + Author: dbeatty10 + Issue: "6411" diff --git a/core/dbt/config/renderer.py b/core/dbt/config/renderer.py index 8fc4211754e..434e30666a4 100644 --- a/core/dbt/config/renderer.py +++ b/core/dbt/config/renderer.py @@ -159,7 +159,8 @@ def should_render_keypath(self, keypath: Keypath) -> bool: if first in {"seeds", "models", "snapshots", "tests"}: keypath_parts = {(k.lstrip("+ ") if isinstance(k, str) else k) for k in keypath} # model-level hooks - if "pre-hook" in keypath_parts or "post-hook" in keypath_parts: + late_rendered_hooks = {"pre-hook", "post-hook", "pre_hook", "post_hook"} + if keypath_parts.intersection(late_rendered_hooks): return False return True diff --git a/tests/functional/hooks/test_model_hooks.py b/tests/functional/hooks/test_model_hooks.py index 097fa8af0c8..79f3632bd8e 100644 --- a/tests/functional/hooks/test_model_hooks.py +++ b/tests/functional/hooks/test_model_hooks.py @@ -170,6 +170,29 @@ def test_pre_and_post_run_hooks(self, project, dbt_profile_target): self.check_hooks("end", project, dbt_profile_target["host"]) +class TestPrePostModelHooksUnderscores(TestPrePostModelHooks): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "test": { + "pre_hook": [ + # inside transaction (runs second) + MODEL_PRE_HOOK, + # outside transaction (runs first) + {"sql": "vacuum {{ this.schema }}.on_model_hook", "transaction": False}, + ], + "post_hook": [ + # outside transaction (runs second) + {"sql": "vacuum {{ this.schema }}.on_model_hook", "transaction": False}, + # inside transaction (runs first) + MODEL_POST_HOOK, + ], + } + } + } + + class TestHookRefs(BaseTestPrePost): @pytest.fixture(scope="class") def project_config_update(self): From 0dbdecef10bb85ba951e58e6f372686bd6bc0449 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Tue, 10 Jan 2023 15:56:52 -0500 Subject: [PATCH 098/156] Ct 1716 cleanup logging events (#6561) * Combine DbtProfileError log events * Combine DbtProjectErrorException with DbtProjectError * Combine cache logging events * Changie * fix ticket number * Ooops. Add another file. 
* fix serialization of profile names --- .../Under the Hood-20230110-114233.yaml | 6 + core/dbt/adapters/cache.py | 77 ++--- core/dbt/events/proto_types.py | 272 ++---------------- core/dbt/events/types.proto | 211 ++------------ core/dbt/events/types.py | 213 ++++---------- core/dbt/task/base.py | 29 +- tests/unit/test_events.py | 41 +-- 7 files changed, 158 insertions(+), 691 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230110-114233.yaml diff --git a/.changes/unreleased/Under the Hood-20230110-114233.yaml b/.changes/unreleased/Under the Hood-20230110-114233.yaml new file mode 100644 index 00000000000..c18a26d4a03 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230110-114233.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Combine some logging events +time: 2023-01-10T11:42:33.580756-05:00 +custom: + Author: gshank + Issue: 1716 1717 1719 diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py index 90c4cab27fb..430c79d3b3a 100644 --- a/core/dbt/adapters/cache.py +++ b/core/dbt/adapters/cache.py @@ -16,21 +16,7 @@ TruncatedModelNameCausedCollision, ) from dbt.events.functions import fire_event, fire_event_if -from dbt.events.types import ( - AddLink, - AddRelation, - DropCascade, - DropMissingRelation, - DropRelation, - DumpAfterAddGraph, - DumpAfterRenameSchema, - DumpBeforeAddGraph, - DumpBeforeRenameSchema, - RenameSchema, - TemporaryRelation, - UncachedRelation, - UpdateReference, -) +from dbt.events.types import CacheAction, CacheDumpGraph import dbt.flags as flags from dbt.utils import lowercase @@ -281,7 +267,7 @@ def _add_link(self, referenced_key, dependent_key): referenced.add_reference(dependent) - # TODO: Is this dead code? I can't seem to find it grepping the codebase. + # This is called in plugins/postgres/dbt/adapters/postgres/impl.py def add_link(self, referenced, dependent): """Add a link between two relations to the database. If either relation does not exist, it will be added as an "external" relation. @@ -303,9 +289,9 @@ def add_link(self, referenced, dependent): # referring to a table outside our control. There's no need to make # a link - we will never drop the referenced relation during a run. fire_event( - UncachedRelation( - dep_key=_make_msg_from_ref_key(dep_key), + CacheAction( ref_key=_make_msg_from_ref_key(ref_key), + ref_key_2=_make_msg_from_ref_key(dep_key), ) ) return @@ -318,8 +304,10 @@ def add_link(self, referenced, dependent): dependent = dependent.replace(type=referenced.External) self.add(dependent) fire_event( - AddLink( - dep_key=_make_msg_from_ref_key(dep_key), ref_key=_make_msg_from_ref_key(ref_key) + CacheAction( + action="add_link", + ref_key=_make_msg_from_ref_key(dep_key), + ref_key_2=_make_msg_from_ref_key(ref_key), ) ) with self.lock: @@ -332,12 +320,18 @@ def add(self, relation): :param BaseRelation relation: The underlying relation. 
""" cached = _CachedRelation(relation) - fire_event(AddRelation(relation=_make_ref_key_msg(cached))) - fire_event_if(flags.LOG_CACHE_EVENTS, lambda: DumpBeforeAddGraph(dump=self.dump_graph())) + fire_event_if( + flags.LOG_CACHE_EVENTS, + lambda: CacheDumpGraph(before_after="before", action="adding", dump=self.dump_graph()), + ) + fire_event(CacheAction(action="add_relation", ref_key=_make_ref_key_msg(cached))) with self.lock: self._setdefault(cached) - fire_event_if(flags.LOG_CACHE_EVENTS, lambda: DumpAfterAddGraph(dump=self.dump_graph())) + fire_event_if( + flags.LOG_CACHE_EVENTS, + lambda: CacheDumpGraph(before_after="after", action="adding", dump=self.dump_graph()), + ) def _remove_refs(self, keys): """Removes all references to all entries in keys. This does not @@ -365,16 +359,19 @@ def drop(self, relation): """ dropped_key = _make_ref_key(relation) dropped_key_msg = _make_ref_key_msg(relation) - fire_event(DropRelation(dropped=dropped_key_msg)) + fire_event(CacheAction(action="drop_relation", ref_key=dropped_key_msg)) with self.lock: if dropped_key not in self.relations: - fire_event(DropMissingRelation(relation=dropped_key_msg)) + fire_event(CacheAction(action="drop_missing_relation", ref_key=dropped_key_msg)) return consequences = self.relations[dropped_key].collect_consequences() # convert from a list of _ReferenceKeys to a list of ReferenceKeyMsgs consequence_msgs = [_make_msg_from_ref_key(key) for key in consequences] - - fire_event(DropCascade(dropped=dropped_key_msg, consequences=consequence_msgs)) + fire_event( + CacheAction( + action="drop_cascade", ref_key=dropped_key_msg, ref_list=consequence_msgs + ) + ) self._remove_refs(consequences) def _rename_relation(self, old_key, new_relation): @@ -397,12 +394,14 @@ def _rename_relation(self, old_key, new_relation): for cached in self.relations.values(): if cached.is_referenced_by(old_key): fire_event( - UpdateReference( - old_key=_make_ref_key_msg(old_key), - new_key=_make_ref_key_msg(new_key), - cached_key=_make_ref_key_msg(cached.key()), + CacheAction( + action="update_reference", + ref_key=_make_ref_key_msg(old_key), + ref_key_2=_make_ref_key_msg(new_key), + ref_key_3=_make_ref_key_msg(cached.key()), ) ) + cached.rename_key(old_key, new_key) self.relations[new_key] = relation @@ -430,7 +429,9 @@ def _check_rename_constraints(self, old_key, new_key): raise TruncatedModelNameCausedCollision(new_key, self.relations) if old_key not in self.relations: - fire_event(TemporaryRelation(key=_make_msg_from_ref_key(old_key))) + fire_event( + CacheAction(action="temporary_relation", ref_key=_make_msg_from_ref_key(old_key)) + ) return False return True @@ -449,13 +450,16 @@ def rename(self, old, new): old_key = _make_ref_key(old) new_key = _make_ref_key(new) fire_event( - RenameSchema( - old_key=_make_msg_from_ref_key(old_key), new_key=_make_msg_from_ref_key(new) + CacheAction( + action="rename_relation", + ref_key=_make_msg_from_ref_key(old_key), + ref_key_2=_make_msg_from_ref_key(new), ) ) fire_event_if( - flags.LOG_CACHE_EVENTS, lambda: DumpBeforeRenameSchema(dump=self.dump_graph()) + flags.LOG_CACHE_EVENTS, + lambda: CacheDumpGraph(before_after="before", action="rename", dump=self.dump_graph()), ) with self.lock: @@ -465,7 +469,8 @@ def rename(self, old, new): self._setdefault(_CachedRelation(new)) fire_event_if( - flags.LOG_CACHE_EVENTS, lambda: DumpAfterRenameSchema(dump=self.dump_graph()) + flags.LOG_CACHE_EVENTS, + lambda: CacheDumpGraph(before_after="after", action="rename", dump=self.dump_graph()), ) def get_relations(self, 
database: Optional[str], schema: Optional[str]) -> List[Any]: diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index db61362f428..746ce294067 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -172,107 +172,30 @@ class InvalidVarsYAMLMsg(betterproto.Message): @dataclass -class DbtProjectError(betterproto.Message): +class LogDbtProjectError(betterproto.Message): """A009""" - pass - - -@dataclass -class DbtProjectErrorMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "DbtProjectError" = betterproto.message_field(2) - - -@dataclass -class DbtProjectErrorException(betterproto.Message): - """A010""" - exc: str = betterproto.string_field(1) @dataclass -class DbtProjectErrorExceptionMsg(betterproto.Message): +class LogDbtProjectErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "DbtProjectErrorException" = betterproto.message_field(2) + data: "LogDbtProjectError" = betterproto.message_field(2) @dataclass -class DbtProfileError(betterproto.Message): +class LogDbtProfileError(betterproto.Message): """A011""" - pass - - -@dataclass -class DbtProfileErrorMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "DbtProfileError" = betterproto.message_field(2) - - -@dataclass -class DbtProfileErrorException(betterproto.Message): - """A012""" - exc: str = betterproto.string_field(1) + profiles: List[str] = betterproto.string_field(2) @dataclass -class DbtProfileErrorExceptionMsg(betterproto.Message): +class LogDbtProfileErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "DbtProfileErrorException" = betterproto.message_field(2) - - -@dataclass -class ProfileListTitle(betterproto.Message): - """A013""" - - pass - - -@dataclass -class ProfileListTitleMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "ProfileListTitle" = betterproto.message_field(2) - - -@dataclass -class ListSingleProfile(betterproto.Message): - """A014""" - - profile: str = betterproto.string_field(1) - - -@dataclass -class ListSingleProfileMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "ListSingleProfile" = betterproto.message_field(2) - - -@dataclass -class NoDefinedProfiles(betterproto.Message): - """A015""" - - pass - - -@dataclass -class NoDefinedProfilesMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "NoDefinedProfiles" = betterproto.message_field(2) - - -@dataclass -class ProfileHelpMessage(betterproto.Message): - """A016""" - - pass - - -@dataclass -class ProfileHelpMessageMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "ProfileHelpMessage" = betterproto.message_field(2) + data: "LogDbtProfileError" = betterproto.message_field(2) @dataclass @@ -509,10 +432,10 @@ class ExposureNameDeprecationMsg(betterproto.Message): class FunctionDeprecated(betterproto.Message): """D008""" - function_name: str = betterproto.string_field(2) - reason: str = betterproto.string_field(3) - suggested_action: str = betterproto.string_field(4) - version: str = betterproto.string_field(5) + function_name: str = betterproto.string_field(1) + reason: str = betterproto.string_field(2) + suggested_action: str = betterproto.string_field(3) + version: str = betterproto.string_field(4) @dataclass @@ -828,186 +751,37 @@ class SchemaDropMsg(betterproto.Message): @dataclass -class UncachedRelation(betterproto.Message): +class 
CacheAction(betterproto.Message): """E022""" - dep_key: "ReferenceKeyMsg" = betterproto.message_field(1) - ref_key: "ReferenceKeyMsg" = betterproto.message_field(2) - - -@dataclass -class UncachedRelationMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "UncachedRelation" = betterproto.message_field(2) - - -@dataclass -class AddLink(betterproto.Message): - """E023""" - - dep_key: "ReferenceKeyMsg" = betterproto.message_field(1) + action: str = betterproto.string_field(1) ref_key: "ReferenceKeyMsg" = betterproto.message_field(2) + ref_key_2: "ReferenceKeyMsg" = betterproto.message_field(3) + ref_key_3: "ReferenceKeyMsg" = betterproto.message_field(4) + ref_list: List["ReferenceKeyMsg"] = betterproto.message_field(5) @dataclass -class AddLinkMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "AddLink" = betterproto.message_field(2) - - -@dataclass -class AddRelation(betterproto.Message): - """E024""" - - relation: "ReferenceKeyMsg" = betterproto.message_field(1) - - -@dataclass -class AddRelationMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "AddRelation" = betterproto.message_field(2) - - -@dataclass -class DropMissingRelation(betterproto.Message): - """E025""" - - relation: "ReferenceKeyMsg" = betterproto.message_field(1) - - -@dataclass -class DropMissingRelationMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "DropMissingRelation" = betterproto.message_field(2) - - -@dataclass -class DropCascade(betterproto.Message): - """E026""" - - dropped: "ReferenceKeyMsg" = betterproto.message_field(1) - consequences: List["ReferenceKeyMsg"] = betterproto.message_field(2) - - -@dataclass -class DropCascadeMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "DropCascade" = betterproto.message_field(2) - - -@dataclass -class DropRelation(betterproto.Message): - """E027""" - - dropped: "ReferenceKeyMsg" = betterproto.message_field(1) - - -@dataclass -class DropRelationMsg(betterproto.Message): +class CacheActionMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "DropRelation" = betterproto.message_field(2) + data: "CacheAction" = betterproto.message_field(2) @dataclass -class UpdateReference(betterproto.Message): - """E028""" - - old_key: "ReferenceKeyMsg" = betterproto.message_field(1) - new_key: "ReferenceKeyMsg" = betterproto.message_field(2) - cached_key: "ReferenceKeyMsg" = betterproto.message_field(3) - - -@dataclass -class UpdateReferenceMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "UpdateReference" = betterproto.message_field(2) - - -@dataclass -class TemporaryRelation(betterproto.Message): - """E029""" - - key: "ReferenceKeyMsg" = betterproto.message_field(1) - - -@dataclass -class TemporaryRelationMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "TemporaryRelation" = betterproto.message_field(2) - - -@dataclass -class RenameSchema(betterproto.Message): - """E030""" - - old_key: "ReferenceKeyMsg" = betterproto.message_field(1) - new_key: "ReferenceKeyMsg" = betterproto.message_field(2) - - -@dataclass -class RenameSchemaMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "RenameSchema" = betterproto.message_field(2) - - -@dataclass -class DumpBeforeAddGraph(betterproto.Message): +class CacheDumpGraph(betterproto.Message): """E031""" dump: Dict[str, "ListOfStrings"] = 
betterproto.map_field( 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE ) + before_after: str = betterproto.string_field(2) + action: str = betterproto.string_field(3) @dataclass -class DumpBeforeAddGraphMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "DumpBeforeAddGraph" = betterproto.message_field(2) - - -@dataclass -class DumpAfterAddGraph(betterproto.Message): - """E032""" - - dump: Dict[str, "ListOfStrings"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass -class DumpAfterAddGraphMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "DumpAfterAddGraph" = betterproto.message_field(2) - - -@dataclass -class DumpBeforeRenameSchema(betterproto.Message): - """E033""" - - dump: Dict[str, "ListOfStrings"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass -class DumpBeforeRenameSchemaMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "DumpBeforeRenameSchema" = betterproto.message_field(2) - - -@dataclass -class DumpAfterRenameSchema(betterproto.Message): - """E034""" - - dump: Dict[str, "ListOfStrings"] = betterproto.map_field( - 1, betterproto.TYPE_STRING, betterproto.TYPE_MESSAGE - ) - - -@dataclass -class DumpAfterRenameSchemaMsg(betterproto.Message): +class CacheDumpGraphMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "DumpAfterRenameSchema" = betterproto.message_field(2) + data: "CacheDumpGraph" = betterproto.message_field(2) @dataclass diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index a03e70fd364..85d46692089 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -133,79 +133,29 @@ message InvalidVarsYAMLMsg { } // A009 -message DbtProjectError { -} - -message DbtProjectErrorMsg { - EventInfo info = 1; - DbtProjectError data = 2; -} - -// A010 -message DbtProjectErrorException { +message LogDbtProjectError { string exc = 1; } -message DbtProjectErrorExceptionMsg { +message LogDbtProjectErrorMsg { EventInfo info = 1; - DbtProjectErrorException data = 2; -} - -// A011 -message DbtProfileError { + LogDbtProjectError data = 2; } -message DbtProfileErrorMsg { - EventInfo info = 1; - DbtProfileError data = 2; -} +// Skipped A010 -// A012 -message DbtProfileErrorException { +// A011 +message LogDbtProfileError { string exc = 1; + repeated string profiles = 2; } -message DbtProfileErrorExceptionMsg { +message LogDbtProfileErrorMsg { EventInfo info = 1; - DbtProfileErrorException data = 2; + LogDbtProfileError data = 2; } -// A013 -message ProfileListTitle { -} - -message ProfileListTitleMsg { - EventInfo info = 1; - ProfileListTitle data = 2; -} - -// A014 -message ListSingleProfile { - string profile = 1; -} - -message ListSingleProfileMsg { - EventInfo info = 1; - ListSingleProfile data = 2; -} - -// A015 -message NoDefinedProfiles { -} - -message NoDefinedProfilesMsg { - EventInfo info = 1; - NoDefinedProfiles data = 2; -} - -// A016 -message ProfileHelpMessage { -} - -message ProfileHelpMessageMsg { - EventInfo info = 1; - ProfileHelpMessage data = 2; -} +// Skipped A012, A013, A014, A015, A016 // A017 message StarterProjectPath { @@ -387,10 +337,10 @@ message ExposureNameDeprecationMsg { //D008 message FunctionDeprecated { - string function_name = 2; - string reason = 3; - string suggested_action = 4; - string version = 5; + string function_name = 1; + string reason = 2; + string suggested_action = 3; + 
string version = 4; } message FunctionDeprecatedMsg { @@ -644,140 +594,35 @@ message SchemaDropMsg { } // E022 -message UncachedRelation { - ReferenceKeyMsg dep_key = 1; - ReferenceKeyMsg ref_key = 2; -} - -message UncachedRelationMsg { - EventInfo info = 1; - UncachedRelation data = 2; -} - -// E023 -message AddLink { - ReferenceKeyMsg dep_key = 1; +message CacheAction { + string action = 1; ReferenceKeyMsg ref_key = 2; + ReferenceKeyMsg ref_key_2 = 3; + ReferenceKeyMsg ref_key_3 = 4; + repeated ReferenceKeyMsg ref_list = 5; } -message AddLinkMsg { +message CacheActionMsg { EventInfo info = 1; - AddLink data = 2; -} - -// E024 -message AddRelation { - ReferenceKeyMsg relation = 1; + CacheAction data = 2; } -message AddRelationMsg { - EventInfo info = 1; - AddRelation data = 2; -} - -// E025 -message DropMissingRelation { - ReferenceKeyMsg relation = 1; -} - -message DropMissingRelationMsg { - EventInfo info = 1; - DropMissingRelation data = 2; -} - -// E026 -message DropCascade { - ReferenceKeyMsg dropped = 1; - repeated ReferenceKeyMsg consequences = 2; -} - -message DropCascadeMsg { - EventInfo info = 1; - DropCascade data = 2; -} - -// E027 -message DropRelation { - ReferenceKeyMsg dropped = 1; -} - -message DropRelationMsg { - EventInfo info = 1; - DropRelation data = 2; -} - -// E028 -message UpdateReference { - ReferenceKeyMsg old_key = 1; - ReferenceKeyMsg new_key = 2; - ReferenceKeyMsg cached_key = 3; -} - -message UpdateReferenceMsg { - EventInfo info = 1; - UpdateReference data = 2; -} - -// E029 -message TemporaryRelation { - ReferenceKeyMsg key = 1; -} - -message TemporaryRelationMsg { - EventInfo info = 1; - TemporaryRelation data = 2; -} - -// E030 -message RenameSchema { - ReferenceKeyMsg old_key = 1; - ReferenceKeyMsg new_key = 2; -} - -message RenameSchemaMsg { - EventInfo info = 1; - RenameSchema data = 2; -} +// Skipping E023, E024, E025, E026, E027, E028, E029, E0230 // E031 -message DumpBeforeAddGraph { - map dump = 1; -} - -message DumpBeforeAddGraphMsg { - EventInfo info = 1; - DumpBeforeAddGraph data = 2; -} - -// E032 -message DumpAfterAddGraph { +message CacheDumpGraph { map dump = 1; + string before_after = 2; + string action = 3; } -message DumpAfterAddGraphMsg { +message CacheDumpGraphMsg { EventInfo info = 1; - DumpAfterAddGraph data = 2; -} - -// E033 -message DumpBeforeRenameSchema { - map dump = 1; + CacheDumpGraph data = 2; } -message DumpBeforeRenameSchemaMsg { - EventInfo info = 1; - DumpBeforeRenameSchema data = 2; -} -// E034 -message DumpAfterRenameSchema { - map dump = 1; -} - -message DumpAfterRenameSchemaMsg { - EventInfo info = 1; - DumpAfterRenameSchema data = 2; -} +// Skipping E032, E033, E034 // E035 message AdapterImportError { diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index e2d108419ca..b76188a8c97 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -117,79 +117,40 @@ def message(self) -> str: @dataclass -class DbtProjectError(ErrorLevel, pt.DbtProjectError): +class LogDbtProjectError(ErrorLevel, pt.LogDbtProjectError): def code(self): return "A009" def message(self) -> str: - return "Encountered an error while reading the project:" + msg = "Encountered an error while reading the project:" + if self.exc: + msg += f" ERROR: {str(self.exc)}" + return msg -@dataclass -class DbtProjectErrorException(ErrorLevel, pt.DbtProjectErrorException): - def code(self): - return "A010" - - def message(self) -> str: - return f" ERROR: {str(self.exc)}" +# Skipped A010 @dataclass -class DbtProfileError(ErrorLevel, 
pt.DbtProfileError): +class LogDbtProfileError(ErrorLevel, pt.LogDbtProfileError): def code(self): return "A011" def message(self) -> str: - return "Encountered an error while reading profiles:" - - -@dataclass -class DbtProfileErrorException(ErrorLevel, pt.DbtProfileErrorException): - def code(self): - return "A012" - - def message(self) -> str: - return f" ERROR: {str(self.exc)}" - - -@dataclass -class ProfileListTitle(InfoLevel, pt.ProfileListTitle): - def code(self): - return "A013" - - def message(self) -> str: - return "Defined profiles:" - - -@dataclass -class ListSingleProfile(InfoLevel, pt.ListSingleProfile): - def code(self): - return "A014" - - def message(self) -> str: - return f" - {self.profile}" - - -@dataclass -class NoDefinedProfiles(InfoLevel, pt.NoDefinedProfiles): - def code(self): - return "A015" - - def message(self) -> str: - return "There are no profiles defined in your profiles.yml file" - - -@dataclass -class ProfileHelpMessage(InfoLevel, pt.ProfileHelpMessage): - def code(self): - return "A016" + msg = "Encountered an error while reading profiles:\n" f" ERROR: {str(self.exc)}" + if self.profiles: + msg += "Defined profiles:\n" + for profile in self.profiles: + msg += f" - {profile}" + else: + msg += "There are no profiles defined in your profiles.yml file" - def message(self) -> str: - return """ + msg += """ For more information on configuring profiles, please consult the dbt docs: https://docs.getdbt.com/docs/configure-your-profile """ + return msg @dataclass @@ -631,130 +592,54 @@ def message(self) -> str: return f'Dropping schema "{self.relation}".' -# TODO pretty sure this is only ever called in dead code -# see: core/dbt/adapters/cache.py _add_link vs add_link @dataclass -class UncachedRelation(DebugLevel, Cache, pt.UncachedRelation): +class CacheAction(DebugLevel, Cache, pt.CacheAction): def code(self): return "E022" - def message(self) -> str: - return ( - f"{self.dep_key} references {str(self.ref_key)} " - f"but {self.ref_key.database}.{self.ref_key.schema}" - "is not in the cache, skipping assumed external relation" - ) - - -@dataclass -class AddLink(DebugLevel, Cache, pt.AddLink): - def code(self): - return "E023" - - def message(self) -> str: - return f"adding link, {self.dep_key} references {self.ref_key}" - - -@dataclass -class AddRelation(DebugLevel, Cache, pt.AddRelation): - def code(self): - return "E024" - - def message(self) -> str: - return f"Adding relation: {str(self.relation)}" - - -@dataclass -class DropMissingRelation(DebugLevel, Cache, pt.DropMissingRelation): - def code(self): - return "E025" - - def message(self) -> str: - return f"dropped a nonexistent relationship: {str(self.relation)}" - - -@dataclass -class DropCascade(DebugLevel, Cache, pt.DropCascade): - def code(self): - return "E026" - - def message(self) -> str: - return f"drop {self.dropped} is cascading to {self.consequences}" - - -@dataclass -class DropRelation(DebugLevel, Cache, pt.DropRelation): - def code(self): - return "E027" - - def message(self) -> str: - return f"Dropping relation: {self.dropped}" - - -@dataclass -class UpdateReference(DebugLevel, Cache, pt.UpdateReference): - def code(self): - return "E028" - - def message(self) -> str: - return ( - f"updated reference from {self.old_key} -> {self.cached_key} to " - f"{self.new_key} -> {self.cached_key}" - ) - - -@dataclass -class TemporaryRelation(DebugLevel, Cache, pt.TemporaryRelation): - def code(self): - return "E029" + def message(self): + if self.action == "add_link": + return f"adding link, {self.ref_key} 
references {self.ref_key_2}" + elif self.action == "add_relation": + return f"adding relation: {str(self.ref_key)}" + elif self.action == "drop_missing_relation": + return f"dropped a nonexistent relationship: {str(self.ref_key)}" + elif self.action == "drop_cascade": + return f"drop {self.ref_key} is cascading to {self.ref_list}" + elif self.action == "drop_relation": + return f"Dropping relation: {self.ref_key}" + elif self.action == "update_reference": + return ( + f"updated reference from {self.ref_key} -> {self.ref_key_3} to " + f"{self.ref_key_2} -> {self.ref_key_3}" + ) + elif self.action == "temporary_relation": + return f"old key {self.ref_key} not found in self.relations, assuming temporary" + elif self.action == "rename_relation": + return f"Renaming relation {self.ref_key} to {self.ref_key_2}" + elif self.action == "uncached_relation": + return ( + f"{self.ref_key_2} references {str(self.ref_key)} " + f"but {self.ref_key.database}.{self.ref_key.schema}" + "is not in the cache, skipping assumed external relation" + ) + else: + return f"{self.ref_key}" - def message(self) -> str: - return f"old key {self.key} not found in self.relations, assuming temporary" - -@dataclass -class RenameSchema(DebugLevel, Cache, pt.RenameSchema): - def code(self): - return "E030" - - def message(self) -> str: - return f"Renaming relation {self.old_key} to {self.new_key}" +# Skipping E023, E024, E025, E026, E027, E028, E029, E030 @dataclass -class DumpBeforeAddGraph(DebugLevel, Cache, pt.DumpBeforeAddGraph): +class CacheDumpGraph(DebugLevel, Cache, pt.CacheDumpGraph): def code(self): return "E031" def message(self) -> str: - return f"before adding : {self.dump}" - - -@dataclass -class DumpAfterAddGraph(DebugLevel, Cache, pt.DumpAfterAddGraph): - def code(self): - return "E032" - - def message(self) -> str: - return f"after adding: {self.dump}" + return f"{self.before_after} {self.action} : {self.dump}" -@dataclass -class DumpBeforeRenameSchema(DebugLevel, Cache, pt.DumpBeforeRenameSchema): - def code(self): - return "E033" - - def message(self) -> str: - return f"before rename: {self.dump}" - - -@dataclass -class DumpAfterRenameSchema(DebugLevel, Cache, pt.DumpAfterRenameSchema): - def code(self): - return "E034" - - def message(self) -> str: - return f"after rename: {self.dump}" +# Skipping E032, E033, E034 @dataclass diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index e448a15c1d2..b7ababdd067 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -24,14 +24,8 @@ from dbt.logger import log_manager from dbt.events.functions import fire_event from dbt.events.types import ( - DbtProjectError, - DbtProjectErrorException, - DbtProfileError, - DbtProfileErrorException, - ProfileListTitle, - ListSingleProfile, - NoDefinedProfiles, - ProfileHelpMessage, + LogDbtProjectError, + LogDbtProfileError, CatchableExceptionOnRun, InternalExceptionOnRun, GenericExceptionOnRun, @@ -102,26 +96,13 @@ def from_args(cls, args): # for the clean or deps tasks config = cls.ConfigType.from_args(args) except dbt.exceptions.DbtProjectError as exc: - fire_event(DbtProjectError()) - fire_event(DbtProjectErrorException(exc=str(exc))) + fire_event(LogDbtProjectError(exc=str(exc))) tracking.track_invalid_invocation(args=args, result_type=exc.result_type) raise dbt.exceptions.RuntimeException("Could not run dbt") from exc except dbt.exceptions.DbtProfileError as exc: - fire_event(DbtProfileError()) - fire_event(DbtProfileErrorException(exc=str(exc))) - - all_profiles = read_profiles(flags.PROFILES_DIR).keys() 
- - if len(all_profiles) > 0: - fire_event(ProfileListTitle()) - for profile in all_profiles: - fire_event(ListSingleProfile(profile=profile)) - else: - fire_event(NoDefinedProfiles()) - - fire_event(ProfileHelpMessage()) - + all_profile_names = list(read_profiles(flags.PROFILES_DIR).keys()) + fire_event(LogDbtProfileError(exc=str(exc), profiles=all_profile_names)) tracking.track_invalid_invocation(args=args, result_type=exc.result_type) raise dbt.exceptions.RuntimeException("Could not run dbt") from exc return cls(args, config) diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index aa1579da9d1..a7056a729b2 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -112,14 +112,8 @@ def test_event_codes(self): MergedFromState(num_merged=0, sample=[]), MissingProfileTarget(profile_name="", target_name=""), InvalidVarsYAML(), - DbtProjectError(), - DbtProjectErrorException(exc=""), - DbtProfileError(), - DbtProfileErrorException(exc=""), - ProfileListTitle(), - ListSingleProfile(profile=""), - NoDefinedProfiles(), - ProfileHelpMessage(), + LogDbtProjectError(), + LogDbtProfileError(), StarterProjectPath(dir=""), ConfigFolderDirectory(dir=""), NoSampleProfileFound(adapter=""), @@ -165,35 +159,12 @@ def test_event_codes(self): ), SchemaCreation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), SchemaDrop(relation=ReferenceKeyMsg(database="", schema="", identifier="")), - UncachedRelation( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), + CacheAction( + action="adding_relation", ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), + ref_key_2=ReferenceKeyMsg(database="", schema="", identifier=""), ), - AddLink( - dep_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ref_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ), - AddRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), - DropMissingRelation(relation=ReferenceKeyMsg(database="", schema="", identifier="")), - DropCascade( - dropped=ReferenceKeyMsg(database="", schema="", identifier=""), - consequences=[ReferenceKeyMsg(database="", schema="", identifier="")], - ), - DropRelation(dropped=ReferenceKeyMsg()), - UpdateReference( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - cached_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ), - TemporaryRelation(key=ReferenceKeyMsg(database="", schema="", identifier="")), - RenameSchema( - old_key=ReferenceKeyMsg(database="", schema="", identifier=""), - new_key=ReferenceKeyMsg(database="", schema="", identifier=""), - ), - DumpBeforeAddGraph(dump=dict()), - DumpAfterAddGraph(dump=dict()), - DumpBeforeRenameSchema(dump=dict()), - DumpAfterRenameSchema(dump=dict()), + CacheDumpGraph(before_after="before", action="rename", dump=dict()), AdapterImportError(exc=""), PluginLoadError(exc_info=""), NewConnectionOpening(connection_state=""), From 457ff3ef4833b2f1f6b4a66a5924adca9d6d155f Mon Sep 17 00:00:00 2001 From: nshuman1 <95879744+nshuman1@users.noreply.github.com> Date: Tue, 10 Jan 2023 16:36:21 -0500 Subject: [PATCH 099/156] fixed minor typos (#6499) * fixed minor typos * adding changelog file * linter * Update Docs-20230102-170216.yaml --- .changes/unreleased/Docs-20230102-170216.yaml | 6 ++++++ core/dbt/cli/params.py | 2 +- core/dbt/docs/build/html/index.html | 4 ++-- core/dbt/main.py | 2 +- 4 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 
.changes/unreleased/Docs-20230102-170216.yaml diff --git a/.changes/unreleased/Docs-20230102-170216.yaml b/.changes/unreleased/Docs-20230102-170216.yaml new file mode 100644 index 00000000000..602d7a80b24 --- /dev/null +++ b/.changes/unreleased/Docs-20230102-170216.yaml @@ -0,0 +1,6 @@ +kind: Docs +body: Updated minor typos encountered when skipping profile setup +time: 2023-01-02T17:02:16.66596191-05:00 +custom: + Author: nshuman1 + Issue: 6529 diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index 5045d04cc18..b739e886e2a 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -270,7 +270,7 @@ ) skip_profile_setup = click.option( - "--skip-profile-setup", "-s", envvar=None, help="Skip interative profile setup.", is_flag=True + "--skip-profile-setup", "-s", envvar=None, help="Skip interactive profile setup.", is_flag=True ) # TODO: The env var and name (reflected in flags) are corrections!
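Worth noting: the help string is defined once on a shared click option and reused by every command that mounts it, so the typo only needs correcting in one place (plus the committed HTML build below). A minimal sketch of that reuse pattern, with an invented command name rather than dbt's real CLI wiring:

    import click

    # define the option once; any command can then mount the same flag
    skip_profile_setup = click.option(
        "--skip-profile-setup", "-s", is_flag=True, help="Skip interactive profile setup."
    )

    @click.command()
    @skip_profile_setup
    def init(skip_profile_setup):
        # click derives the parameter name from the long flag
        click.echo(f"skip_profile_setup={skip_profile_setup}")

    if __name__ == "__main__":
        init()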

diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html index d4238bb08c3..a62245c306d 100644 --- a/core/dbt/docs/build/html/index.html +++ b/core/dbt/docs/build/html/index.html [two hunks of generated HTML elided: at line 321 the rendered help text "Skip interative profile setup." becomes "Skip interactive profile setup.", and at line 852 the file's final line is rewritten so it ends with a trailing newline; the bare "\ No newline at end of file" markers that follow are what remains of that second hunk]
    - \ No newline at end of file + diff --git a/core/dbt/main.py b/core/dbt/main.py index 55920e8a5cc..5c3c629a875 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -351,7 +351,7 @@ def _build_init_subparser(subparsers, base_subparser): dest="skip_profile_setup", action="store_true", help=""" - Skip interative profile setup. + Skip interactive profile setup. """, ) sub.set_defaults(cls=init_task.InitTask, which="init", rpc_method=None) From 5da63602b37a57726db07241673ff67d0eadd6b5 Mon Sep 17 00:00:00 2001 From: AGPapa Date: Tue, 10 Jan 2023 18:15:48 -0500 Subject: [PATCH 100/156] Adds buildable selection mode (#6366) --- .../unreleased/Features-20230102-091335.yaml | 6 ++ core/dbt/graph/cli.py | 9 +- core/dbt/graph/selector.py | 45 +++++++-- core/dbt/graph/selector_spec.py | 3 + core/dbt/main.py | 6 +- test/unit/test_flags.py | 3 + tests/functional/test_selection/fixtures.py | 2 +- .../test_selection_expansion.py | 93 +++++++++++++++++-- 8 files changed, 144 insertions(+), 23 deletions(-) create mode 100644 .changes/unreleased/Features-20230102-091335.yaml diff --git a/.changes/unreleased/Features-20230102-091335.yaml b/.changes/unreleased/Features-20230102-091335.yaml new file mode 100644 index 00000000000..78154c12e53 --- /dev/null +++ b/.changes/unreleased/Features-20230102-091335.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Adds buildable selection mode +time: 2023-01-02T09:13:35.663627-05:00 +custom: + Author: agpapa + Issue: "6365" diff --git a/core/dbt/graph/cli.py b/core/dbt/graph/cli.py index 6059de6b042..2ae0d814327 100644 --- a/core/dbt/graph/cli.py +++ b/core/dbt/graph/cli.py @@ -44,12 +44,14 @@ def parse_union( components=intersection_components, expect_exists=expect_exists, raw=raw_spec, + indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION), ) ) return SelectionUnion( components=union_components, expect_exists=False, raw=components, + indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION), ) @@ -78,9 +80,12 @@ def parse_difference( include, DEFAULT_INCLUDES, indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION) ) excluded = parse_union_from_default( - exclude, DEFAULT_EXCLUDES, indirect_selection=IndirectSelection.Eager + exclude, DEFAULT_EXCLUDES, indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION) + ) + return SelectionDifference( + components=[included, excluded], + indirect_selection=IndirectSelection(flags.INDIRECT_SELECTION), ) - return SelectionDifference(components=[included, excluded]) RawDefinition = Union[str, Dict[str, Any]] diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index ed91596712b..8f9561c6519 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -134,7 +134,9 @@ def select_nodes_recursively(self, spec: SelectionSpec) -> Tuple[Set[UniqueId], initial_direct = spec.combined(direct_sets) indirect_nodes = spec.combined(indirect_sets) - direct_nodes = self.incorporate_indirect_nodes(initial_direct, indirect_nodes) + direct_nodes = self.incorporate_indirect_nodes( + initial_direct, indirect_nodes, spec.indirect_selection + ) if spec.expect_exists and len(direct_nodes) == 0: warn_or_error(NoNodesForSelectionCriteria(spec_raw=str(spec.raw))) @@ -197,7 +199,7 @@ def expand_selection( ) -> Tuple[Set[UniqueId], Set[UniqueId]]: # Test selection by default expands to include an implicitly/indirectly selected tests. # `dbt test -m model_a` also includes tests that directly depend on `model_a`. - # Expansion has two modes, EAGER and CAUTIOUS. 
+ # Expansion has three modes, EAGER, CAUTIOUS and BUILDABLE. # # EAGER mode: If ANY parent is selected, select the test. # @@ -205,11 +207,22 @@ def expand_selection( # - If ALL parents are selected, select the test. # - If ANY parent is missing, return it separately. We'll keep it around # for later and see if its other parents show up. + # + # BUILDABLE mode: + # - If ALL parents are selected, or the parents of the test are themselves parents of the selected, select the test. + # - If ANY parent is missing, return it separately. We'll keep it around + # for later and see if its other parents show up. + # # Users can opt out of inclusive EAGER mode by passing --indirect-selection cautious # CLI argument or by specifying `indirect_selection: true` in a yaml selector direct_nodes = set(selected) indirect_nodes = set() + selected_and_parents = set() + if indirect_selection == IndirectSelection.Buildable: + selected_and_parents = selected.union(self.graph.select_parents(selected)).union( + self.manifest.sources + ) for unique_id in self.graph.select_successors(selected): if unique_id in self.manifest.nodes: @@ -220,14 +233,20 @@ def expand_selection( node.depends_on_nodes ) <= set(selected): direct_nodes.add(unique_id) - # if not: + elif indirect_selection == IndirectSelection.Buildable and set( + node.depends_on_nodes + ) <= set(selected_and_parents): + direct_nodes.add(unique_id) else: indirect_nodes.add(unique_id) return direct_nodes, indirect_nodes def incorporate_indirect_nodes( - self, direct_nodes: Set[UniqueId], indirect_nodes: Set[UniqueId] = set() + self, + direct_nodes: Set[UniqueId], + indirect_nodes: Set[UniqueId] = set(), + indirect_selection: IndirectSelection = IndirectSelection.Eager, ) -> Set[UniqueId]: # Check tests previously selected indirectly to see if ALL their # parents are now present. 
@@ -238,11 +257,19 @@ def incorporate_indirect_nodes( selected = set(direct_nodes) - for unique_id in indirect_nodes: - if unique_id in self.manifest.nodes: - node = self.manifest.nodes[unique_id] - if set(node.depends_on_nodes) <= set(selected): - selected.add(unique_id) + if indirect_selection == IndirectSelection.Cautious: + for unique_id in indirect_nodes: + if unique_id in self.manifest.nodes: + node = self.manifest.nodes[unique_id] + if set(node.depends_on_nodes) <= set(selected): + selected.add(unique_id) + elif indirect_selection == IndirectSelection.Buildable: + selected_and_parents = selected.union(self.graph.select_parents(selected)) + for unique_id in indirect_nodes: + if unique_id in self.manifest.nodes: + node = self.manifest.nodes[unique_id] + if set(node.depends_on_nodes) <= set(selected_and_parents): + selected.add(unique_id) return selected diff --git a/core/dbt/graph/selector_spec.py b/core/dbt/graph/selector_spec.py index 991ae7fcb89..5b8e4560d5e 100644 --- a/core/dbt/graph/selector_spec.py +++ b/core/dbt/graph/selector_spec.py @@ -24,6 +24,7 @@ class IndirectSelection(StrEnum): Eager = "eager" Cautious = "cautious" + Buildable = "buildable" def _probably_path(value: str): @@ -173,12 +174,14 @@ class BaseSelectionGroup(dbtClassMixin, Iterable[SelectionSpec], metaclass=ABCMe def __init__( self, components: Iterable[SelectionSpec], + indirect_selection: IndirectSelection = IndirectSelection.Eager, expect_exists: bool = False, raw: Any = None, ): self.components: List[SelectionSpec] = list(components) self.expect_exists = expect_exists self.raw = raw + self.indirect_selection = indirect_selection def __iter__(self) -> Iterator[SelectionSpec]: for component in self.components: diff --git a/core/dbt/main.py b/core/dbt/main.py index 5c3c629a875..1bdd59fef1f 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -385,7 +385,7 @@ def _build_build_subparser(subparsers, base_subparser): ) sub.add_argument( "--indirect-selection", - choices=["eager", "cautious"], + choices=["eager", "cautious", "buildable"], default="eager", dest="indirect_selection", help=""" @@ -763,7 +763,7 @@ def _build_test_subparser(subparsers, base_subparser): ) sub.add_argument( "--indirect-selection", - choices=["eager", "cautious"], + choices=["eager", "cautious", "buildable"], default="eager", dest="indirect_selection", help=""" @@ -869,7 +869,7 @@ def _build_list_subparser(subparsers, base_subparser): ) sub.add_argument( "--indirect-selection", - choices=["eager", "cautious"], + choices=["eager", "cautious", "buildable"], default="eager", dest="indirect_selection", help=""" diff --git a/test/unit/test_flags.py b/test/unit/test_flags.py index 4be866338a2..8bb248af443 100644 --- a/test/unit/test_flags.py +++ b/test/unit/test_flags.py @@ -206,6 +206,9 @@ def test__flags(self): self.user_config.indirect_selection = 'cautious' flags.set_from_args(self.args, self.user_config) self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Cautious) + self.user_config.indirect_selection = 'buildable' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Buildable) self.user_config.indirect_selection = None flags.set_from_args(self.args, self.user_config) self.assertEqual(flags.INDIRECT_SELECTION, IndirectSelection.Eager) diff --git a/tests/functional/test_selection/fixtures.py b/tests/functional/test_selection/fixtures.py index ae798edd3fd..48c3f40c62d 100644 --- a/tests/functional/test_selection/fixtures.py +++ 
b/tests/functional/test_selection/fixtures.py @@ -64,7 +64,7 @@ tags = ['a_or_b'] ) }} -select 1 as fun +select * FROM {{ref('model_b')}} """ diff --git a/tests/functional/test_selection/test_selection_expansion.py b/tests/functional/test_selection/test_selection_expansion.py index b563398e89f..e006fd50258 100644 --- a/tests/functional/test_selection/test_selection_expansion.py +++ b/tests/functional/test_selection/test_selection_expansion.py @@ -184,6 +184,18 @@ def test_model_a_exclude_specific_test_cautious( self.list_tests_and_assert(select, exclude, expected, indirect_selection) self.run_tests_and_assert(select, exclude, expected, indirect_selection) + def test_model_a_exclude_specific_test_buildable( + self, + project, + ): + select = "model_a" + exclude = "unique_model_a_fun" + expected = ["just_a", "cf_a_b", "cf_a_src", "relationships_model_a_fun__fun__ref_model_b_", "relationships_model_a_fun__fun__source_my_src_my_tbl_"] + indirect_selection = "buildable" + + self.list_tests_and_assert(select, exclude, expected, indirect_selection) + self.run_tests_and_assert(select, exclude, expected, indirect_selection) + def test_only_generic( self, project, @@ -374,6 +386,40 @@ def test_model_a_indirect_selection_eager( self.list_tests_and_assert(select, exclude, expected, indirect_selection) self.run_tests_and_assert(select, exclude, expected, indirect_selection) + def test_model_a_indirect_selection_cautious( + self, + project, + ): + select = "model_a" + exclude = None + expected = [ + "just_a", + "unique_model_a_fun", + ] + indirect_selection = "cautious" + + self.list_tests_and_assert(select, exclude, expected, indirect_selection) + self.run_tests_and_assert(select, exclude, expected, indirect_selection) + + def test_model_a_indirect_selection_buildable( + self, + project, + ): + select = "model_a" + exclude = None + expected = [ + "cf_a_b", + "cf_a_src", + "just_a", + "relationships_model_a_fun__fun__ref_model_b_", + "relationships_model_a_fun__fun__source_my_src_my_tbl_", + "unique_model_a_fun", + ] + indirect_selection = "buildable" + + self.list_tests_and_assert(select, exclude, expected, indirect_selection) + self.run_tests_and_assert(select, exclude, expected, indirect_selection) + def test_model_a_indirect_selection_exclude_unique_tests( self, project, @@ -402,16 +448,21 @@ def selectors(self): definition: method: fqn value: model_a - - name: model_a_no_indirect_selection + - name: model_a_cautious_indirect_selection definition: method: fqn value: model_a indirect_selection: "cautious" - - name: model_a_yes_indirect_selection + - name: model_a_eager_indirect_selection definition: method: fqn value: model_a indirect_selection: "eager" + - name: model_a_buildable_indirect_selection + definition: + method: fqn + value: model_a + indirect_selection: "buildable" """ def test_selector_model_a_unset_indirect_selection( @@ -440,7 +491,7 @@ def test_selector_model_a_unset_indirect_selection( selector_name="model_a_unset_indirect_selection", ) - def test_selector_model_a_no_indirect_selection( + def test_selector_model_a_cautious_indirect_selection( self, project, ): @@ -450,16 +501,42 @@ def test_selector_model_a_no_indirect_selection( include=None, exclude=None, expected_tests=expected, - selector_name="model_a_no_indirect_selection", + selector_name="model_a_cautious_indirect_selection", + ) + self.run_tests_and_assert( + include=None, + exclude=None, + expected_tests=expected, + selector_name="model_a_cautious_indirect_selection", + ) + + def 
test_selector_model_a_eager_indirect_selection( + self, + project, + ): + expected = [ + "cf_a_b", + "cf_a_src", + "just_a", + "relationships_model_a_fun__fun__ref_model_b_", + "relationships_model_a_fun__fun__source_my_src_my_tbl_", + "unique_model_a_fun", + ] + + self.list_tests_and_assert( + include=None, + exclude=None, + expected_tests=expected, + selector_name="model_a_eager_indirect_selection", ) self.run_tests_and_assert( include=None, exclude=None, expected_tests=expected, - selector_name="model_a_no_indirect_selection", + selector_name="model_a_eager_indirect_selection", ) - def test_selector_model_a_yes_indirect_selection( + def test_selector_model_a_buildable_indirect_selection( self, project, ): @@ -476,11 +553,11 @@ def test_selector_model_a_yes_indirect_selection( include=None, exclude=None, expected_tests=expected, - selector_name="model_a_yes_indirect_selection", + selector_name="model_a_buildable_indirect_selection", ) self.run_tests_and_assert( include=None, exclude=None, expected_tests=expected, - selector_name="model_a_yes_indirect_selection", + selector_name="model_a_buildable_indirect_selection", ) From 0fc080d222ae7b77888b96982098b8a7deee81f4 Mon Sep 17 00:00:00 2001 From: mivanicova <32622017+mivanicova@users.noreply.github.com> Date: Wed, 11 Jan 2023 00:18:20 +0100 Subject: [PATCH 101/156] add defer_to_manifest in before_run to fix faulty deferred docs generate (#6488) * add defer_to_manifest in before_run to fix faulty deferred docs generate * add a changelog * add declaration of defer_to_manifest to FreshnessTask and GraphRunnableTask * fix: add defer_to_manifest method to ListTask --- .changes/unreleased/Fixes-20221226-010211.yaml | 6 ++++++ core/dbt/task/freshness.py | 4 ++++ core/dbt/task/list.py | 4 ++++ core/dbt/task/runnable.py | 5 +++++ .../062_defer_state_tests/test_defer_state.py | 10 ++++++++++ 5 files changed, 29 insertions(+) create mode 100644 .changes/unreleased/Fixes-20221226-010211.yaml diff --git a/.changes/unreleased/Fixes-20221226-010211.yaml b/.changes/unreleased/Fixes-20221226-010211.yaml new file mode 100644 index 00000000000..4674b27df4d --- /dev/null +++ b/.changes/unreleased/Fixes-20221226-010211.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: fix docs generate --defer by adding defer_to_manifest to before_run +time: 2022-12-26T01:02:11.630614+01:00 +custom: + Author: mivanicova + Issue: "6488" diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index c9c8e5051fa..c4898b779fa 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -147,6 +147,10 @@ def node_is_match(self, node): class FreshnessTask(GraphRunnableTask): + def defer_to_manifest(self, adapter, selected_uids): + # freshness don't defer + return + def result_path(self): if self.args.output: return os.path.realpath(self.args.output) diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index e1be8f214d3..49fb07b359a 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -179,6 +179,10 @@ def selection_arg(self): else: return self.args.select + def defer_to_manifest(self, adapter, selected_uids): + # list don't defer + return + def get_node_selector(self): if self.manifest is None or self.graph is None: raise InternalException("manifest and graph must be set to get perform node selection") diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 14005203296..7143c286675 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -152,6 +152,10 @@ def get_selection_spec(self) -> SelectionSpec: def 
get_node_selector(self) -> NodeSelector: raise NotImplementedException(f"get_node_selector not implemented for task {type(self)}") + @abstractmethod + def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]): + raise NotImplementedException(f"defer_to_manifest not implemented for task {type(self)}") + def get_graph_queue(self) -> GraphQueue: selector = self.get_node_selector() spec = self.get_selection_spec() @@ -419,6 +423,7 @@ def populate_adapter_cache(self, adapter, required_schemas: Set[BaseRelation] = def before_run(self, adapter, selected_uids: AbstractSet[str]): with adapter.connection_named("master"): self.populate_adapter_cache(adapter) + self.defer_to_manifest(adapter, selected_uids) def after_run(self, adapter, results): pass diff --git a/test/integration/062_defer_state_tests/test_defer_state.py b/test/integration/062_defer_state_tests/test_defer_state.py index 058e43ef05f..d48d84aae46 100644 --- a/test/integration/062_defer_state_tests/test_defer_state.py +++ b/test/integration/062_defer_state_tests/test_defer_state.py @@ -109,12 +109,22 @@ def run_and_defer(self): # no state, wrong schema, failure. self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False) + # test generate docs + # no state, wrong schema, empty nodes + catalog = self.run_dbt(['docs','generate','--target', 'otherschema']) + assert not catalog.nodes + # no state, run also fails self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False) # defer test, it succeeds results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--target', 'otherschema']) + # defer docs generate with state, catalog refers schema from the happy times + catalog = self.run_dbt(['docs','generate', '-m', 'view_model+', '--state', 'state', '--defer','--target', 'otherschema']) + assert self.other_schema not in catalog.nodes["seed.test.seed"].metadata.schema + assert self.unique_schema() in catalog.nodes["seed.test.seed"].metadata.schema + # with state it should work though results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema']) assert self.other_schema not in results[0].node.compiled_code From eb200b46877c31469f410b7d47dfd245473ed7cb Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Tue, 10 Jan 2023 17:43:27 -0600 Subject: [PATCH 102/156] Rename exceptions (#6539) * rename InternalException * rename RuntimeException * rename DatabaseException * rename CompilationException * cleanup renames in tests and postgres * rename ValidationException * rename IncompatibleSchemaException * more renaming * more renaming * rename InternalException again * convert ParsingException * replace JSONValidationException and SemverException * replace VersionsNotCompatibleException * replace NotImplementedException * replace FailedToConnectException * replace InvalidConnectionException * replace InvalidSelectorException * replace DuplicateYamlKeyException * replace ConnectionException * minor cleanup * update comment * more cleanup * add class decorator * rename more exceptions * more renamed, add changelog * rename exception * rework class deprecations * removing testing line * fix failing test * rename newer exceptions * fix failing test * commit unsaved faile * convert back an rpc exception * remove class deprecations --- .../Breaking Changes-20221205-141937.yaml | 5 +- core/dbt/adapters/base/column.py | 12 +- core/dbt/adapters/base/connections.py | 36 +- core/dbt/adapters/base/impl.py | 114 ++- core/dbt/adapters/base/plugin.py | 4 +- 
core/dbt/adapters/base/query_headers.py | 4 +- core/dbt/adapters/base/relation.py | 18 +- core/dbt/adapters/cache.py | 20 +- core/dbt/adapters/factory.py | 12 +- core/dbt/adapters/sql/connections.py | 10 +- core/dbt/adapters/sql/impl.py | 4 +- core/dbt/clients/_jinja_blocks.py | 28 +- core/dbt/clients/agate_helper.py | 4 +- core/dbt/clients/git.py | 8 +- core/dbt/clients/jinja.py | 48 +- core/dbt/clients/jinja_static.py | 6 +- core/dbt/clients/system.py | 2 +- core/dbt/clients/yaml_helper.py | 2 +- core/dbt/compilation.py | 14 +- core/dbt/config/profile.py | 22 +- core/dbt/config/project.py | 16 +- core/dbt/config/renderer.py | 8 +- core/dbt/config/runtime.py | 22 +- core/dbt/config/selectors.py | 8 +- core/dbt/config/utils.py | 6 +- core/dbt/context/base.py | 20 +- core/dbt/context/configured.py | 6 +- core/dbt/context/context_config.py | 8 +- core/dbt/context/docs.py | 8 +- core/dbt/context/exceptions_jinja.py | 68 +- core/dbt/context/macro_resolver.py | 6 +- core/dbt/context/macros.py | 6 +- core/dbt/context/providers.py | 128 ++-- core/dbt/context/secret.py | 4 +- core/dbt/contracts/connection.py | 4 +- core/dbt/contracts/graph/manifest.py | 30 +- core/dbt/contracts/graph/model_config.py | 16 +- core/dbt/contracts/graph/unparsed.py | 6 +- core/dbt/contracts/relation.py | 14 +- core/dbt/contracts/results.py | 6 +- core/dbt/contracts/state.py | 10 +- core/dbt/contracts/util.py | 14 +- core/dbt/deps/git.py | 4 +- core/dbt/deps/registry.py | 18 +- core/dbt/deps/resolver.py | 16 +- core/dbt/events/functions.py | 4 +- core/dbt/events/proto_types.py | 26 +- core/dbt/events/types.proto | 28 +- core/dbt/events/types.py | 10 +- core/dbt/exceptions.py | 669 +++++++++--------- core/dbt/graph/cli.py | 22 +- core/dbt/graph/graph.py | 6 +- core/dbt/graph/selector.py | 8 +- core/dbt/graph/selector_methods.py | 30 +- core/dbt/graph/selector_spec.py | 14 +- core/dbt/internal_deprecations.py | 8 +- core/dbt/lib.py | 4 +- core/dbt/main.py | 12 +- core/dbt/parser/base.py | 10 +- core/dbt/parser/generic_test.py | 8 +- core/dbt/parser/generic_test_builders.py | 44 +- core/dbt/parser/hooks.py | 4 +- core/dbt/parser/macros.py | 8 +- core/dbt/parser/manifest.py | 26 +- core/dbt/parser/models.py | 30 +- core/dbt/parser/read_files.py | 8 +- core/dbt/parser/schemas.py | 100 +-- core/dbt/parser/search.py | 6 +- core/dbt/parser/snapshots.py | 4 +- core/dbt/parser/sources.py | 4 +- core/dbt/parser/sql.py | 4 +- core/dbt/semver.py | 18 +- core/dbt/task/base.py | 34 +- core/dbt/task/build.py | 6 +- core/dbt/task/compile.py | 10 +- core/dbt/task/freshness.py | 10 +- core/dbt/task/generate.py | 10 +- core/dbt/task/init.py | 2 +- core/dbt/task/list.py | 14 +- core/dbt/task/run.py | 36 +- core/dbt/task/run_operation.py | 4 +- core/dbt/task/runnable.py | 36 +- core/dbt/task/seed.py | 4 +- core/dbt/task/snapshot.py | 4 +- core/dbt/task/sql.py | 4 +- core/dbt/task/test.py | 18 +- core/dbt/tests/fixtures/project.py | 6 +- core/dbt/utils.py | 16 +- .../dbt/adapters/postgres/connections.py | 6 +- .../postgres/dbt/adapters/postgres/impl.py | 20 +- .../dbt/adapters/postgres/relation.py | 4 +- .../035_docs_blocks_tests/test_docs_blocks.py | 8 +- .../062_defer_state_tests/test_defer_state.py | 2 +- .../test_modified_state.py | 6 +- .../test_run_results_state.py | 2 - .../test_partial_parsing.py | 12 +- .../test_pp_metrics.py | 6 +- .../068_partial_parsing_tests/test_pp_vars.py | 10 +- test/unit/test_adapter_connection_manager.py | 22 +- test/unit/test_cache.py | 2 +- test/unit/test_config.py | 2 +- test/unit/test_context.py | 4 +- 
test/unit/test_core_dbt_utils.py | 4 +- test/unit/test_deps.py | 8 +- test/unit/test_exceptions.py | 6 +- test/unit/test_graph_selection.py | 2 +- test/unit/test_graph_selector_methods.py | 4 +- test/unit/test_graph_selector_spec.py | 6 +- test/unit/test_jinja.py | 34 +- test/unit/test_parser.py | 22 +- test/unit/test_postgres_adapter.py | 6 +- .../test_registry_get_request_exception.py | 4 +- test/unit/test_semver.py | 4 +- .../query_comment/test_query_comment.py | 4 +- tests/functional/artifacts/test_override.py | 4 +- .../artifacts/test_previous_version_state.py | 4 +- .../basic/test_invalid_reference.py | 4 +- tests/functional/configs/test_configs.py | 4 +- .../configs/test_configs_in_schema_files.py | 6 +- .../functional/configs/test_disabled_model.py | 6 +- .../functional/configs/test_unused_configs.py | 4 +- .../context_methods/test_builtin_functions.py | 6 +- .../context_methods/test_cli_vars.py | 8 +- .../context_methods/test_secret_env_vars.py | 8 +- .../test_var_in_generate_name.py | 4 +- .../dependencies/test_local_dependency.py | 6 +- .../deprecations/test_deprecations.py | 10 +- .../duplicates/test_duplicate_analysis.py | 4 +- .../duplicates/test_duplicate_exposure.py | 4 +- .../duplicates/test_duplicate_macro.py | 6 +- .../duplicates/test_duplicate_metric.py | 4 +- .../duplicates/test_duplicate_model.py | 6 +- .../duplicates/test_duplicate_source.py | 4 +- .../fail_fast/test_fail_fast_run.py | 6 +- tests/functional/hooks/test_model_hooks.py | 4 +- .../test_invalid_models.py | 14 +- tests/functional/macros/test_macros.py | 4 +- .../materializations/test_incremental.py | 6 +- .../functional/metrics/test_metric_configs.py | 6 +- tests/functional/metrics/test_metrics.py | 32 +- .../schema_tests/test_schema_v2_tests.py | 10 +- .../test_missing_strategy_snapshot.py | 4 +- .../test_source_overrides_duplicate_model.py | 4 +- .../functional/sources/test_simple_source.py | 4 +- .../sources/test_source_fresher_state.py | 6 +- tests/unit/test_connection_retries.py | 4 +- tests/unit/test_deprecations.py | 84 +-- tests/unit/test_events.py | 8 +- 148 files changed, 1330 insertions(+), 1332 deletions(-) diff --git a/.changes/unreleased/Breaking Changes-20221205-141937.yaml b/.changes/unreleased/Breaking Changes-20221205-141937.yaml index 5f2a780d661..39506f9ab2b 100644 --- a/.changes/unreleased/Breaking Changes-20221205-141937.yaml +++ b/.changes/unreleased/Breaking Changes-20221205-141937.yaml @@ -1,8 +1,9 @@ kind: Breaking Changes -body: Cleaned up exceptions to directly raise in code. Removed use of all exception +body: Cleaned up exceptions to directly raise in code. Also updated the existing + exception to meet PEP guidelines.Removed use of all exception functions in the code base and marked them all as deprecated to be removed next minor release. 
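# A minimal sketch of the deprecation pattern the body above describes, assuming
# a hypothetical raise_compiler_error() helper and this series' CompilationError
# (which, per the jinja.py hunk below, accepts a message and a node); the old
# function keeps working for one more minor release but warns on use:
#
#     import warnings
#
#     def raise_compiler_error(msg, node=None):
#         warnings.warn(
#             "raise_compiler_error() is deprecated and will be removed in the "
#             "next minor release; raise CompilationError directly instead.",
#             DeprecationWarning,
#             stacklevel=2,
#         )
#         raise CompilationError(msg, node)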
time: 2022-12-05T14:19:37.863032-06:00 custom: Author: emmyoop - Issue: 6339 6393 + Issue: 6339 6393 6460 diff --git a/core/dbt/adapters/base/column.py b/core/dbt/adapters/base/column.py index b47aac64062..3c6246b33a6 100644 --- a/core/dbt/adapters/base/column.py +++ b/core/dbt/adapters/base/column.py @@ -2,7 +2,7 @@ import re from typing import Dict, ClassVar, Any, Optional -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError @dataclass @@ -85,7 +85,7 @@ def is_numeric(self) -> bool: def string_size(self) -> int: if not self.is_string(): - raise RuntimeException("Called string_size() on non-string field!") + raise DbtRuntimeError("Called string_size() on non-string field!") if self.dtype == "text" or self.char_size is None: # char_size should never be None. Handle it reasonably just in case @@ -124,7 +124,7 @@ def __repr__(self) -> str: def from_description(cls, name: str, raw_data_type: str) -> "Column": match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type) if match is None: - raise RuntimeException(f'Could not interpret data type "{raw_data_type}"') + raise DbtRuntimeError(f'Could not interpret data type "{raw_data_type}"') data_type, size_info = match.groups() char_size = None numeric_precision = None @@ -137,7 +137,7 @@ def from_description(cls, name: str, raw_data_type: str) -> "Column": try: char_size = int(parts[0]) except ValueError: - raise RuntimeException( + raise DbtRuntimeError( f'Could not interpret data_type "{raw_data_type}": ' f'could not convert "{parts[0]}" to an integer' ) @@ -145,14 +145,14 @@ def from_description(cls, name: str, raw_data_type: str) -> "Column": try: numeric_precision = int(parts[0]) except ValueError: - raise RuntimeException( + raise DbtRuntimeError( f'Could not interpret data_type "{raw_data_type}": ' f'could not convert "{parts[0]}" to an integer' ) try: numeric_scale = int(parts[1]) except ValueError: - raise RuntimeException( + raise DbtRuntimeError( f'Could not interpret data_type "{raw_data_type}": ' f'could not convert "{parts[1]}" to an integer' ) diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py index 577cdf6d9a6..73e87ae9600 100644 --- a/core/dbt/adapters/base/connections.py +++ b/core/dbt/adapters/base/connections.py @@ -91,13 +91,13 @@ def get_thread_connection(self) -> Connection: key = self.get_thread_identifier() with self.lock: if key not in self.thread_connections: - raise dbt.exceptions.InvalidConnectionException(key, list(self.thread_connections)) + raise dbt.exceptions.InvalidConnectionError(key, list(self.thread_connections)) return self.thread_connections[key] def set_thread_connection(self, conn: Connection) -> None: key = self.get_thread_identifier() if key in self.thread_connections: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( "In set_thread_connection, existing connection exists for {}" ) self.thread_connections[key] = conn @@ -137,7 +137,7 @@ def exception_handler(self, sql: str) -> ContextManager: :return: A context manager that handles exceptions raised by the underlying database. """ - raise dbt.exceptions.NotImplementedException( + raise dbt.exceptions.NotImplementedError( "`exception_handler` is not implemented for this adapter!" ) @@ -211,7 +211,7 @@ def retry_connection( connect should trigger a retry. :type retryable_exceptions: Iterable[Type[Exception]] :param int retry_limit: How many times to retry the call to connect. 
If this limit - is exceeded before a successful call, a FailedToConnectException will be raised. + is exceeded before a successful call, a FailedToConnectError will be raised. Must be non-negative. :param retry_timeout: Time to wait between attempts to connect. Can also take a Callable that takes the number of attempts so far, beginning at 0, and returns an int @@ -220,14 +220,14 @@ def retry_connection( :param int _attempts: Parameter used to keep track of the number of attempts in calling the connect function across recursive calls. Passed as an argument to retry_timeout if it is a Callable. This parameter should not be set by the initial caller. - :raises dbt.exceptions.FailedToConnectException: Upon exhausting all retry attempts without + :raises dbt.exceptions.FailedToConnectError: Upon exhausting all retry attempts without successfully acquiring a handle. :return: The given connection with its appropriate state and handle attributes set depending on whether we successfully acquired a handle or not. """ timeout = retry_timeout(_attempts) if callable(retry_timeout) else retry_timeout if timeout < 0: - raise dbt.exceptions.FailedToConnectException( + raise dbt.exceptions.FailedToConnectError( "retry_timeout cannot be negative or return a negative time." ) @@ -235,7 +235,7 @@ def retry_connection( # This guard is not perfect others may add to the recursion limit (e.g. built-ins). connection.handle = None connection.state = ConnectionState.FAIL - raise dbt.exceptions.FailedToConnectException("retry_limit cannot be negative") + raise dbt.exceptions.FailedToConnectError("retry_limit cannot be negative") try: connection.handle = connect() @@ -246,7 +246,7 @@ def retry_connection( if retry_limit <= 0: connection.handle = None connection.state = ConnectionState.FAIL - raise dbt.exceptions.FailedToConnectException(str(e)) + raise dbt.exceptions.FailedToConnectError(str(e)) logger.debug( f"Got a retryable error when attempting to open a {cls.TYPE} connection.\n" @@ -268,12 +268,12 @@ def retry_connection( except Exception as e: connection.handle = None connection.state = ConnectionState.FAIL - raise dbt.exceptions.FailedToConnectException(str(e)) + raise dbt.exceptions.FailedToConnectError(str(e)) @abc.abstractmethod def cancel_open(self) -> Optional[List[str]]: """Cancel all open connections on the adapter. (passable)""" - raise dbt.exceptions.NotImplementedException( + raise dbt.exceptions.NotImplementedError( "`cancel_open` is not implemented for this adapter!" ) @@ -288,7 +288,7 @@ def open(cls, connection: Connection) -> Connection: This should be thread-safe, or hold the lock if necessary. The given connection should not be in either in_use or available. """ - raise dbt.exceptions.NotImplementedException("`open` is not implemented for this adapter!") + raise dbt.exceptions.NotImplementedError("`open` is not implemented for this adapter!") def release(self) -> None: with self.lock: @@ -320,16 +320,12 @@ def cleanup_all(self) -> None: @abc.abstractmethod def begin(self) -> None: """Begin a transaction. (passable)""" - raise dbt.exceptions.NotImplementedException( - "`begin` is not implemented for this adapter!" - ) + raise dbt.exceptions.NotImplementedError("`begin` is not implemented for this adapter!") @abc.abstractmethod def commit(self) -> None: """Commit a transaction. (passable)""" - raise dbt.exceptions.NotImplementedException( - "`commit` is not implemented for this adapter!" 
- ) + raise dbt.exceptions.NotImplementedError("`commit` is not implemented for this adapter!") @classmethod def _rollback_handle(cls, connection: Connection) -> None: @@ -365,7 +361,7 @@ def _close_handle(cls, connection: Connection) -> None: def _rollback(cls, connection: Connection) -> None: """Roll back the given connection.""" if connection.transaction_open is False: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Tried to rollback transaction on connection " f'"{connection.name}", but it does not have one open!' ) @@ -415,6 +411,4 @@ def execute( :return: A tuple of the query status and results (empty if fetch=False). :rtype: Tuple[AdapterResponse, agate.Table] """ - raise dbt.exceptions.NotImplementedException( - "`execute` is not implemented for this adapter!" - ) + raise dbt.exceptions.NotImplementedError("`execute` is not implemented for this adapter!") diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 64ebbeac5dd..98b78217c14 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -22,20 +22,20 @@ import pytz from dbt.exceptions import ( - InternalException, - InvalidMacroArgType, - InvalidMacroResult, - InvalidQuoteConfigType, - NotImplementedException, - NullRelationCacheAttempted, - NullRelationDropAttempted, - RelationReturnedMultipleResults, - RenameToNoneAttempted, - RuntimeException, - SnapshotTargetIncomplete, - SnapshotTargetNotSnapshotTable, - UnexpectedNull, - UnexpectedNonTimestamp, + DbtInternalError, + MacroArgTypeError, + MacroResultError, + QuoteConfigTypeError, + NotImplementedError, + NullRelationCacheAttemptedError, + NullRelationDropAttemptedError, + RelationReturnedMultipleResultsError, + RenameToNoneAttemptedError, + DbtRuntimeError, + SnapshotTargetIncompleteError, + SnapshotTargetNotSnapshotTableError, + UnexpectedNullError, + UnexpectedNonTimestampError, ) from dbt.adapters.protocol import ( @@ -75,7 +75,7 @@ def _expect_row_value(key: str, row: agate.Row): if key not in row.keys(): - raise InternalException( + raise DbtInternalError( 'Got a row without "{}" column, columns: {}'.format(key, row.keys()) ) return row[key] @@ -104,10 +104,10 @@ def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datet assume the datetime is already for UTC and add the timezone. """ if dt is None: - raise UnexpectedNull(field_name, source) + raise UnexpectedNullError(field_name, source) elif not hasattr(dt, "tzinfo"): - raise UnexpectedNonTimestamp(field_name, source, dt) + raise UnexpectedNonTimestampError(field_name, source, dt) elif dt.tzinfo: return dt.astimezone(pytz.UTC) @@ -433,7 +433,7 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str: """Cache a new relation in dbt. 
It will show up in `list relations`.""" if relation is None: name = self.nice_connection_name() - raise NullRelationCacheAttempted(name) + raise NullRelationCacheAttemptedError(name) self.cache.add(relation) # so jinja doesn't render things return "" @@ -445,7 +445,7 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str: """ if relation is None: name = self.nice_connection_name() - raise NullRelationDropAttempted(name) + raise NullRelationDropAttemptedError(name) self.cache.drop(relation) return "" @@ -462,7 +462,7 @@ def cache_renamed( name = self.nice_connection_name() src_name = _relation_name(from_relation) dst_name = _relation_name(to_relation) - raise RenameToNoneAttempted(src_name, dst_name, name) + raise RenameToNoneAttemptedError(src_name, dst_name, name) self.cache.rename(from_relation, to_relation) return "" @@ -474,12 +474,12 @@ def cache_renamed( @abc.abstractmethod def date_function(cls) -> str: """Get the date function used by this adapter's database.""" - raise NotImplementedException("`date_function` is not implemented for this adapter!") + raise NotImplementedError("`date_function` is not implemented for this adapter!") @classmethod @abc.abstractmethod def is_cancelable(cls) -> bool: - raise NotImplementedException("`is_cancelable` is not implemented for this adapter!") + raise NotImplementedError("`is_cancelable` is not implemented for this adapter!") ### # Abstract methods about schemas @@ -487,7 +487,7 @@ def is_cancelable(cls) -> bool: @abc.abstractmethod def list_schemas(self, database: str) -> List[str]: """Get a list of existing schemas in database""" - raise NotImplementedException("`list_schemas` is not implemented for this adapter!") + raise NotImplementedError("`list_schemas` is not implemented for this adapter!") @available.parse(lambda *a, **k: False) def check_schema_exists(self, database: str, schema: str) -> bool: @@ -510,13 +510,13 @@ def drop_relation(self, relation: BaseRelation) -> None: *Implementors must call self.cache.drop() to preserve cache state!* """ - raise NotImplementedException("`drop_relation` is not implemented for this adapter!") + raise NotImplementedError("`drop_relation` is not implemented for this adapter!") @abc.abstractmethod @available.parse_none def truncate_relation(self, relation: BaseRelation) -> None: """Truncate the given relation.""" - raise NotImplementedException("`truncate_relation` is not implemented for this adapter!") + raise NotImplementedError("`truncate_relation` is not implemented for this adapter!") @abc.abstractmethod @available.parse_none @@ -525,15 +525,13 @@ def rename_relation(self, from_relation: BaseRelation, to_relation: BaseRelation Implementors must call self.cache.rename() to preserve cache state. """ - raise NotImplementedException("`rename_relation` is not implemented for this adapter!") + raise NotImplementedError("`rename_relation` is not implemented for this adapter!") @abc.abstractmethod @available.parse_list def get_columns_in_relation(self, relation: BaseRelation) -> List[BaseColumn]: """Get a list of the columns in the given Relation.""" - raise NotImplementedException( - "`get_columns_in_relation` is not implemented for this adapter!" 
- ) + raise NotImplementedError("`get_columns_in_relation` is not implemented for this adapter!") @available.deprecated("get_columns_in_relation", lambda *a, **k: []) def get_columns_in_table(self, schema: str, identifier: str) -> List[BaseColumn]: @@ -555,7 +553,7 @@ def expand_column_types(self, goal: BaseRelation, current: BaseRelation) -> None :param self.Relation current: A relation that currently exists in the database with columns of unspecified types. """ - raise NotImplementedException( + raise NotImplementedError( "`expand_target_column_types` is not implemented for this adapter!" ) @@ -570,7 +568,7 @@ def list_relations_without_caching(self, schema_relation: BaseRelation) -> List[ :return: The relations in schema :rtype: List[self.Relation] """ - raise NotImplementedException( + raise NotImplementedError( "`list_relations_without_caching` is not implemented for this adapter!" ) @@ -612,7 +610,7 @@ def get_missing_columns( to_relation. """ if not isinstance(from_relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="get_missing_columns", arg_name="from_relation", got_value=from_relation, @@ -620,7 +618,7 @@ def get_missing_columns( ) if not isinstance(to_relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="get_missing_columns", arg_name="to_relation", got_value=to_relation, @@ -641,11 +639,11 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: expected columns. :param Relation relation: The relation to check - :raises CompilationException: If the columns are + :raises InvalidMacroArgType: If the columns are incorrect. """ if not isinstance(relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="valid_snapshot_target", arg_name="relation", got_value=relation, @@ -666,16 +664,16 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None: if missing: if extra: - raise SnapshotTargetIncomplete(extra, missing) + raise SnapshotTargetIncompleteError(extra, missing) else: - raise SnapshotTargetNotSnapshotTable(missing) + raise SnapshotTargetNotSnapshotTableError(missing) @available.parse_none def expand_target_column_types( self, from_relation: BaseRelation, to_relation: BaseRelation ) -> None: if not isinstance(from_relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="expand_target_column_types", arg_name="from_relation", got_value=from_relation, @@ -683,7 +681,7 @@ def expand_target_column_types( ) if not isinstance(to_relation, self.Relation): - raise InvalidMacroArgType( + raise MacroArgTypeError( method_name="expand_target_column_types", arg_name="to_relation", got_value=to_relation, @@ -765,7 +763,7 @@ def get_relation(self, database: str, schema: str, identifier: str) -> Optional[ "schema": schema, "database": database, } - raise RelationReturnedMultipleResults(kwargs, matches) + raise RelationReturnedMultipleResultsError(kwargs, matches) elif matches: return matches[0] @@ -787,20 +785,20 @@ def already_exists(self, schema: str, name: str) -> bool: @available.parse_none def create_schema(self, relation: BaseRelation): """Create the given schema if it does not exist.""" - raise NotImplementedException("`create_schema` is not implemented for this adapter!") + raise NotImplementedError("`create_schema` is not implemented for this adapter!") @abc.abstractmethod @available.parse_none def drop_schema(self, relation: BaseRelation): """Drop the given schema (and everything in it) if it exists.""" - raise 
NotImplementedException("`drop_schema` is not implemented for this adapter!") + raise NotImplementedError("`drop_schema` is not implemented for this adapter!") @available @classmethod @abc.abstractmethod def quote(cls, identifier: str) -> str: """Quote the given identifier, as appropriate for the database.""" - raise NotImplementedException("`quote` is not implemented for this adapter!") + raise NotImplementedError("`quote` is not implemented for this adapter!") @available def quote_as_configured(self, identifier: str, quote_key: str) -> str: @@ -829,7 +827,7 @@ def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str: elif quote_config is None: pass else: - raise InvalidQuoteConfigType(quote_config) + raise QuoteConfigTypeError(quote_config) if quote_columns: return self.quote(column) @@ -850,7 +848,7 @@ def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException("`convert_text_type` is not implemented for this adapter!") + raise NotImplementedError("`convert_text_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -862,7 +860,7 @@ def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException("`convert_number_type` is not implemented for this adapter!") + raise NotImplementedError("`convert_number_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -874,9 +872,7 @@ def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException( - "`convert_boolean_type` is not implemented for this adapter!" - ) + raise NotImplementedError("`convert_boolean_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -888,9 +884,7 @@ def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException( - "`convert_datetime_type` is not implemented for this adapter!" - ) + raise NotImplementedError("`convert_datetime_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -902,7 +896,7 @@ def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. :return: The name of the type in the database """ - raise NotImplementedException("`convert_date_type` is not implemented for this adapter!") + raise NotImplementedError("`convert_date_type` is not implemented for this adapter!") @classmethod @abc.abstractmethod @@ -914,7 +908,7 @@ def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str: :param col_idx: The index into the agate table for the column. 
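# A sketch of how a concrete adapter typically consumes these convert_* hooks:
# a dispatch from agate column types to the matching classmethod. Illustrative
# only; `convert_agate_type` here is an assumed name and the real dispatch
# lives elsewhere in the base adapter:
#
#     import agate
#
#     @classmethod
#     def convert_agate_type(cls, agate_table: agate.Table, col_idx: int) -> str:
#         agate_type = agate_table.column_types[col_idx]
#         conversions = [
#             (agate.Text, cls.convert_text_type),
#             (agate.Number, cls.convert_number_type),
#             (agate.Boolean, cls.convert_boolean_type),
#             (agate.DateTime, cls.convert_datetime_type),
#             (agate.Date, cls.convert_date_type),
#             (agate.TimeDelta, cls.convert_time_type),
#         ]
#         for agate_cls, func in conversions:
#             if isinstance(agate_type, agate_cls):
#                 return func(agate_table, col_idx)
#         raise NotImplementedError(f"no type conversion for column {col_idx}")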
:return: The name of the type in the database """ - raise NotImplementedException("`convert_time_type` is not implemented for this adapter!") + raise NotImplementedError("`convert_time_type` is not implemented for this adapter!") @available @classmethod @@ -981,7 +975,7 @@ def execute_macro( else: package_name = 'the "{}" package'.format(project) - raise RuntimeException( + raise DbtRuntimeError( 'dbt could not find a macro with the name "{}" in {}'.format( macro_name, package_name ) @@ -1079,7 +1073,7 @@ def calculate_freshness( # now we have a 1-row table of the maximum `loaded_at_field` value and # the current time according to the db. if len(table) != 1 or len(table[0]) != 2: - raise InvalidMacroResult(FRESHNESS_MACRO_NAME, table) + raise MacroResultError(FRESHNESS_MACRO_NAME, table) if table[0][0] is None: # no records in the table, so really the max_loaded_at was # infinitely long ago. Just call it 0:00 January 1 year UTC @@ -1156,7 +1150,7 @@ def string_add_sql( elif location == "prepend": return f"'{value}' || {add_to}" else: - raise RuntimeException(f'Got an unexpected location value of "{location}"') + raise DbtRuntimeError(f'Got an unexpected location value of "{location}"') def get_rows_different_sql( self, @@ -1214,7 +1208,7 @@ def submit_python_job(self, parsed_model: dict, compiled_code: str) -> AdapterRe return self.generate_python_submission_response(submission_result) def generate_python_submission_response(self, submission_result: Any) -> AdapterResponse: - raise NotImplementedException( + raise NotImplementedError( "Your adapter need to implement generate_python_submission_response" ) @@ -1238,7 +1232,7 @@ def get_incremental_strategy_macro(self, model_context, strategy: str): valid_strategies.append("default") builtin_strategies = self.builtin_incremental_strategies() if strategy in builtin_strategies and strategy not in valid_strategies: - raise RuntimeException( + raise DbtRuntimeError( f"The incremental strategy '{strategy}' is not valid for this adapter" ) @@ -1246,7 +1240,7 @@ def get_incremental_strategy_macro(self, model_context, strategy: str): macro_name = f"get_incremental_{strategy}_sql" # The model_context should have MacroGenerator callable objects for all macros if macro_name not in model_context: - raise RuntimeException( + raise DbtRuntimeError( 'dbt could not find an incremental strategy macro with the name "{}" in {}'.format( macro_name, self.config.project_name ) diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py index f0d348d8f57..f1a77f89b9d 100644 --- a/core/dbt/adapters/base/plugin.py +++ b/core/dbt/adapters/base/plugin.py @@ -1,7 +1,7 @@ from typing import List, Optional, Type from dbt.adapters.base import Credentials -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.adapters.protocol import AdapterProtocol @@ -11,7 +11,7 @@ def project_name_from_path(include_path: str) -> str: partial = Project.partial_load(include_path) if partial.project_name is None: - raise CompilationException(f"Invalid project at {include_path}: name not set!") + raise CompilationError(f"Invalid project at {include_path}: name not set!") return partial.project_name diff --git a/core/dbt/adapters/base/query_headers.py b/core/dbt/adapters/base/query_headers.py index dd88fdb2d41..bfacd2aee8c 100644 --- a/core/dbt/adapters/base/query_headers.py +++ b/core/dbt/adapters/base/query_headers.py @@ -7,7 +7,7 @@ from dbt.contracts.connection import AdapterRequiredConfig, QueryComment from 
dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError class NodeWrapper: @@ -48,7 +48,7 @@ def set(self, comment: Optional[str], append: bool): if isinstance(comment, str) and "*/" in comment: # tell the user "no" so they don't hurt themselves by writing # garbage - raise RuntimeException(f'query comment contains illegal value "*/": {comment}') + raise DbtRuntimeError(f'query comment contains illegal value "*/": {comment}') self.query_comment = comment self.append = append diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index 5bc0c56b264..13f64c01742 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -11,7 +11,11 @@ Policy, Path, ) -from dbt.exceptions import ApproximateMatch, InternalException, MultipleDatabasesNotAllowed +from dbt.exceptions import ( + ApproximateMatchError, + DbtInternalError, + MultipleDatabasesNotAllowedError, +) from dbt.node_types import NodeType from dbt.utils import filter_null_values, deep_merge, classproperty @@ -83,7 +87,7 @@ def matches( if not search: # nothing was passed in - raise dbt.exceptions.RuntimeException( + raise dbt.exceptions.DbtRuntimeError( "Tried to match relation, but no search path was passed!" ) @@ -100,7 +104,7 @@ def matches( if approximate_match and not exact_match: target = self.create(database=database, schema=schema, identifier=identifier) - raise ApproximateMatch(target, self) + raise ApproximateMatchError(target, self) return exact_match @@ -249,14 +253,14 @@ def create_from( ) -> Self: if node.resource_type == NodeType.Source: if not isinstance(node, SourceDefinition): - raise InternalException( + raise DbtInternalError( "type mismatch, expected SourceDefinition but got {}".format(type(node)) ) return cls.create_from_source(node, **kwargs) else: # Can't use ManifestNode here because of parameterized generics if not isinstance(node, (ParsedNode)): - raise InternalException( + raise DbtInternalError( f"type mismatch, expected ManifestNode but got {type(node)}" ) return cls.create_from_node(config, node, **kwargs) @@ -354,7 +358,7 @@ class InformationSchema(BaseRelation): def __post_init__(self): if not isinstance(self.information_schema_view, (type(None), str)): - raise dbt.exceptions.CompilationException( + raise dbt.exceptions.CompilationError( "Got an invalid name: {}".format(self.information_schema_view) ) @@ -438,7 +442,7 @@ def flatten(self, allow_multiple_databases: bool = False): if not allow_multiple_databases: seen = {r.database.lower() for r in self if r.database} if len(seen) > 1: - raise MultipleDatabasesNotAllowed(seen) + raise MultipleDatabasesNotAllowedError(seen) for information_schema_name, schema in self.search(): path = {"database": information_schema_name.database, "schema": schema} diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py index 430c79d3b3a..24a0e469df1 100644 --- a/core/dbt/adapters/cache.py +++ b/core/dbt/adapters/cache.py @@ -9,11 +9,11 @@ _ReferenceKey, ) from dbt.exceptions import ( - DependentLinkNotCached, - NewNameAlreadyInCache, - NoneRelationFound, - ReferencedLinkNotCached, - TruncatedModelNameCausedCollision, + DependentLinkNotCachedError, + NewNameAlreadyInCacheError, + NoneRelationFoundError, + ReferencedLinkNotCachedError, + TruncatedModelNameCausedCollisionError, ) from dbt.events.functions import fire_event, fire_event_if from dbt.events.types import CacheAction, 
CacheDumpGraph @@ -141,7 +141,7 @@ def rename_key(self, old_key, new_key): :raises InternalError: If the new key already exists. """ if new_key in self.referenced_by: - raise NewNameAlreadyInCache(old_key, new_key) + raise NewNameAlreadyInCacheError(old_key, new_key) if old_key not in self.referenced_by: return @@ -257,11 +257,11 @@ def _add_link(self, referenced_key, dependent_key): if referenced is None: return if referenced is None: - raise ReferencedLinkNotCached(referenced_key) + raise ReferencedLinkNotCachedError(referenced_key) dependent = self.relations.get(dependent_key) if dependent is None: - raise DependentLinkNotCached(dependent_key) + raise DependentLinkNotCachedError(dependent_key) assert dependent is not None # we just raised! @@ -426,7 +426,7 @@ def _check_rename_constraints(self, old_key, new_key): if new_key in self.relations: # Tell user when collision caused by model names truncated during # materialization. - raise TruncatedModelNameCausedCollision(new_key, self.relations) + raise TruncatedModelNameCausedCollisionError(new_key, self.relations) if old_key not in self.relations: fire_event( @@ -490,7 +490,7 @@ def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[ ] if None in results: - raise NoneRelationFound() + raise NoneRelationFoundError() return results def clear(self): diff --git a/core/dbt/adapters/factory.py b/core/dbt/adapters/factory.py index 16a0a3ffcd1..38c6bcb7894 100644 --- a/core/dbt/adapters/factory.py +++ b/core/dbt/adapters/factory.py @@ -10,7 +10,7 @@ from dbt.contracts.connection import AdapterRequiredConfig, Credentials from dbt.events.functions import fire_event from dbt.events.types import AdapterImportError, PluginLoadError -from dbt.exceptions import InternalException, RuntimeException +from dbt.exceptions import DbtInternalError, DbtRuntimeError from dbt.include.global_project import PACKAGE_PATH as GLOBAL_PROJECT_PATH from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME @@ -34,7 +34,7 @@ def get_plugin_by_name(self, name: str) -> AdapterPlugin: names = ", ".join(self.plugins.keys()) message = f"Invalid adapter type {name}! Must be one of {names}" - raise RuntimeException(message) + raise DbtRuntimeError(message) def get_adapter_class_by_name(self, name: str) -> Type[Adapter]: plugin = self.get_plugin_by_name(name) @@ -60,7 +60,7 @@ def load_plugin(self, name: str) -> Type[Credentials]: # the user about it via a runtime error if exc.name == "dbt.adapters." + name: fire_event(AdapterImportError(exc=str(exc))) - raise RuntimeException(f"Could not find adapter type {name}!") + raise DbtRuntimeError(f"Could not find adapter type {name}!") # otherwise, the error had to have come from some underlying # library. Log the stack trace. 
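The load_plugin hunk above distinguishes a missing adapter package from an
adapter whose own dependencies fail to import by inspecting
ModuleNotFoundError.name. A standalone sketch of that pattern, assuming
adapters are importable as dbt.adapters.<name> (the function name here is
illustrative, not part of the patch):

    from importlib import import_module

    def import_adapter_module(name: str):
        try:
            return import_module(f"dbt.adapters.{name}")
        except ModuleNotFoundError as exc:
            if exc.name == f"dbt.adapters.{name}":
                # the adapter package itself is not installed: user-facing error
                raise RuntimeError(f"Could not find adapter type {name}!") from exc
            # an underlying library failed to import: keep the original traceback
            raise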
@@ -70,7 +70,7 @@ def load_plugin(self, name: str) -> Type[Credentials]: plugin_type = plugin.adapter.type() if plugin_type != name: - raise RuntimeException( + raise DbtRuntimeError( f"Expected to find adapter with type named {name}, got " f"adapter with type {plugin_type}" ) @@ -132,7 +132,7 @@ def get_adapter_plugins(self, name: Optional[str]) -> List[AdapterPlugin]: try: plugin = self.plugins[plugin_name] except KeyError: - raise InternalException(f"No plugin found for {plugin_name}") from None + raise DbtInternalError(f"No plugin found for {plugin_name}") from None plugins.append(plugin) seen.add(plugin_name) for dep in plugin.dependencies: @@ -151,7 +151,7 @@ def get_include_paths(self, name: Optional[str]) -> List[Path]: try: path = self.packages[package_name] except KeyError: - raise InternalException(f"No internal package listing found for {package_name}") + raise DbtInternalError(f"No internal package listing found for {package_name}") paths.append(path) return paths diff --git a/core/dbt/adapters/sql/connections.py b/core/dbt/adapters/sql/connections.py index bc1a562ad86..e13cf12e319 100644 --- a/core/dbt/adapters/sql/connections.py +++ b/core/dbt/adapters/sql/connections.py @@ -27,9 +27,7 @@ class SQLConnectionManager(BaseConnectionManager): @abc.abstractmethod def cancel(self, connection: Connection): """Cancel the given connection.""" - raise dbt.exceptions.NotImplementedException( - "`cancel` is not implemented for this adapter!" - ) + raise dbt.exceptions.NotImplementedError("`cancel` is not implemented for this adapter!") def cancel_open(self) -> List[str]: names = [] @@ -95,7 +93,7 @@ def add_query( @abc.abstractmethod def get_response(cls, cursor: Any) -> AdapterResponse: """Get the status of the cursor.""" - raise dbt.exceptions.NotImplementedException( + raise dbt.exceptions.NotImplementedError( "`get_response` is not implemented for this adapter!" 
) @@ -151,7 +149,7 @@ def add_commit_query(self): def begin(self): connection = self.get_thread_connection() if connection.transaction_open is True: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( 'Tried to begin a new transaction on connection "{}", but ' "it already had one open!".format(connection.name) ) @@ -164,7 +162,7 @@ def begin(self): def commit(self): connection = self.get_thread_connection() if connection.transaction_open is False: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( 'Tried to commit transaction on connection "{}", but ' "it does not have one open!".format(connection.name) ) diff --git a/core/dbt/adapters/sql/impl.py b/core/dbt/adapters/sql/impl.py index 4606b046f54..fc787f0c834 100644 --- a/core/dbt/adapters/sql/impl.py +++ b/core/dbt/adapters/sql/impl.py @@ -2,7 +2,7 @@ from typing import Any, Optional, Tuple, Type, List from dbt.contracts.connection import Connection -from dbt.exceptions import RelationTypeNull +from dbt.exceptions import RelationTypeNullError from dbt.adapters.base import BaseAdapter, available from dbt.adapters.cache import _make_ref_key_msg from dbt.adapters.sql import SQLConnectionManager @@ -131,7 +131,7 @@ def alter_column_type(self, relation, column_name, new_column_type) -> None: def drop_relation(self, relation): if relation.type is None: - raise RelationTypeNull(relation) + raise RelationTypeNullError(relation) self.cache_dropped(relation) self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation}) diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py index fa74a317649..1ada0a6234d 100644 --- a/core/dbt/clients/_jinja_blocks.py +++ b/core/dbt/clients/_jinja_blocks.py @@ -2,13 +2,13 @@ from collections import namedtuple from dbt.exceptions import ( - BlockDefinitionNotAtTop, - InternalException, - MissingCloseTag, - MissingControlFlowStartTag, - NestedTags, - UnexpectedControlFlowEndTag, - UnexpectedMacroEOF, + BlockDefinitionNotAtTopError, + DbtInternalError, + MissingCloseTagError, + MissingControlFlowStartTagError, + NestedTagsError, + UnexpectedControlFlowEndTagError, + UnexpectedMacroEOFError, ) @@ -147,7 +147,7 @@ def _first_match(self, *patterns, **kwargs): def _expect_match(self, expected_name, *patterns, **kwargs): match = self._first_match(*patterns, **kwargs) if match is None: - raise UnexpectedMacroEOF(expected_name, self.data[self.pos :]) + raise UnexpectedMacroEOFError(expected_name, self.data[self.pos :]) return match def handle_expr(self, match): @@ -261,7 +261,7 @@ def find_tags(self): elif block_type_name is not None: yield self.handle_tag(match) else: - raise InternalException( + raise DbtInternalError( "Invalid regex match in next_block, expected block start, " "expr start, or comment start" ) @@ -317,16 +317,16 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): found = self.stack.pop() else: expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name] - raise UnexpectedControlFlowEndTag(tag, expected, self.tag_parser) + raise UnexpectedControlFlowEndTagError(tag, expected, self.tag_parser) expected = _CONTROL_FLOW_TAGS[found] if expected != tag.block_type_name: - raise MissingControlFlowStartTag(tag, expected, self.tag_parser) + raise MissingControlFlowStartTagError(tag, expected, self.tag_parser) if tag.block_type_name in allowed_blocks: if self.stack: - raise BlockDefinitionNotAtTop(self.tag_parser, tag.start) + raise BlockDefinitionNotAtTopError(self.tag_parser, tag.start) if self.current 
is not None: - raise NestedTags(outer=self.current, inner=tag) + raise NestedTagsError(outer=self.current, inner=tag) if collect_raw_data: raw_data = self.data[self.last_position : tag.start] self.last_position = tag.start @@ -347,7 +347,7 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True): if self.current: linecount = self.data[: self.current.end].count("\n") + 1 - raise MissingCloseTag(self.current.block_type_name, linecount) + raise MissingCloseTagError(self.current.block_type_name, linecount) if collect_raw_data: raw_data = self.data[self.last_position :] diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py index 11492a9faef..1d69a2bd17f 100644 --- a/core/dbt/clients/agate_helper.py +++ b/core/dbt/clients/agate_helper.py @@ -7,7 +7,7 @@ import dbt.utils from typing import Iterable, List, Dict, Union, Optional, Any -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError BOM = BOM_UTF8.decode("utf-8") # '\ufeff' @@ -168,7 +168,7 @@ def __setitem__(self, key, value): return elif not isinstance(value, type(existing_type)): # actual type mismatch! - raise RuntimeException( + raise DbtRuntimeError( f"Tables contain columns with the same names ({key}), " f"but different types ({value} vs {existing_type})" ) diff --git a/core/dbt/clients/git.py b/core/dbt/clients/git.py index 4ddbb1969ee..d6cb3f3870c 100644 --- a/core/dbt/clients/git.py +++ b/core/dbt/clients/git.py @@ -16,8 +16,8 @@ CommandResultError, GitCheckoutError, GitCloningError, - GitCloningProblem, - RuntimeException, + UnknownGitCloningProblemError, + DbtRuntimeError, ) from packaging import version @@ -134,7 +134,7 @@ def clone_and_checkout( err = exc.stderr exists = re.match("fatal: destination path '(.+)' already exists", err) if not exists: - raise GitCloningProblem(repo) + raise UnknownGitCloningProblemError(repo) directory = None start_sha = None @@ -144,7 +144,7 @@ def clone_and_checkout( else: matches = re.match("Cloning into '(.+)'", err.decode("utf-8")) if matches is None: - raise RuntimeException(f'Error cloning {repo} - never saw "Cloning into ..." from git') + raise DbtRuntimeError(f'Error cloning {repo} - never saw "Cloning into ..." 
from git') directory = matches.group(1) fire_event(GitProgressPullingNewDependency(dir=directory)) full_path = os.path.join(cwd, directory) diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py index c1b8865e33e..e9dcb45017b 100644 --- a/core/dbt/clients/jinja.py +++ b/core/dbt/clients/jinja.py @@ -28,17 +28,17 @@ from dbt.contracts.graph.nodes import GenericTestNode from dbt.exceptions import ( - CaughtMacroException, - CaughtMacroExceptionWithNode, - CompilationException, - InternalException, - InvalidMaterializationArg, - JinjaRenderingException, + CaughtMacroError, + CaughtMacroErrorWithNodeError, + CompilationError, + DbtInternalError, + MaterializationArgError, + JinjaRenderingError, MacroReturn, - MaterializtionMacroNotUsed, - NoSupportedLanguagesFound, - UndefinedCompilation, - UndefinedMacroException, + MaterializtionMacroNotUsedError, + NoSupportedLanguagesFoundError, + UndefinedCompilationError, + UndefinedMacroError, ) from dbt import flags from dbt.node_types import ModelLanguage @@ -161,9 +161,9 @@ def quoted_native_concat(nodes): except (ValueError, SyntaxError, MemoryError): result = raw if isinstance(raw, BoolMarker) and not isinstance(result, bool): - raise JinjaRenderingException(f"Could not convert value '{raw!s}' into type 'bool'") + raise JinjaRenderingError(f"Could not convert value '{raw!s}' into type 'bool'") if isinstance(raw, NumberMarker) and not _is_number(result): - raise JinjaRenderingException(f"Could not convert value '{raw!s}' into type 'number'") + raise JinjaRenderingError(f"Could not convert value '{raw!s}' into type 'number'") return result @@ -241,12 +241,12 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise CaughtMacroException(e) + raise CaughtMacroError(e) def call_macro(self, *args, **kwargs): # called from __call__ methods if self.context is None: - raise InternalException("Context is still None in call_macro!") + raise DbtInternalError("Context is still None in call_macro!") assert self.context is not None macro = self.get_macro() @@ -273,7 +273,7 @@ def push(self, name): def pop(self, name): got = self.call_stack.pop() if got != name: - raise InternalException(f"popped {got}, expected {name}") + raise DbtInternalError(f"popped {got}, expected {name}") class MacroGenerator(BaseMacroGenerator): @@ -300,8 +300,8 @@ def exception_handler(self) -> Iterator[None]: try: yield except (TypeError, jinja2.exceptions.TemplateRuntimeError) as e: - raise CaughtMacroExceptionWithNode(exc=e, node=self.macro) - except CompilationException as e: + raise CaughtMacroErrorWithNodeError(exc=e, node=self.macro) + except CompilationError as e: e.stack.append(self.macro) raise e @@ -380,7 +380,7 @@ def parse(self, parser): node.defaults.append(languages) else: - raise InvalidMaterializationArg(materialization_name, target.name) + raise MaterializationArgError(materialization_name, target.name) if SUPPORTED_LANG_ARG not in node.args: node.args.append(SUPPORTED_LANG_ARG) @@ -455,7 +455,7 @@ def __call__(self, *args, **kwargs): return self def __reduce__(self): - raise UndefinedCompilation(name=self.name, node=node) + raise UndefinedCompilationError(name=self.name, node=node) return Undefined @@ -513,10 +513,10 @@ def catch_jinja(node=None) -> Iterator[None]: yield except jinja2.exceptions.TemplateSyntaxError as e: e.translated = False - raise CompilationException(str(e), node) from e + raise CompilationError(str(e), node) from e except jinja2.exceptions.UndefinedError as e: - 
raise UndefinedMacroException(str(e), node) from e - except CompilationException as exc: + raise UndefinedMacroError(str(e), node) from e + except CompilationError as exc: exc.add_node(node) raise @@ -655,13 +655,13 @@ def _convert_function(value: Any, keypath: Tuple[Union[str, int], ...]) -> Any: def get_supported_languages(node: jinja2.nodes.Macro) -> List[ModelLanguage]: if "materialization" not in node.name: - raise MaterializtionMacroNotUsed(node=node) + raise MaterializtionMacroNotUsedError(node=node) no_kwargs = not node.defaults no_langs_found = SUPPORTED_LANG_ARG not in node.args if no_kwargs or no_langs_found: - raise NoSupportedLanguagesFound(node=node) + raise NoSupportedLanguagesFoundError(node=node) lang_idx = node.args.index(SUPPORTED_LANG_ARG) # indexing defaults from the end diff --git a/core/dbt/clients/jinja_static.py b/core/dbt/clients/jinja_static.py index d71211cea6e..47790166ae5 100644 --- a/core/dbt/clients/jinja_static.py +++ b/core/dbt/clients/jinja_static.py @@ -1,6 +1,6 @@ import jinja2 from dbt.clients.jinja import get_environment -from dbt.exceptions import MacroNamespaceNotString, MacroNameNotString +from dbt.exceptions import MacroNamespaceNotStringError, MacroNameNotStringError def statically_extract_macro_calls(string, ctx, db_wrapper=None): @@ -117,14 +117,14 @@ def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper): func_name = kwarg.value.value possible_macro_calls.append(func_name) else: - raise MacroNameNotString(kwarg_value=kwarg.value.value) + raise MacroNameNotStringError(kwarg_value=kwarg.value.value) elif kwarg.key == "macro_namespace": # This will remain to enable static resolution kwarg_type = type(kwarg.value).__name__ if kwarg_type == "Const": macro_namespace = kwarg.value.value else: - raise MacroNamespaceNotString(kwarg_type) + raise MacroNamespaceNotStringError(kwarg_type) # positional arguments if packages_arg: diff --git a/core/dbt/clients/system.py b/core/dbt/clients/system.py index 0382dcb98e8..6c72fadea52 100644 --- a/core/dbt/clients/system.py +++ b/core/dbt/clients/system.py @@ -412,7 +412,7 @@ def _interpret_oserror(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn: _handle_posix_error(exc, cwd, cmd) # this should not be reachable, raise _something_ at least! - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( "Unhandled exception in _interpret_oserror: {}".format(exc) ) diff --git a/core/dbt/clients/yaml_helper.py b/core/dbt/clients/yaml_helper.py index bc0ada41ebb..d5a29b0309f 100644 --- a/core/dbt/clients/yaml_helper.py +++ b/core/dbt/clients/yaml_helper.py @@ -60,4 +60,4 @@ def load_yaml_text(contents, path=None): else: error = str(e) - raise dbt.exceptions.ValidationException(error) + raise dbt.exceptions.DbtValidationError(error) diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 4ae78fd3485..19e603b6312 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -21,9 +21,9 @@ SeedNode, ) from dbt.exceptions import ( - GraphDependencyNotFound, - InternalException, - RuntimeException, + GraphDependencyNotFoundError, + DbtInternalError, + DbtRuntimeError, ) from dbt.graph import Graph from dbt.events.functions import fire_event @@ -257,7 +257,7 @@ def _recursively_prepend_ctes( inserting CTEs into the SQL. 
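# A sketch of the splice this function performs once each ephemeral parent has
# been compiled (names simplified; the real code tracks injection state on the
# node and resolves cte ids through the manifest):
#
#     def prepend_ctes(model_sql: str, ctes: list) -> str:
#         # ctes: [(name, compiled_sql), ...] already in dependency order
#         if not ctes:
#             return model_sql
#         body = ",\n".join(f"{name} as (\n{sql}\n)" for name, sql in ctes)
#         return f"with {body}\n{model_sql}"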
""" if model.compiled_code is None: - raise RuntimeException("Cannot inject ctes into an unparsed node", model) + raise DbtRuntimeError("Cannot inject ctes into an unparsed node", model) if model.extra_ctes_injected: return (model, model.extra_ctes) @@ -278,7 +278,7 @@ def _recursively_prepend_ctes( # ephemeral model. for cte in model.extra_ctes: if cte.id not in manifest.nodes: - raise InternalException( + raise DbtInternalError( f"During compilation, found a cte reference that " f"could not be resolved: {cte.id}" ) @@ -286,7 +286,7 @@ def _recursively_prepend_ctes( assert not isinstance(cte_model, SeedNode) if not cte_model.is_ephemeral_model: - raise InternalException(f"{cte.id} is not ephemeral") + raise DbtInternalError(f"{cte.id} is not ephemeral") # This model has already been compiled, so it's been # through here before @@ -399,7 +399,7 @@ def link_node(self, linker: Linker, node: GraphMemberNode, manifest: Manifest): elif dependency in manifest.metrics: linker.dependency(node.unique_id, (manifest.metrics[dependency].unique_id)) else: - raise GraphDependencyNotFound(node, dependency) + raise GraphDependencyNotFoundError(node, dependency) def link_graph(self, linker: Linker, manifest: Manifest, add_test_edges: bool = False): for source in manifest.sources.values(): diff --git a/core/dbt/config/profile.py b/core/dbt/config/profile.py index e8bf85dbd27..156c41445e9 100644 --- a/core/dbt/config/profile.py +++ b/core/dbt/config/profile.py @@ -10,12 +10,12 @@ from dbt.contracts.connection import Credentials, HasCredentials from dbt.contracts.project import ProfileConfig, UserConfig from dbt.exceptions import ( - CompilationException, + CompilationError, DbtProfileError, DbtProjectError, - ValidationException, - RuntimeException, - ProfileConfigInvalid, + DbtValidationError, + DbtRuntimeError, + ProfileConfigError, ) from dbt.events.types import MissingProfileTarget from dbt.events.functions import fire_event @@ -60,9 +60,9 @@ def read_profile(profiles_dir: str) -> Dict[str, Any]: msg = f"The profiles.yml file at {path} is empty" raise DbtProfileError(INVALID_PROFILE_MESSAGE.format(error_string=msg)) return yaml_content - except ValidationException as e: + except DbtValidationError as e: msg = INVALID_PROFILE_MESSAGE.format(error_string=e) - raise ValidationException(msg) from e + raise DbtValidationError(msg) from e return {} @@ -75,7 +75,7 @@ def read_user_config(directory: str) -> UserConfig: if user_config is not None: UserConfig.validate(user_config) return UserConfig.from_dict(user_config) - except (RuntimeException, ValidationError): + except (DbtRuntimeError, ValidationError): pass return UserConfig() @@ -158,7 +158,7 @@ def validate(self): dct = self.to_profile_info(serialize_credentials=True) ProfileConfig.validate(dct) except ValidationError as exc: - raise ProfileConfigInvalid(exc) from exc + raise ProfileConfigError(exc) from exc @staticmethod def _credentials_from_profile( @@ -182,8 +182,8 @@ def _credentials_from_profile( data = cls.translate_aliases(profile) cls.validate(data) credentials = cls.from_dict(data) - except (RuntimeException, ValidationError) as e: - msg = str(e) if isinstance(e, RuntimeException) else e.message + except (DbtRuntimeError, ValidationError) as e: + msg = str(e) if isinstance(e, DbtRuntimeError) else e.message raise DbtProfileError( 'Credentials in profile "{}", target "{}" invalid: {}'.format( profile_name, target_name, msg @@ -299,7 +299,7 @@ def render_profile( try: profile_data = renderer.render_data(raw_profile_data) - except 
CompilationException as exc: + except CompilationError as exc: raise DbtProfileError(str(exc)) from exc return target_name, profile_data diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 69c6b79866c..7f0398f53c6 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -21,10 +21,10 @@ from dbt.contracts.connection import QueryComment from dbt.exceptions import ( DbtProjectError, - SemverException, - ProjectContractBroken, - ProjectContractInvalid, - RuntimeException, + SemverError, + ProjectContractBrokenError, + ProjectContractError, + DbtRuntimeError, ) from dbt.graph import SelectionSpec from dbt.helper_types import NoValue @@ -219,7 +219,7 @@ def _get_required_version( try: dbt_version = _parse_versions(dbt_raw_version) - except SemverException as e: + except SemverError as e: raise DbtProjectError(str(e)) from e if verify_version: @@ -325,7 +325,7 @@ def create_project(self, rendered: RenderComponents) -> "Project": ProjectContract.validate(rendered.project_dict) cfg = ProjectContract.from_dict(rendered.project_dict) except ValidationError as e: - raise ProjectContractInvalid(e) from e + raise ProjectContractError(e) from e # name/version are required in the Project definition, so we can assume # they are present name = cfg.name @@ -642,7 +642,7 @@ def validate(self): try: ProjectContract.validate(self.to_project_config()) except ValidationError as e: - raise ProjectContractBroken(e) from e + raise ProjectContractBrokenError(e) from e @classmethod def partial_load(cls, project_root: str, *, verify_version: bool = False) -> PartialProject: @@ -667,7 +667,7 @@ def hashed_name(self): def get_selector(self, name: str) -> Union[SelectionSpec, bool]: if name not in self.selectors: - raise RuntimeException( + raise DbtRuntimeError( f"Could not find selector named {name}, expected one of {list(self.selectors)}" ) return self.selectors[name]["definition"] diff --git a/core/dbt/config/renderer.py b/core/dbt/config/renderer.py index 434e30666a4..68958dbbce5 100644 --- a/core/dbt/config/renderer.py +++ b/core/dbt/config/renderer.py @@ -8,7 +8,7 @@ from dbt.context.secret import SecretContext, SECRET_PLACEHOLDER from dbt.context.base import BaseContext from dbt.contracts.connection import HasCredentials -from dbt.exceptions import DbtProjectError, CompilationException, RecursionException +from dbt.exceptions import DbtProjectError, CompilationError, RecursionError from dbt.utils import deep_map_render @@ -40,14 +40,14 @@ def render_value(self, value: Any, keypath: Optional[Keypath] = None) -> Any: try: with catch_jinja(): return get_rendered(value, self.context, native=True) - except CompilationException as exc: + except CompilationError as exc: msg = f"Could not render {value}: {exc.msg}" - raise CompilationException(msg) from exc + raise CompilationError(msg) from exc def render_data(self, data: Dict[str, Any]) -> Dict[str, Any]: try: return deep_map_render(self.render_entry, data) - except RecursionException: + except RecursionError: raise DbtProjectError( f"Cycle detected: {self.name} input has a reference to itself", project=data ) diff --git a/core/dbt/config/runtime.py b/core/dbt/config/runtime.py index 8b1b30f383b..b0b74b9a222 100644 --- a/core/dbt/config/runtime.py +++ b/core/dbt/config/runtime.py @@ -25,11 +25,11 @@ from dbt.contracts.relation import ComponentName from dbt.dataclass_schema import ValidationError from dbt.exceptions import ( - ConfigContractBroken, + ConfigContractBrokenError, DbtProjectError, - NonUniquePackageName, - 
RuntimeException, - UninstalledPackagesFound, + NonUniquePackageNameError, + DbtRuntimeError, + UninstalledPackagesFoundError, ) from dbt.events.functions import warn_or_error from dbt.events.types import UnusedResourceConfigPath @@ -187,7 +187,7 @@ def validate(self): try: Configuration.validate(self.serialize()) except ValidationError as e: - raise ConfigContractBroken(e) from e + raise ConfigContractBrokenError(e) from e @classmethod def _get_rendered_profile( @@ -258,7 +258,7 @@ def from_args(cls, args: Any) -> "RuntimeConfig": :param args: The arguments as parsed from the cli. :raises DbtProjectError: If the project is invalid or missing. :raises DbtProfileError: If the profile is invalid or missing. - :raises ValidationException: If the cli variables are invalid. + :raises DbtValidationError: If the cli variables are invalid. """ project, profile = cls.collect_parts(args) @@ -353,7 +353,7 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: count_packages_specified = len(self.packages.packages) # type: ignore count_packages_installed = len(tuple(self._get_project_directories())) if count_packages_specified > count_packages_installed: - raise UninstalledPackagesFound( + raise UninstalledPackagesFoundError( count_packages_specified, count_packages_installed, self.packages_install_path, @@ -361,7 +361,7 @@ def load_dependencies(self, base_only=False) -> Mapping[str, "RuntimeConfig"]: project_paths = itertools.chain(internal_packages, self._get_project_directories()) for project_name, project in self.load_projects(project_paths): if project_name in all_projects: - raise NonUniquePackageName(project_name) + raise NonUniquePackageNameError(project_name) all_projects[project_name] = project self.dependencies = all_projects return self.dependencies @@ -426,7 +426,7 @@ def to_target_dict(self): def __getattribute__(self, name): if name in {"profile_name", "target_name", "threads"}: - raise RuntimeException(f'Error: disallowed attribute "{name}" - no profile!') + raise DbtRuntimeError(f'Error: disallowed attribute "{name}" - no profile!') return Profile.__getattribute__(self, name) @@ -453,7 +453,7 @@ def __post_init__(self): def __getattribute__(self, name): # Override __getattribute__ to check that the attribute isn't 'banned'. if name in {"profile_name", "target_name"}: - raise RuntimeException(f'Error: disallowed attribute "{name}" - no profile!') + raise DbtRuntimeError(f'Error: disallowed attribute "{name}" - no profile!') # avoid every attribute access triggering infinite recursion return RuntimeConfig.__getattribute__(self, name) @@ -602,7 +602,7 @@ def from_args(cls: Type[RuntimeConfig], args: Any) -> "RuntimeConfig": :param args: The arguments as parsed from the cli. :raises DbtProjectError: If the project is invalid or missing. :raises DbtProfileError: If the profile is invalid or missing. - :raises ValidationException: If the cli variables are invalid. + :raises DbtValidationError: If the cli variables are invalid. 
""" project, profile = cls.collect_parts(args) diff --git a/core/dbt/config/selectors.py b/core/dbt/config/selectors.py index 193a1bb70a8..e26ee01d316 100644 --- a/core/dbt/config/selectors.py +++ b/core/dbt/config/selectors.py @@ -12,7 +12,7 @@ resolve_path_from_base, ) from dbt.contracts.selection import SelectorFile -from dbt.exceptions import DbtSelectorsError, RuntimeException +from dbt.exceptions import DbtSelectorsError, DbtRuntimeError from dbt.graph import parse_from_selectors_definition, SelectionSpec from dbt.graph.selector_spec import SelectionCriteria @@ -46,7 +46,7 @@ def selectors_from_dict(cls, data: Dict[str, Any]) -> "SelectorConfig": f"yaml-selectors", result_type="invalid_selector", ) from exc - except RuntimeException as exc: + except DbtRuntimeError as exc: raise DbtSelectorsError( f"Could not read selector file data: {exc}", result_type="invalid_selector", @@ -62,7 +62,7 @@ def render_from_dict( ) -> "SelectorConfig": try: rendered = renderer.render_data(data) - except (ValidationError, RuntimeException) as exc: + except (ValidationError, DbtRuntimeError) as exc: raise DbtSelectorsError( f"Could not render selector data: {exc}", result_type="invalid_selector", @@ -77,7 +77,7 @@ def from_path( ) -> "SelectorConfig": try: data = load_yaml_text(load_file_contents(str(path))) - except (ValidationError, RuntimeException) as exc: + except (ValidationError, DbtRuntimeError) as exc: raise DbtSelectorsError( f"Could not read selector file: {exc}", result_type="invalid_selector", diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py index 921626ba088..eb379b5d1f7 100644 --- a/core/dbt/config/utils.py +++ b/core/dbt/config/utils.py @@ -9,7 +9,7 @@ from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.events.functions import fire_event from dbt.events.types import InvalidVarsYAML -from dbt.exceptions import ValidationException, VarsArgNotYamlDict +from dbt.exceptions import DbtValidationError, VarsArgNotYamlDictError def parse_cli_vars(var_string: str) -> Dict[str, Any]: @@ -19,8 +19,8 @@ def parse_cli_vars(var_string: str) -> Dict[str, Any]: if var_type is dict: return cli_vars else: - raise VarsArgNotYamlDict(var_type) - except ValidationException: + raise VarsArgNotYamlDictError(var_type) + except DbtValidationError: fire_event(InvalidVarsYAML()) raise diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index fc218538bac..edf0895fe31 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -10,12 +10,12 @@ from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER from dbt.contracts.graph.nodes import Resource from dbt.exceptions import ( - DisallowSecretEnvVar, - EnvVarMissing, + SecretEnvVarLocationError, + EnvVarMissingError, MacroReturn, - RequiredVarNotFound, - SetStrictWrongType, - ZipStrictWrongType, + RequiredVarNotFoundError, + SetStrictWrongTypeError, + ZipStrictWrongTypeError, ) from dbt.events.functions import fire_event, get_invocation_id from dbt.events.types import JinjaLogInfo, JinjaLogDebug @@ -153,7 +153,7 @@ def node_name(self): return "" def get_missing_var(self, var_name): - raise RequiredVarNotFound(var_name, self._merged, self._node) + raise RequiredVarNotFoundError(var_name, self._merged, self._node) def has_var(self, var_name: str): return var_name in self._merged @@ -297,7 +297,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - raise DisallowSecretEnvVar(var) + raise 
SecretEnvVarLocationError(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -312,7 +312,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) if os.environ.get("DBT_MACRO_DEBUGGING"): @@ -493,7 +493,7 @@ def set_strict(value: Iterable[Any]) -> Set[Any]: try: return set(value) except TypeError as e: - raise SetStrictWrongType(e) + raise SetStrictWrongTypeError(e) @contextmember("zip") @staticmethod @@ -537,7 +537,7 @@ def zip_strict(*args: Iterable[Any]) -> Iterable[Any]: try: return zip(*args) except TypeError as e: - raise ZipStrictWrongType(e) + raise ZipStrictWrongTypeError(e) @contextmember @staticmethod diff --git a/core/dbt/context/configured.py b/core/dbt/context/configured.py index ca1de35423b..da4132e8046 100644 --- a/core/dbt/context/configured.py +++ b/core/dbt/context/configured.py @@ -8,7 +8,7 @@ from dbt.context.base import contextproperty, contextmember, Var from dbt.context.target import TargetContext -from dbt.exceptions import EnvVarMissing, DisallowSecretEnvVar +from dbt.exceptions import EnvVarMissingError, SecretEnvVarLocationError class ConfiguredContext(TargetContext): @@ -86,7 +86,7 @@ def var(self) -> ConfiguredVar: def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - raise DisallowSecretEnvVar(var) + raise SecretEnvVarLocationError(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -104,7 +104,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) class MacroResolvingContext(ConfiguredContext): diff --git a/core/dbt/context/context_config.py b/core/dbt/context/context_config.py index 2b0aafe7189..b497887ab45 100644 --- a/core/dbt/context/context_config.py +++ b/core/dbt/context/context_config.py @@ -5,7 +5,7 @@ from dbt.config import RuntimeConfig, Project, IsFQNResource from dbt.contracts.graph.model_config import BaseConfig, get_config_for, _listify -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.node_types import NodeType from dbt.utils import fqn_search @@ -89,7 +89,7 @@ def get_node_project(self, project_name: str): return self._active_project dependencies = self._active_project.load_dependencies() if project_name not in dependencies: - raise InternalException( + raise DbtInternalError( f"Project name {project_name} not found in dependencies " f"(found {list(dependencies)})" ) @@ -287,14 +287,14 @@ def _add_config_call(cls, config_call_dict, opts: Dict[str, Any]) -> None: elif k in BaseConfig.mergebehavior["update"]: if not isinstance(v, dict): - raise InternalException(f"expected dict, got {v}") + raise DbtInternalError(f"expected dict, got {v}") if k in config_call_dict and isinstance(config_call_dict[k], dict): config_call_dict[k].update(v) else: config_call_dict[k] = v elif k in BaseConfig.mergebehavior["dict_key_append"]: if not isinstance(v, dict): - raise InternalException(f"expected dict, got {v}") + raise DbtInternalError(f"expected dict, got {v}") if k in config_call_dict: # should always be a dict for key, value in v.items(): extend = False diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py index 89a652736dd..3d5abf42e11 100644 --- a/core/dbt/context/docs.py +++ b/core/dbt/context/docs.py @@ -1,8 +1,8 @@ from typing import Any, Dict, 
Union from dbt.exceptions import ( - DocTargetNotFound, - InvalidDocArgs, + DocTargetNotFoundError, + DocArgsError, ) from dbt.config.runtime import RuntimeConfig from dbt.contracts.graph.manifest import Manifest @@ -52,7 +52,7 @@ def doc(self, *args: str) -> str: elif len(args) == 2: doc_package_name, doc_name = args else: - raise InvalidDocArgs(self.node, args) + raise DocArgsError(self.node, args) # Documentation target_doc = self.manifest.resolve_doc( @@ -68,7 +68,7 @@ def doc(self, *args: str) -> str: # TODO CT-211 source_file.add_node(self.node.unique_id) # type: ignore[union-attr] else: - raise DocTargetNotFound( + raise DocTargetNotFoundError( node=self.node, target_doc_name=doc_name, target_doc_package=doc_package_name ) diff --git a/core/dbt/context/exceptions_jinja.py b/core/dbt/context/exceptions_jinja.py index a1f49e416fb..98f19048f1a 100644 --- a/core/dbt/context/exceptions_jinja.py +++ b/core/dbt/context/exceptions_jinja.py @@ -6,23 +6,23 @@ from dbt.events.types import JinjaLogWarning from dbt.exceptions import ( - RuntimeException, - MissingConfig, - MissingMaterialization, - MissingRelation, - AmbiguousAlias, - AmbiguousCatalogMatch, - CacheInconsistency, - DataclassNotDict, - CompilationException, - DatabaseException, - DependencyNotFound, - DependencyException, - DuplicatePatchPath, - DuplicateResourceName, - InvalidPropertyYML, - NotImplementedException, - RelationWrongType, + DbtRuntimeError, + MissingConfigError, + MissingMaterializationError, + MissingRelationError, + AmbiguousAliasError, + AmbiguousCatalogMatchError, + CacheInconsistencyError, + DataclassNotDictError, + CompilationError, + DbtDatabaseError, + DependencyNotFoundError, + DependencyError, + DuplicatePatchPathError, + DuplicateResourceNameError, + PropertyYMLError, + NotImplementedError, + RelationWrongTypeError, ) @@ -32,69 +32,69 @@ def warn(msg, node=None): def missing_config(model, name) -> NoReturn: - raise MissingConfig(unique_id=model.unique_id, name=name) + raise MissingConfigError(unique_id=model.unique_id, name=name) def missing_materialization(model, adapter_type) -> NoReturn: - raise MissingMaterialization( + raise MissingMaterializationError( materialization=model.config.materialized, adapter_type=adapter_type ) def missing_relation(relation, model=None) -> NoReturn: - raise MissingRelation(relation, model) + raise MissingRelationError(relation, model) def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: - raise AmbiguousAlias(node_1, node_2, duped_name) + raise AmbiguousAliasError(node_1, node_2, duped_name) def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: - raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + raise AmbiguousCatalogMatchError(unique_id, match_1, match_2) def raise_cache_inconsistent(message) -> NoReturn: - raise CacheInconsistency(message) + raise CacheInconsistencyError(message) def raise_dataclass_not_dict(obj) -> NoReturn: - raise DataclassNotDict(obj) + raise DataclassNotDictError(obj) def raise_compiler_error(msg, node=None) -> NoReturn: - raise CompilationException(msg, node) + raise CompilationError(msg, node) def raise_database_error(msg, node=None) -> NoReturn: - raise DatabaseException(msg, node) + raise DbtDatabaseError(msg, node) def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: - raise DependencyNotFound(node, node_description, required_pkg) + raise DependencyNotFoundError(node, node_description, required_pkg) def raise_dependency_error(msg) -> NoReturn: - raise 
DependencyException(scrub_secrets(msg, env_secrets())) + raise DependencyError(scrub_secrets(msg, env_secrets())) def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: - raise DuplicatePatchPath(patch_1, existing_patch_path) + raise DuplicatePatchPathError(patch_1, existing_patch_path) def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: - raise DuplicateResourceName(node_1, node_2) + raise DuplicateResourceNameError(node_1, node_2) def raise_invalid_property_yml_version(path, issue) -> NoReturn: - raise InvalidPropertyYML(path, issue) + raise PropertyYMLError(path, issue) def raise_not_implemented(msg) -> NoReturn: - raise NotImplementedException(msg) + raise NotImplementedError(msg) def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: - raise RelationWrongType(relation, expected_type, model) + raise RelationWrongTypeError(relation, expected_type, model) # Update this when a new function should be added to the @@ -130,7 +130,7 @@ def wrap(func): def inner(*args, **kwargs): try: return func(*args, **kwargs) - except RuntimeException as exc: + except DbtRuntimeError as exc: exc.add_node(model) raise exc diff --git a/core/dbt/context/macro_resolver.py b/core/dbt/context/macro_resolver.py index 6e70bafd05e..20f97febcb0 100644 --- a/core/dbt/context/macro_resolver.py +++ b/core/dbt/context/macro_resolver.py @@ -1,6 +1,6 @@ from typing import Dict, MutableMapping, Optional from dbt.contracts.graph.nodes import Macro -from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro +from dbt.exceptions import DuplicateMacroNameError, PackageNotFoundForMacroError from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME from dbt.clients.jinja import MacroGenerator @@ -86,7 +86,7 @@ def _add_macro_to( package_namespaces[macro.package_name] = namespace if macro.name in namespace: - raise DuplicateMacroName(macro, macro, macro.package_name) + raise DuplicateMacroNameError(macro, macro, macro.package_name) package_namespaces[macro.package_name][macro.name] = macro def add_macro(self, macro: Macro): @@ -187,7 +187,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.macro_resolver.packages: macro = self.macro_resolver.packages[package_name].get(name) else: - raise PackageNotFoundForMacro(package_name) + raise PackageNotFoundForMacroError(package_name) if not macro: return None macro_func = MacroGenerator(macro, self.ctx, self.node, self.thread_ctx) diff --git a/core/dbt/context/macros.py b/core/dbt/context/macros.py index 921480ec05a..1c61e564e06 100644 --- a/core/dbt/context/macros.py +++ b/core/dbt/context/macros.py @@ -3,7 +3,7 @@ from dbt.clients.jinja import MacroGenerator, MacroStack from dbt.contracts.graph.nodes import Macro from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME -from dbt.exceptions import DuplicateMacroName, PackageNotFoundForMacro +from dbt.exceptions import DuplicateMacroNameError, PackageNotFoundForMacroError FlatNamespace = Dict[str, MacroGenerator] @@ -75,7 +75,7 @@ def get_from_package(self, package_name: Optional[str], name: str) -> Optional[M elif package_name in self.packages: return self.packages[package_name].get(name) else: - raise PackageNotFoundForMacro(package_name) + raise PackageNotFoundForMacroError(package_name) # This class builds the MacroNamespace by adding macros to @@ -122,7 +122,7 @@ def _add_macro_to( hierarchy[macro.package_name] = namespace if macro.name in namespace: - raise 
DuplicateMacroName(macro_func.macro, macro, macro.package_name) + raise DuplicateMacroNameError(macro_func.macro, macro, macro.package_name) hierarchy[macro.package_name][macro.name] = macro_func def add_macro(self, macro: Macro, ctx: Dict[str, Any]): diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 2e7af0a79f2..fec5111e36c 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -41,28 +41,28 @@ from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference from dbt.events.functions import get_metadata_vars from dbt.exceptions import ( - CompilationException, - ConflictingConfigKeys, - DisallowSecretEnvVar, - EnvVarMissing, - InternalException, - InvalidInlineModelConfig, - InvalidNumberSourceArgs, - InvalidPersistDocsValueType, - LoadAgateTableNotSeed, + CompilationError, + ConflictingConfigKeysError, + SecretEnvVarLocationError, + EnvVarMissingError, + DbtInternalError, + InlineModelConfigError, + NumberSourceArgsError, + PersistDocsValueTypeError, + LoadAgateTableNotSeedError, LoadAgateTableValueError, - MacroInvalidDispatchArg, - MacrosSourcesUnWriteable, - MetricInvalidArgs, - MissingConfig, - OperationsCannotRefEphemeralNodes, - PackageNotInDeps, - ParsingException, - RefBadContext, - RefInvalidArgs, - RuntimeException, - TargetNotFound, - ValidationException, + MacroDispatchArgError, + MacrosSourcesUnWriteableError, + MetricArgsError, + MissingConfigError, + OperationsCannotRefEphemeralNodesError, + PackageNotInDepsError, + ParsingError, + RefBadContextError, + RefArgsError, + DbtRuntimeError, + TargetNotFoundError, + DbtValidationError, ) from dbt.config import IsFQNResource from dbt.node_types import NodeType, ModelLanguage @@ -144,10 +144,10 @@ def dispatch( f'`adapter.dispatch("{suggest_macro_name}", ' f'macro_namespace="{suggest_macro_namespace}")`?' ) - raise CompilationException(msg) + raise CompilationError(msg) if packages is not None: - raise MacroInvalidDispatchArg(macro_name) + raise MacroDispatchArgError(macro_name) namespace = macro_namespace @@ -159,7 +159,7 @@ def dispatch( search_packages = [self.config.project_name, namespace] else: # Not a string and not None so must be a list - raise CompilationException( + raise CompilationError( f"In adapter.dispatch, got a list macro_namespace argument " f'("{macro_namespace}"), but macro_namespace should be None or a string.' 
) @@ -172,8 +172,8 @@ def dispatch( try: # this uses the namespace from the context macro = self._namespace.get_from_package(package_name, search_name) - except CompilationException: - # Only raise CompilationException if macro is not found in + except CompilationError: + # Only raise CompilationError if macro is not found in # any package macro = None @@ -187,7 +187,7 @@ def dispatch( searched = ", ".join(repr(a) for a in attempts) msg = f"In dispatch: No macro named '{macro_name}' found\n Searched for: {searched}" - raise CompilationException(msg) + raise CompilationError(msg) class BaseResolver(metaclass=abc.ABCMeta): @@ -223,12 +223,12 @@ def _repack_args(self, name: str, package: Optional[str]) -> List[str]: def validate_args(self, name: str, package: Optional[str]): if not isinstance(name, str): - raise CompilationException( + raise CompilationError( f"The name argument to ref() must be a string, got {type(name)}" ) if package is not None and not isinstance(package, str): - raise CompilationException( + raise CompilationError( f"The package argument to ref() must be a string or None, got {type(package)}" ) @@ -241,7 +241,7 @@ def __call__(self, *args: str) -> RelationProxy: elif len(args) == 2: package, name = args else: - raise RefInvalidArgs(node=self.model, args=args) + raise RefArgsError(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -253,19 +253,19 @@ def resolve(self, source_name: str, table_name: str): def validate_args(self, source_name: str, table_name: str): if not isinstance(source_name, str): - raise CompilationException( + raise CompilationError( f"The source name (first) argument to source() must be a " f"string, got {type(source_name)}" ) if not isinstance(table_name, str): - raise CompilationException( + raise CompilationError( f"The table name (second) argument to source() must be a " f"string, got {type(table_name)}" ) def __call__(self, *args: str) -> RelationProxy: if len(args) != 2: - raise InvalidNumberSourceArgs(args, node=self.model) + raise NumberSourceArgsError(args, node=self.model) self.validate_args(args[0], args[1]) return self.resolve(args[0], args[1]) @@ -282,12 +282,12 @@ def _repack_args(self, name: str, package: Optional[str]) -> List[str]: def validate_args(self, name: str, package: Optional[str]): if not isinstance(name, str): - raise CompilationException( + raise CompilationError( f"The name argument to metric() must be a string, got {type(name)}" ) if package is not None and not isinstance(package, str): - raise CompilationException( + raise CompilationError( f"The package argument to metric() must be a string or None, got {type(package)}" ) @@ -300,7 +300,7 @@ def __call__(self, *args: str) -> MetricReference: elif len(args) == 2: package, name = args else: - raise MetricInvalidArgs(node=self.model, args=args) + raise MetricArgsError(node=self.model, args=args) self.validate_args(name, package) return self.resolve(name, package) @@ -321,7 +321,7 @@ def _transform_config(self, config): if oldkey in config: newkey = oldkey.replace("_", "-") if newkey in config: - raise ConflictingConfigKeys(oldkey, newkey, node=self.model) + raise ConflictingConfigKeysError(oldkey, newkey, node=self.model) config[newkey] = config.pop(oldkey) return config @@ -331,14 +331,14 @@ def __call__(self, *args, **kwargs): elif len(args) == 0 and len(kwargs) > 0: opts = kwargs else: - raise InvalidInlineModelConfig(node=self.model) + raise InlineModelConfigError(node=self.model) opts = self._transform_config(opts) # it's 
ok to have a parse context with no context config, but you must # not call it! if self.context_config is None: - raise RuntimeException("At parse time, did not receive a context config") + raise DbtRuntimeError("At parse time, did not receive a context config") self.context_config.add_config_call(opts) return "" @@ -379,7 +379,7 @@ def _lookup(self, name, default=_MISSING): else: result = self.model.config.get(name, default) if result is _MISSING: - raise MissingConfig(unique_id=self.model.unique_id, name=name) + raise MissingConfigError(unique_id=self.model.unique_id, name=name) return result def require(self, name, validator=None): @@ -401,14 +401,14 @@ def get(self, name, default=None, validator=None): def persist_relation_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise InvalidPersistDocsValueType(persist_docs) + raise PersistDocsValueTypeError(persist_docs) return persist_docs.get("relation", False) def persist_column_docs(self) -> bool: persist_docs = self.get("persist_docs", default={}) if not isinstance(persist_docs, dict): - raise InvalidPersistDocsValueType(persist_docs) + raise PersistDocsValueTypeError(persist_docs) return persist_docs.get("columns", False) @@ -467,7 +467,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Rel ) if target_model is None or isinstance(target_model, Disabled): - raise TargetNotFound( + raise TargetNotFoundError( node=self.model, target_name=target_name, target_kind="node", @@ -489,7 +489,7 @@ def validate( ) -> None: if resolved.unique_id not in self.model.depends_on.nodes: args = self._repack_args(target_name, target_package) - raise RefBadContext(node=self.model, args=args) + raise RefBadContextError(node=self.model, args=args) class OperationRefResolver(RuntimeRefResolver): @@ -505,7 +505,7 @@ def create_relation(self, target_model: ManifestNode, name: str) -> RelationProx if target_model.is_ephemeral_model: # In operations, we can't ref() ephemeral nodes, because # Macros do not support set_cte - raise OperationsCannotRefEphemeralNodes(target_model.name, node=self.model) + raise OperationsCannotRefEphemeralNodesError(target_model.name, node=self.model) else: return super().create_relation(target_model, name) @@ -528,7 +528,7 @@ def resolve(self, source_name: str, table_name: str): ) if target_source is None or isinstance(target_source, Disabled): - raise TargetNotFound( + raise TargetNotFoundError( node=self.model, target_name=f"{source_name}.{table_name}", target_kind="source", @@ -555,7 +555,7 @@ def resolve(self, target_name: str, target_package: Optional[str] = None) -> Met ) if target_metric is None or isinstance(target_metric, Disabled): - raise TargetNotFound( + raise TargetNotFoundError( node=self.model, target_name=target_name, target_kind="metric", @@ -584,7 +584,7 @@ def packages_for_node(self) -> Iterable[Project]: if package_name != self._config.project_name: if package_name not in dependencies: # I don't think this is actually reachable - raise PackageNotInDeps(package_name, node=self._node) + raise PackageNotInDepsError(package_name, node=self._node) yield dependencies[package_name] yield self._config @@ -674,7 +674,7 @@ def __init__( context_config: Optional[ContextConfig], ) -> None: if provider is None: - raise InternalException(f"Invalid provider given to context: {provider}") + raise DbtInternalError(f"Invalid provider given to context: {provider}") # mypy appeasement - we know it'll be a RuntimeConfig self.config: 
RuntimeConfig self.model: Union[Macro, ManifestNode] = model @@ -751,7 +751,7 @@ def inner(value: T) -> None: return elif value == arg: return - raise ValidationException( + raise DbtValidationError( 'Expected value "{}" to be one of {}'.format(value, ",".join(map(str, args))) ) @@ -767,7 +767,7 @@ def inner(value: T) -> None: def write(self, payload: str) -> str: # macros/source defs aren't 'writeable'. if isinstance(self.model, (Macro, SourceDefinition)): - raise MacrosSourcesUnWriteable(node=self.model) + raise MacrosSourcesUnWriteableError(node=self.model) self.model.build_path = self.model.write_node(self.config.target_path, "run", payload) return "" @@ -782,12 +782,12 @@ def try_or_compiler_error( try: return func(*args, **kwargs) except Exception: - raise CompilationException(message_if_exception, self.model) + raise CompilationError(message_if_exception, self.model) @contextmember def load_agate_table(self) -> agate.Table: if not isinstance(self.model, SeedNode): - raise LoadAgateTableNotSeed(self.model.resource_type, node=self.model) + raise LoadAgateTableNotSeedError(self.model.resource_type, node=self.model) assert self.model.root_path path = os.path.join(self.model.root_path, self.model.original_file_path) column_types = self.model.config.column_types @@ -1185,7 +1185,7 @@ def adapter_macro(self, name: str, *args, **kwargs): "https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch)" " adapter_macro was called for: {macro_name}".format(macro_name=name) ) - raise CompilationException(msg) + raise CompilationError(msg) @contextmember def env_var(self, var: str, default: Optional[str] = None) -> str: @@ -1196,7 +1196,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: """ return_value = None if var.startswith(SECRET_ENV_PREFIX): - raise DisallowSecretEnvVar(var) + raise SecretEnvVarLocationError(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1229,7 +1229,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.env_vars.append(var) # type: ignore[union-attr] return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) @contextproperty def selected_resources(self) -> List[str]: @@ -1248,7 +1248,7 @@ def submit_python_job(self, parsed_model: Dict, compiled_code: str) -> AdapterRe and self.context_macro_stack.call_stack[1] == "macro.dbt.statement" and "materialization" in self.context_macro_stack.call_stack[0] ): - raise RuntimeException( + raise DbtRuntimeError( f"submit_python_job is not intended to be called here, at model {parsed_model['alias']}, with macro call_stack {self.context_macro_stack.call_stack}." 
) return self.adapter.submit_python_job(parsed_model, compiled_code) @@ -1410,7 +1410,7 @@ def generate_runtime_macro_context( class ExposureRefResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) not in (1, 2): - raise RefInvalidArgs(node=self.model, args=args) + raise RefArgsError(node=self.model, args=args) self.model.refs.append(list(args)) return "" @@ -1418,7 +1418,7 @@ def __call__(self, *args) -> str: class ExposureSourceResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) != 2: - raise InvalidNumberSourceArgs(args, node=self.model) + raise NumberSourceArgsError(args, node=self.model) self.model.sources.append(list(args)) return "" @@ -1426,7 +1426,7 @@ def __call__(self, *args) -> str: class ExposureMetricResolver(BaseResolver): def __call__(self, *args) -> str: if len(args) not in (1, 2): - raise MetricInvalidArgs(node=self.model, args=args) + raise MetricArgsError(node=self.model, args=args) self.model.metrics.append(list(args)) return "" @@ -1468,14 +1468,14 @@ def __call__(self, *args) -> str: elif len(args) == 2: package, name = args else: - raise RefInvalidArgs(node=self.model, args=args) + raise RefArgsError(node=self.model, args=args) self.validate_args(name, package) self.model.refs.append(list(args)) return "" def validate_args(self, name, package): if not isinstance(name, str): - raise ParsingException( + raise ParsingError( f"In a metrics section in {self.model.original_file_path} " "the name argument to ref() must be a string" ) @@ -1558,7 +1558,7 @@ def _build_test_namespace(self): def env_var(self, var: str, default: Optional[str] = None) -> str: return_value = None if var.startswith(SECRET_ENV_PREFIX): - raise DisallowSecretEnvVar(var) + raise SecretEnvVarLocationError(var) if var in os.environ: return_value = os.environ[var] elif default is not None: @@ -1584,7 +1584,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: source_file.add_env_var(var, yaml_key, name) # type: ignore[union-attr] return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) def generate_test_context( diff --git a/core/dbt/context/secret.py b/core/dbt/context/secret.py index da13509ef50..4d8ff342aff 100644 --- a/core/dbt/context/secret.py +++ b/core/dbt/context/secret.py @@ -4,7 +4,7 @@ from .base import BaseContext, contextmember from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER -from dbt.exceptions import EnvVarMissing +from dbt.exceptions import EnvVarMissingError SECRET_PLACEHOLDER = "$$$DBT_SECRET_START$$${}$$$DBT_SECRET_END$$$" @@ -50,7 +50,7 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: self.env_vars[var] = return_value if var in os.environ else DEFAULT_ENV_PLACEHOLDER return return_value else: - raise EnvVarMissing(var) + raise EnvVarMissingError(var) def generate_secret_context(cli_vars: Dict[str, Any]) -> Dict[str, Any]: diff --git a/core/dbt/contracts/connection.py b/core/dbt/contracts/connection.py index fe4ae912229..3f12a603363 100644 --- a/core/dbt/contracts/connection.py +++ b/core/dbt/contracts/connection.py @@ -12,7 +12,7 @@ List, Callable, ) -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.utils import translate_aliases from dbt.events.functions import fire_event from dbt.events.types import NewConnectionOpening @@ -94,7 +94,7 @@ def handle(self): # this will actually change 'self._handle'. 
self._handle.resolve(self) except RecursionError as exc: - raise InternalException( + raise DbtInternalError( "A connection's open() method attempted to read the handle value" ) from exc return self._handle diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index c43012ec521..4dd2ddc2f33 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -40,10 +40,10 @@ from dbt.contracts.util import BaseArtifactMetadata, SourceKey, ArtifactMixin, schema_version from dbt.dataclass_schema import dbtClassMixin from dbt.exceptions import ( - CompilationException, - DuplicateResourceName, - DuplicateMacroInPackage, - DuplicateMaterializationName, + CompilationError, + DuplicateResourceNameError, + DuplicateMacroInPackageError, + DuplicateMaterializationNameError, ) from dbt.helper_types import PathSet from dbt.events.functions import fire_event @@ -102,7 +102,7 @@ def populate(self, manifest): def perform_lookup(self, unique_id: UniqueID, manifest) -> Documentation: if unique_id not in manifest.docs: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Doc {unique_id} found in cache but not found in manifest" ) return manifest.docs[unique_id] @@ -135,7 +135,7 @@ def populate(self, manifest): def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> SourceDefinition: if unique_id not in manifest.sources: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Source {unique_id} found in cache but not found in manifest" ) return manifest.sources[unique_id] @@ -173,7 +173,7 @@ def populate(self, manifest): def perform_lookup(self, unique_id: UniqueID, manifest) -> ManifestNode: if unique_id not in manifest.nodes: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Node {unique_id} found in cache but not found in manifest" ) return manifest.nodes[unique_id] @@ -206,7 +206,7 @@ def populate(self, manifest): def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> Metric: if unique_id not in manifest.metrics: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Metric {unique_id} found in cache but not found in manifest" ) return manifest.metrics[unique_id] @@ -398,7 +398,7 @@ def __eq__(self, other: object) -> bool: return NotImplemented equal = self.specificity == other.specificity and self.locality == other.locality if equal: - raise DuplicateMaterializationName(self.macro, other) + raise DuplicateMaterializationNameError(self.macro, other) return equal @@ -480,13 +480,13 @@ def _update_into(dest: MutableMapping[str, T], new_item: T): """ unique_id = new_item.unique_id if unique_id not in dest: - raise dbt.exceptions.RuntimeException( + raise dbt.exceptions.DbtRuntimeError( f"got an update_{new_item.resource_type} call with an " f"unrecognized {new_item.resource_type}: {new_item.unique_id}" ) existing = dest[unique_id] if new_item.original_file_path != existing.original_file_path: - raise dbt.exceptions.RuntimeException( + raise dbt.exceptions.DbtRuntimeError( f"cannot update a {new_item.resource_type} to have a new file path!" 
) dest[unique_id] = new_item @@ -839,7 +839,7 @@ def expect(self, unique_id: str) -> GraphMemberNode: return self.metrics[unique_id] else: # something terrible has happened - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( "Expected node {} not found in manifest".format(unique_id) ) @@ -1035,7 +1035,7 @@ def merge_from_artifact( def add_macro(self, source_file: SourceFile, macro: Macro): if macro.unique_id in self.macros: # detect that the macro exists and emit an error - raise DuplicateMacroInPackage(macro=macro, macro_mapping=self.macros) + raise DuplicateMacroInPackageError(macro=macro, macro_mapping=self.macros) self.macros[macro.unique_id] = macro source_file.macros.append(macro.unique_id) @@ -1213,7 +1213,7 @@ def __post_serialize__(self, dct): def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]): if value.unique_id in src: - raise DuplicateResourceName(value, src[value.unique_id]) + raise DuplicateResourceNameError(value, src[value.unique_id]) K_T = TypeVar("K_T") @@ -1222,7 +1222,7 @@ def _check_duplicates(value: BaseNode, src: Mapping[str, BaseNode]): def _expect_value(key: K_T, src: Mapping[K_T, V_T], old_file: SourceFile, name: str) -> V_T: if key not in src: - raise CompilationException( + raise CompilationError( 'Expected to find "{}" in cached "result.{}" based ' "on cached file information: {}!".format(key, name, old_file) ) diff --git a/core/dbt/contracts/graph/model_config.py b/core/dbt/contracts/graph/model_config.py index b22f724de53..407c5435786 100644 --- a/core/dbt/contracts/graph/model_config.py +++ b/core/dbt/contracts/graph/model_config.py @@ -9,7 +9,7 @@ ) from dbt.contracts.graph.unparsed import AdditionalPropertiesAllowed, Docs from dbt.contracts.graph.utils import validate_color -from dbt.exceptions import InternalException, CompilationException +from dbt.exceptions import DbtInternalError, CompilationError from dbt.contracts.util import Replaceable, list_str from dbt import hooks from dbt.node_types import NodeType @@ -30,7 +30,7 @@ def _get_meta_value(cls: Type[M], fld: Field, key: str, default: Any) -> M: try: return cls(value) except ValueError as exc: - raise InternalException(f"Invalid {cls} value: {value}") from exc + raise DbtInternalError(f"Invalid {cls} value: {value}") from exc def _set_meta_value(obj: M, key: str, existing: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: @@ -140,17 +140,17 @@ def _merge_field_value( return _listify(self_value) + _listify(other_value) elif merge_behavior == MergeBehavior.Update: if not isinstance(self_value, dict): - raise InternalException(f"expected dict, got {self_value}") + raise DbtInternalError(f"expected dict, got {self_value}") if not isinstance(other_value, dict): - raise InternalException(f"expected dict, got {other_value}") + raise DbtInternalError(f"expected dict, got {other_value}") value = self_value.copy() value.update(other_value) return value elif merge_behavior == MergeBehavior.DictKeyAppend: if not isinstance(self_value, dict): - raise InternalException(f"expected dict, got {self_value}") + raise DbtInternalError(f"expected dict, got {self_value}") if not isinstance(other_value, dict): - raise InternalException(f"expected dict, got {other_value}") + raise DbtInternalError(f"expected dict, got {other_value}") new_dict = {} for key in self_value.keys(): new_dict[key] = _listify(self_value[key]) @@ -172,7 +172,7 @@ def _merge_field_value( return new_dict else: - raise InternalException(f"Got an invalid merge_behavior: {merge_behavior}") + raise 
DbtInternalError(f"Got an invalid merge_behavior: {merge_behavior}") def insensitive_patterns(*patterns: str): @@ -227,7 +227,7 @@ def __delitem__(self, key): msg = ( 'Error, tried to delete config key "{}": Cannot delete ' "built-in keys" ).format(key) - raise CompilationException(msg) + raise CompilationError(msg) else: del self._extra[key] diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py index ba2e48c7c9c..6521e644542 100644 --- a/core/dbt/contracts/graph/unparsed.py +++ b/core/dbt/contracts/graph/unparsed.py @@ -11,7 +11,7 @@ # trigger the PathEncoder import dbt.helper_types # noqa:F401 -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError from dbt.dataclass_schema import dbtClassMixin, StrEnum, ExtensibleDbtClassMixin, ValidationError @@ -222,7 +222,7 @@ class ExternalPartition(AdditionalPropertiesAllowed, Replaceable): def __post_init__(self): if self.name == "" or self.data_type == "": - raise CompilationException("External partition columns must have names and data types") + raise CompilationError("External partition columns must have names and data types") @dataclass @@ -514,7 +514,7 @@ def validate(cls, data): errors.append("must contain only letters, numbers and underscores") if errors: - raise ParsingException( + raise ParsingError( f"The metric name '{data['name']}' is invalid. It {', '.join(e for e in errors)}" ) diff --git a/core/dbt/contracts/relation.py b/core/dbt/contracts/relation.py index e8cba2ad155..e557c358966 100644 --- a/core/dbt/contracts/relation.py +++ b/core/dbt/contracts/relation.py @@ -9,7 +9,7 @@ from dbt.dataclass_schema import dbtClassMixin, StrEnum from dbt.contracts.util import Replaceable -from dbt.exceptions import CompilationException, DataclassNotDict +from dbt.exceptions import CompilationError, DataclassNotDictError from dbt.utils import deep_merge @@ -43,10 +43,10 @@ def __getitem__(self, key): raise KeyError(key) from None def __iter__(self): - raise DataclassNotDict(self) + raise DataclassNotDictError(self) def __len__(self): - raise DataclassNotDict(self) + raise DataclassNotDictError(self) def incorporate(self, **kwargs): value = self.to_dict(omit_none=True) @@ -88,13 +88,11 @@ class Path(FakeAPIObject): def __post_init__(self): # handle pesky jinja2.Undefined sneaking in here and messing up rende if not isinstance(self.database, (type(None), str)): - raise CompilationException("Got an invalid path database: {}".format(self.database)) + raise CompilationError("Got an invalid path database: {}".format(self.database)) if not isinstance(self.schema, (type(None), str)): - raise CompilationException("Got an invalid path schema: {}".format(self.schema)) + raise CompilationError("Got an invalid path schema: {}".format(self.schema)) if not isinstance(self.identifier, (type(None), str)): - raise CompilationException( - "Got an invalid path identifier: {}".format(self.identifier) - ) + raise CompilationError("Got an invalid path identifier: {}".format(self.identifier)) def get_lowered_part(self, key: ComponentName) -> Optional[str]: part = self.get_part(key) diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index 97c43396e33..9243750284f 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -7,7 +7,7 @@ Replaceable, schema_version, ) -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.events.functions import fire_event from dbt.events.types 
import TimingInfoCollected from dbt.events.proto_types import RunResultMsg, TimingInfoMsg @@ -343,14 +343,14 @@ def process_freshness_result(result: FreshnessNodeResult) -> FreshnessNodeOutput # we know that this must be a SourceFreshnessResult if not isinstance(result, SourceFreshnessResult): - raise InternalException( + raise DbtInternalError( "Got {} instead of a SourceFreshnessResult for a " "non-error result in freshness execution!".format(type(result)) ) # if we're here, we must have a non-None freshness threshold criteria = result.node.freshness if criteria is None: - raise InternalException( + raise DbtInternalError( "Somehow evaluated a freshness result for a source that has no freshness criteria!" ) return SourceFreshnessOutput( diff --git a/core/dbt/contracts/state.py b/core/dbt/contracts/state.py index 9940a0cb93d..cb135e241ac 100644 --- a/core/dbt/contracts/state.py +++ b/core/dbt/contracts/state.py @@ -3,7 +3,7 @@ from .results import RunResultsArtifact from .results import FreshnessExecutionResultArtifact from typing import Optional -from dbt.exceptions import IncompatibleSchemaException +from dbt.exceptions import IncompatibleSchemaError class PreviousState: @@ -19,7 +19,7 @@ def __init__(self, path: Path, current_path: Path): if manifest_path.exists() and manifest_path.is_file(): try: self.manifest = WritableManifest.read_and_check_versions(str(manifest_path)) - except IncompatibleSchemaException as exc: + except IncompatibleSchemaError as exc: exc.add_filename(str(manifest_path)) raise @@ -27,7 +27,7 @@ def __init__(self, path: Path, current_path: Path): if results_path.exists() and results_path.is_file(): try: self.results = RunResultsArtifact.read_and_check_versions(str(results_path)) - except IncompatibleSchemaException as exc: + except IncompatibleSchemaError as exc: exc.add_filename(str(results_path)) raise @@ -37,7 +37,7 @@ def __init__(self, path: Path, current_path: Path): self.sources = FreshnessExecutionResultArtifact.read_and_check_versions( str(sources_path) ) - except IncompatibleSchemaException as exc: + except IncompatibleSchemaError as exc: exc.add_filename(str(sources_path)) raise @@ -47,6 +47,6 @@ def __init__(self, path: Path, current_path: Path): self.sources_current = FreshnessExecutionResultArtifact.read_and_check_versions( str(sources_current_path) ) - except IncompatibleSchemaException as exc: + except IncompatibleSchemaError as exc: exc.add_filename(str(sources_current_path)) raise diff --git a/core/dbt/contracts/util.py b/core/dbt/contracts/util.py index 99f7a35c66d..fb2af2dac59 100644 --- a/core/dbt/contracts/util.py +++ b/core/dbt/contracts/util.py @@ -5,9 +5,9 @@ from dbt.clients.system import write_json, read_json from dbt import deprecations from dbt.exceptions import ( - InternalException, - RuntimeException, - IncompatibleSchemaException, + DbtInternalError, + DbtRuntimeError, + IncompatibleSchemaError, ) from dbt.version import __version__ from dbt.events.functions import get_invocation_id, get_metadata_vars @@ -123,7 +123,7 @@ def read(cls, path: str): try: data = read_json(path) except (EnvironmentError, ValueError) as exc: - raise RuntimeException( + raise DbtRuntimeError( f'Could not read {cls.__name__} at "{path}" as JSON: {exc}' ) from exc @@ -320,7 +320,7 @@ def read_and_check_versions(cls, path: str): try: data = read_json(path) except (EnvironmentError, ValueError) as exc: - raise RuntimeException( + raise DbtRuntimeError( f'Could not read {cls.__name__} at "{path}" as JSON: {exc}' ) from exc @@ -332,7 +332,7 @@ def 
read_and_check_versions(cls, path: str): previous_schema_version = data["metadata"]["dbt_schema_version"] # cls.dbt_schema_version is a SchemaVersion object if not cls.is_compatible_version(previous_schema_version): - raise IncompatibleSchemaException( + raise IncompatibleSchemaError( expected=str(cls.dbt_schema_version), found=previous_schema_version, ) @@ -357,7 +357,7 @@ class ArtifactMixin(VersionedSchema, Writable, Readable): def validate(cls, data): super().validate(data) if cls.dbt_schema_version is None: - raise InternalException("Cannot call from_dict with no schema version!") + raise DbtInternalError("Cannot call from_dict with no schema version!") class Identifier(ValidatedStringMixin): diff --git a/core/dbt/deps/git.py b/core/dbt/deps/git.py index 5d7a1331c58..a32f91ee158 100644 --- a/core/dbt/deps/git.py +++ b/core/dbt/deps/git.py @@ -9,7 +9,7 @@ GitPackage, ) from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path -from dbt.exceptions import ExecutableError, MultipleVersionGitDeps +from dbt.exceptions import ExecutableError, MultipleVersionGitDepsError from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import EnsureGitInstalled, DepsUnpinned @@ -143,7 +143,7 @@ def resolved(self) -> GitPinnedPackage: if len(requested) == 0: requested = {"HEAD"} elif len(requested) > 1: - raise MultipleVersionGitDeps(self.git, requested) + raise MultipleVersionGitDepsError(self.git, requested) return GitPinnedPackage( git=self.git, diff --git a/core/dbt/deps/registry.py b/core/dbt/deps/registry.py index f3398f4b16f..e1f39a7551d 100644 --- a/core/dbt/deps/registry.py +++ b/core/dbt/deps/registry.py @@ -10,10 +10,10 @@ ) from dbt.deps.base import PinnedPackage, UnpinnedPackage from dbt.exceptions import ( - DependencyException, - PackageNotFound, - PackageVersionNotFound, - VersionsNotCompatibleException, + DependencyError, + PackageNotFoundError, + PackageVersionNotFoundError, + VersionsNotCompatibleError, ) @@ -71,7 +71,7 @@ def __init__( def _check_in_index(self): index = registry.index_cached() if self.package not in index: - raise PackageNotFound(self.package) + raise PackageNotFoundError(self.package) @classmethod def from_contract(cls, contract: RegistryPackage) -> "RegistryUnpinnedPackage": @@ -95,9 +95,9 @@ def resolved(self) -> RegistryPinnedPackage: self._check_in_index() try: range_ = semver.reduce_versions(*self.versions) - except VersionsNotCompatibleException as e: + except VersionsNotCompatibleError as e: new_msg = "Version error for package {}: {}".format(self.name, e) - raise DependencyException(new_msg) from e + raise DependencyError(new_msg) from e should_version_check = bool(flags.VERSION_CHECK) dbt_version = get_installed_version() @@ -118,7 +118,9 @@ def resolved(self) -> RegistryPinnedPackage: target = None if not target: # raise an exception if no installable target version is found - raise PackageVersionNotFound(self.package, range_, installable, should_version_check) + raise PackageVersionNotFoundError( + self.package, range_, installable, should_version_check + ) latest_compatible = installable[-1] return RegistryPinnedPackage( package=self.package, version=target, version_latest=latest_compatible diff --git a/core/dbt/deps/resolver.py b/core/dbt/deps/resolver.py index 323e2f562c1..db57ef0f641 100644 --- a/core/dbt/deps/resolver.py +++ b/core/dbt/deps/resolver.py @@ -2,10 +2,10 @@ from typing import Dict, List, NoReturn, Union, Type, Iterator, Set from dbt.exceptions import ( - DuplicateDependencyToRoot, - 
DuplicateProjectDependency, - MismatchedDependencyTypes, - InternalException, + DuplicateDependencyToRootError, + DuplicateProjectDependencyError, + MismatchedDependencyTypeError, + DbtInternalError, ) from dbt.config import Project, RuntimeConfig @@ -56,7 +56,7 @@ def __setitem__(self, key: BasePackage, value): self.packages[key_str] = value def _mismatched_types(self, old: UnpinnedPackage, new: UnpinnedPackage) -> NoReturn: - raise MismatchedDependencyTypes(new, old) + raise MismatchedDependencyTypeError(new, old) def incorporate(self, package: UnpinnedPackage): key: str = self._pick_key(package) @@ -80,7 +80,7 @@ def update_from(self, src: List[PackageContract]) -> None: elif isinstance(contract, RegistryPackage): pkg = RegistryUnpinnedPackage.from_contract(contract) else: - raise InternalException("Invalid package type {}".format(type(contract))) + raise DbtInternalError("Invalid package type {}".format(type(contract))) self.incorporate(pkg) @classmethod @@ -107,9 +107,9 @@ def _check_for_duplicate_project_names( for package in final_deps: project_name = package.get_project_name(config, renderer) if project_name in seen: - raise DuplicateProjectDependency(project_name) + raise DuplicateProjectDependencyError(project_name) elif project_name == config.project_name: - raise DuplicateDependencyToRoot(project_name) + raise DuplicateDependencyToRootError(project_name) seen.add(project_name) diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index 9722fb5fecf..06e5a89965c 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -163,9 +163,9 @@ def msg_to_dict(msg: EventMsg) -> dict: def warn_or_error(event, node=None): if flags.WARN_ERROR: # TODO: resolve this circular import when at top - from dbt.exceptions import EventCompilationException + from dbt.exceptions import EventCompilationError - raise EventCompilationException(event.message(), node) + raise EventCompilationError(event.message(), node) else: fire_event(event) diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 746ce294067..536e0c6c7cc 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -429,19 +429,19 @@ class ExposureNameDeprecationMsg(betterproto.Message): @dataclass -class FunctionDeprecated(betterproto.Message): +class InternalDeprecation(betterproto.Message): """D008""" - function_name: str = betterproto.string_field(1) + name: str = betterproto.string_field(1) reason: str = betterproto.string_field(2) suggested_action: str = betterproto.string_field(3) version: str = betterproto.string_field(4) @dataclass -class FunctionDeprecatedMsg(betterproto.Message): +class InternalDeprecationMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "FunctionDeprecated" = betterproto.message_field(2) + data: "InternalDeprecation" = betterproto.message_field(2) @dataclass @@ -999,20 +999,20 @@ class MacroFileParseMsg(betterproto.Message): @dataclass -class PartialParsingExceptionProcessingFile(betterproto.Message): +class PartialParsingErrorProcessingFile(betterproto.Message): """I014""" file: str = betterproto.string_field(1) @dataclass -class PartialParsingExceptionProcessingFileMsg(betterproto.Message): +class PartialParsingErrorProcessingFileMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "PartialParsingExceptionProcessingFile" = betterproto.message_field(2) + data: "PartialParsingErrorProcessingFile" = betterproto.message_field(2) @dataclass -class 
PartialParsingException(betterproto.Message): +class PartialParsingError(betterproto.Message): """I016""" exc_info: Dict[str, str] = betterproto.map_field( @@ -1021,9 +1021,9 @@ class PartialParsingException(betterproto.Message): @dataclass -class PartialParsingExceptionMsg(betterproto.Message): +class PartialParsingErrorMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "PartialParsingException" = betterproto.message_field(2) + data: "PartialParsingError" = betterproto.message_field(2) @dataclass @@ -2239,7 +2239,7 @@ class CatchableExceptionOnRunMsg(betterproto.Message): @dataclass -class InternalExceptionOnRun(betterproto.Message): +class InternalErrorOnRun(betterproto.Message): """W003""" build_path: str = betterproto.string_field(1) @@ -2247,9 +2247,9 @@ class InternalExceptionOnRunMsg(betterproto.Message): +class InternalErrorOnRunMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "InternalExceptionOnRun" = betterproto.message_field(2) + data: "InternalErrorOnRun" = betterproto.message_field(2) @dataclass diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 85d46692089..80510687f81 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -336,16 +336,16 @@ message ExposureNameDeprecationMsg { } //D008 -message FunctionDeprecated { - string function_name = 1; +message InternalDeprecation { + string name = 1; string reason = 2; string suggested_action = 3; string version = 4; } -message FunctionDeprecatedMsg { +message InternalDeprecationMsg { EventInfo info = 1; - FunctionDeprecated data = 2; + InternalDeprecation data = 2; } // E - DB Adapter @@ -797,23 +797,23 @@ message MacroFileParseMsg { // Skipping I013 // I014 -message PartialParsingExceptionProcessingFile { +message PartialParsingErrorProcessingFile { string file = 1; } -message PartialParsingExceptionProcessingFileMsg { +message PartialParsingErrorProcessingFileMsg { EventInfo info = 1; - PartialParsingExceptionProcessingFile data = 2; + PartialParsingErrorProcessingFile data = 2; } // I016 -message PartialParsingException { +message PartialParsingError { map<string, string> exc_info = 1; } -message PartialParsingExceptionMsg { +message PartialParsingErrorMsg { EventInfo info = 1; - PartialParsingException data = 2; + PartialParsingError data = 2; } // I017 @@ -825,10 +825,8 @@ message PartialParsingSkipParsingMsg { PartialParsingSkipParsing data = 2; } - // Skipped I018, I019, I020, I021, I022, I023 - // I024 message UnableToPartialParse { string reason = 1; @@ -1795,14 +1793,14 @@ message CatchableExceptionOnRunMsg { } // W003 -message InternalExceptionOnRun { +message InternalErrorOnRun { string build_path = 1; string exc = 2; } -message InternalExceptionOnRunMsg { +message InternalErrorOnRunMsg { EventInfo info = 1; - InternalExceptionOnRun data = 2; + InternalErrorOnRun data = 2; } // W004 diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index b76188a8c97..cfa0f1feaa4 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -378,7 +378,7 @@ def message(self): @dataclass -class FunctionDeprecated(WarnLevel, pt.FunctionDeprecated): +class InternalDeprecation(WarnLevel, pt.InternalDeprecation): def code(self): return "D008" @@ -387,7 +387,7 @@ def message(self): if self.reason: extra_reason = f"\n{self.reason}" msg = ( - f"`{self.function_name}` is deprecated and will be removed in dbt-core version {self.version}\n\n" + f"`{self.name}` is
deprecated and will be removed in dbt-core version {self.version}\n\n" f"Adapter maintainers can resolve this deprecation by {self.suggested_action}. {extra_reason}" ) return warning_tag(msg) @@ -802,7 +802,7 @@ def message(self) -> str: @dataclass -class PartialParsingExceptionProcessingFile(DebugLevel, pt.PartialParsingExceptionProcessingFile): +class PartialParsingErrorProcessingFile(DebugLevel, pt.PartialParsingErrorProcessingFile): def code(self): return "I014" @@ -814,7 +814,7 @@ def message(self) -> str: @dataclass -class PartialParsingException(DebugLevel, pt.PartialParsingException): +class PartialParsingError(DebugLevel, pt.PartialParsingError): def code(self): return "I016" @@ -1879,7 +1879,7 @@ def message(self) -> str: @dataclass -class InternalExceptionOnRun(DebugLevel, pt.InternalExceptionOnRun): +class InternalErrorOnRun(DebugLevel, pt.InternalErrorOnRun): def code(self): return "W003" diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index 7d8326cd352..f207496e9b1 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -38,7 +38,7 @@ def data(self): } -class InternalException(Exception): +class DbtInternalError(Exception): def __init__(self, msg: str): self.stack: List = [] self.msg = scrub_secrets(msg, env_secrets()) @@ -79,7 +79,7 @@ def __str__(self): return lines[0] + "\n" + "\n".join([" " + line for line in lines[1:]]) -class RuntimeException(RuntimeError, Exception): +class DbtRuntimeError(RuntimeError, Exception): CODE = 10001 MESSAGE = "Runtime error" @@ -172,72 +172,7 @@ def data(self): return result -class RPCFailureResult(RuntimeException): - CODE = 10002 - MESSAGE = "RPC execution error" - - -class RPCTimeoutException(RuntimeException): - CODE = 10008 - MESSAGE = "RPC timeout error" - - def __init__(self, timeout: Optional[float]): - super().__init__(self.MESSAGE) - self.timeout = timeout - - def data(self): - result = super().data() - result.update( - { - "timeout": self.timeout, - "message": f"RPC timed out after {self.timeout}s", - } - ) - return result - - -class RPCKilledException(RuntimeException): - CODE = 10009 - MESSAGE = "RPC process killed" - - def __init__(self, signum: int): - self.signum = signum - self.msg = f"RPC process killed by signal {self.signum}" - super().__init__(self.msg) - - def data(self): - return { - "signum": self.signum, - "message": self.msg, - } - - -class RPCCompiling(RuntimeException): - CODE = 10010 - MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status" - - def __init__(self, msg: str = None, node=None): - if msg is None: - msg = "compile in progress" - super().__init__(msg, node) - - -class RPCLoadException(RuntimeException): - CODE = 10011 - MESSAGE = ( - 'RPC server failed to compile project, call the "status" method for' " compile status" - ) - - def __init__(self, cause: Dict[str, Any]): - self.cause = cause - self.msg = f'{self.MESSAGE}: {self.cause["message"]}' - super().__init__(self.msg) - - def data(self): - return {"cause": self.cause, "message": self.msg} - - -class DatabaseException(RuntimeException): +class DbtDatabaseError(DbtRuntimeError): CODE = 10003 MESSAGE = "Database Error" @@ -247,14 +182,14 @@ def process_stack(self): if hasattr(self.node, "build_path") and self.node.build_path: lines.append(f"compiled Code at {self.node.build_path}") - return lines + RuntimeException.process_stack(self) + return lines + DbtRuntimeError.process_stack(self) @property def type(self): return "Database" -class CompilationException(RuntimeException): +class 
CompilationError(DbtRuntimeError): CODE = 10004 MESSAGE = "Compilation Error" @@ -274,16 +209,16 @@ def _fix_dupe_msg(self, path_1: str, path_2: str, name: str, type_name: str) -> ) -class RecursionException(RuntimeException): +class RecursionError(DbtRuntimeError): pass -class ValidationException(RuntimeException): +class DbtValidationError(DbtRuntimeError): CODE = 10005 MESSAGE = "Validation Error" -class ParsingException(RuntimeException): +class ParsingError(DbtRuntimeError): CODE = 10015 MESSAGE = "Parsing Error" @@ -293,7 +228,7 @@ def type(self): # TODO: this isn't raised in the core codebase. Is it raised elsewhere? -class JSONValidationException(ValidationException): +class JSONValidationError(DbtValidationError): def __init__(self, typename, errors): self.typename = typename self.errors = errors @@ -303,11 +238,11 @@ def __init__(self, typename, errors): def __reduce__(self): # see https://stackoverflow.com/a/36342588 for why this is necessary - return (JSONValidationException, (self.typename, self.errors)) + return (JSONValidationError, (self.typename, self.errors)) -class IncompatibleSchemaException(RuntimeException): - def __init__(self, expected: str, found: Optional[str]): +class IncompatibleSchemaError(DbtRuntimeError): + def __init__(self, expected: str, found: Optional[str] = None): self.expected = expected self.found = found self.filename = "input file" @@ -334,11 +269,11 @@ def get_message(self) -> str: MESSAGE = "Incompatible Schema" -class JinjaRenderingException(CompilationException): +class JinjaRenderingError(CompilationError): pass -class UndefinedMacroException(CompilationException): +class UndefinedMacroError(CompilationError): def __str__(self, prefix: str = "! ") -> str: msg = super().__str__(prefix) return ( @@ -348,28 +283,16 @@ def __str__(self, prefix: str = "! ") -> str: ) -class UnknownAsyncIDException(Exception): - CODE = 10012 - MESSAGE = "RPC server got an unknown async ID" - - def __init__(self, task_id): - self.task_id = task_id - - def __str__(self): - return f"{self.MESSAGE}: {self.task_id}" - - -class AliasException(ValidationException): +class AliasError(DbtValidationError): pass -class DependencyException(Exception): - # this can happen due to raise_dependency_error and its callers +class DependencyError(Exception): CODE = 10006 MESSAGE = "Dependency Error" -class DbtConfigError(RuntimeException): +class DbtConfigError(DbtRuntimeError): CODE = 10007 MESSAGE = "DBT Configuration Error" @@ -387,7 +310,7 @@ def __str__(self, prefix="! 
") -> str: return f"{msg}\n\nError encountered in {self.path}" -class FailFastException(RuntimeException): +class FailFastError(DbtRuntimeError): CODE = 10013 MESSAGE = "FailFast Error" @@ -412,7 +335,7 @@ class DbtProfileError(DbtConfigError): pass -class SemverException(Exception): +class SemverError(Exception): def __init__(self, msg: str = None): self.msg = msg if msg is not None: @@ -421,22 +344,22 @@ def __init__(self, msg: str = None): super().__init__() -class VersionsNotCompatibleException(SemverException): +class VersionsNotCompatibleError(SemverError): pass -class NotImplementedException(Exception): +class NotImplementedError(Exception): def __init__(self, msg: str): self.msg = msg self.formatted_msg = f"ERROR: {self.msg}" super().__init__(self.formatted_msg) -class FailedToConnectException(DatabaseException): +class FailedToConnectError(DbtDatabaseError): pass -class CommandError(RuntimeException): +class CommandError(DbtRuntimeError): def __init__(self, cwd: str, cmd: List[str], msg: str = "Error running command"): cmd_scrubbed = list(scrub_secrets(cmd_txt, env_secrets()) for cmd_txt in cmd) super().__init__(msg) @@ -483,7 +406,7 @@ def __str__(self): return f"{self.msg} running: {self.cmd}" -class InvalidConnectionException(RuntimeException): +class InvalidConnectionError(DbtRuntimeError): def __init__(self, thread_id, known: List): self.thread_id = thread_id self.known = known @@ -492,17 +415,17 @@ def __init__(self, thread_id, known: List): ) -class InvalidSelectorException(RuntimeException): +class InvalidSelectorError(DbtRuntimeError): def __init__(self, name: str): self.name = name super().__init__(name) -class DuplicateYamlKeyException(CompilationException): +class DuplicateYamlKeyError(CompilationError): pass -class ConnectionException(Exception): +class ConnectionError(Exception): """ There was a problem with the connection that returned a bad response, timed out, or resulted in a file that is corrupt. 
@@ -512,7 +435,7 @@ class ConnectionException(Exception): # event level exception -class EventCompilationException(CompilationException): +class EventCompilationError(CompilationError): def __init__(self, msg: str, node): self.msg = scrub_secrets(msg, env_secrets()) self.node = node @@ -520,7 +443,7 @@ def __init__(self, msg: str, node): # compilation level exceptions -class GraphDependencyNotFound(CompilationException): +class GraphDependencyNotFoundError(CompilationError): def __init__(self, node, dependency: str): self.node = node self.dependency = dependency @@ -534,21 +457,21 @@ def get_message(self) -> str: # client level exceptions -class NoSupportedLanguagesFound(CompilationException): +class NoSupportedLanguagesFoundError(CompilationError): def __init__(self, node): self.node = node self.msg = f"No supported_languages found in materialization macro {self.node.name}" super().__init__(msg=self.msg) -class MaterializtionMacroNotUsed(CompilationException): +class MaterializtionMacroNotUsedError(CompilationError): def __init__(self, node): self.node = node self.msg = "Only materialization macros can be used with this function" super().__init__(msg=self.msg) -class UndefinedCompilation(CompilationException): +class UndefinedCompilationError(CompilationError): def __init__(self, name: str, node): self.name = name self.node = node @@ -556,20 +479,20 @@ def __init__(self, name: str, node): super().__init__(msg=self.msg) -class CaughtMacroExceptionWithNode(CompilationException): +class CaughtMacroErrorWithNodeError(CompilationError): def __init__(self, exc, node): self.exc = exc self.node = node super().__init__(msg=str(exc)) -class CaughtMacroException(CompilationException): +class CaughtMacroError(CompilationError): def __init__(self, exc): self.exc = exc super().__init__(msg=str(exc)) -class MacroNameNotString(CompilationException): +class MacroNameNotStringError(CompilationError): def __init__(self, kwarg_value): self.kwarg_value = kwarg_value super().__init__(msg=self.get_message()) @@ -582,7 +505,7 @@ def get_message(self) -> str: return msg -class MissingControlFlowStartTag(CompilationException): +class MissingControlFlowStartTagError(CompilationError): def __init__(self, tag, expected_tag: str, tag_parser): self.tag = tag self.expected_tag = expected_tag @@ -598,7 +521,7 @@ def get_message(self) -> str: return msg -class UnexpectedControlFlowEndTag(CompilationException): +class UnexpectedControlFlowEndTagError(CompilationError): def __init__(self, tag, expected_tag: str, tag_parser): self.tag = tag self.expected_tag = expected_tag @@ -614,7 +537,7 @@ def get_message(self) -> str: return msg -class UnexpectedMacroEOF(CompilationException): +class UnexpectedMacroEOFError(CompilationError): def __init__(self, expected_name: str, actual_name: str): self.expected_name = expected_name self.actual_name = actual_name @@ -625,7 +548,7 @@ def get_message(self) -> str: return msg -class MacroNamespaceNotString(CompilationException): +class MacroNamespaceNotStringError(CompilationError): def __init__(self, kwarg_type: Any): self.kwarg_type = kwarg_type super().__init__(msg=self.get_message()) @@ -638,7 +561,7 @@ def get_message(self) -> str: return msg -class NestedTags(CompilationException): +class NestedTagsError(CompilationError): def __init__(self, outer, inner): self.outer = outer self.inner = inner @@ -653,7 +576,7 @@ def get_message(self) -> str: return msg -class BlockDefinitionNotAtTop(CompilationException): +class BlockDefinitionNotAtTopError(CompilationError): def __init__(self, 
tag_parser, tag_start): self.tag_parser = tag_parser self.tag_start = tag_start @@ -668,7 +591,7 @@ def get_message(self) -> str: return msg -class MissingCloseTag(CompilationException): +class MissingCloseTagError(CompilationError): def __init__(self, block_type_name: str, linecount: int): self.block_type_name = block_type_name self.linecount = linecount @@ -679,7 +602,7 @@ def get_message(self) -> str: return msg -class GitCloningProblem(RuntimeException): +class UnknownGitCloningProblemError(DbtRuntimeError): def __init__(self, repo: str): self.repo = scrub_secrets(repo, env_secrets()) super().__init__(msg=self.get_message()) @@ -692,7 +615,7 @@ def get_message(self) -> str: return msg -class BadSpecError(InternalException): +class BadSpecError(DbtInternalError): def __init__(self, repo, revision, error): self.repo = repo self.revision = revision @@ -704,7 +627,7 @@ def get_message(self) -> str: return msg -class GitCloningError(InternalException): +class GitCloningError(DbtInternalError): def __init__(self, repo: str, revision: str, error: CommandResultError): self.repo = repo self.revision = revision @@ -727,7 +650,7 @@ class GitCheckoutError(BadSpecError): pass -class InvalidMaterializationArg(CompilationException): +class MaterializationArgError(CompilationError): def __init__(self, name: str, argument: str): self.name = name self.argument = argument @@ -738,7 +661,7 @@ def get_message(self) -> str: return msg -class OperationException(CompilationException): +class OperationError(CompilationError): def __init__(self, operation_name): self.operation_name = operation_name super().__init__(msg=self.get_message()) @@ -753,7 +676,7 @@ def get_message(self) -> str: return msg -class SymbolicLinkError(CompilationException): +class SymbolicLinkError(CompilationError): def __init__(self): super().__init__(msg=self.get_message()) @@ -768,23 +691,21 @@ def get_message(self) -> str: # context level exceptions - - -class ZipStrictWrongType(CompilationException): +class ZipStrictWrongTypeError(CompilationError): def __init__(self, exc): self.exc = exc msg = str(self.exc) super().__init__(msg=msg) -class SetStrictWrongType(CompilationException): +class SetStrictWrongTypeError(CompilationError): def __init__(self, exc): self.exc = exc msg = str(self.exc) super().__init__(msg=msg) -class LoadAgateTableValueError(CompilationException): +class LoadAgateTableValueError(CompilationError): def __init__(self, exc: ValueError, node): self.exc = exc self.node = node @@ -792,7 +713,7 @@ def __init__(self, exc: ValueError, node): super().__init__(msg=msg) -class LoadAgateTableNotSeed(CompilationException): +class LoadAgateTableNotSeedError(CompilationError): def __init__(self, resource_type, node): self.resource_type = resource_type self.node = node @@ -800,14 +721,14 @@ def __init__(self, resource_type, node): super().__init__(msg=msg) -class MacrosSourcesUnWriteable(CompilationException): +class MacrosSourcesUnWriteableError(CompilationError): def __init__(self, node): self.node = node msg = 'cannot "write" macros or sources' super().__init__(msg=msg) -class PackageNotInDeps(CompilationException): +class PackageNotInDepsError(CompilationError): def __init__(self, package_name: str, node): self.package_name = package_name self.node = node @@ -815,7 +736,7 @@ def __init__(self, package_name: str, node): super().__init__(msg=msg) -class OperationsCannotRefEphemeralNodes(CompilationException): +class OperationsCannotRefEphemeralNodesError(CompilationError): def __init__(self, target_name: str, node): 
self.target_name = target_name self.node = node @@ -823,7 +744,7 @@ def __init__(self, target_name: str, node): super().__init__(msg=msg) -class InvalidPersistDocsValueType(CompilationException): +class PersistDocsValueTypeError(CompilationError): def __init__(self, persist_docs: Any): self.persist_docs = persist_docs msg = ( @@ -833,14 +754,14 @@ def __init__(self, persist_docs: Any): super().__init__(msg=msg) -class InvalidInlineModelConfig(CompilationException): +class InlineModelConfigError(CompilationError): def __init__(self, node): self.node = node msg = "Invalid inline model config" super().__init__(msg=msg) -class ConflictingConfigKeys(CompilationException): +class ConflictingConfigKeysError(CompilationError): def __init__(self, oldkey: str, newkey: str, node): self.oldkey = oldkey self.newkey = newkey @@ -849,7 +770,7 @@ def __init__(self, oldkey: str, newkey: str, node): super().__init__(msg=msg) -class InvalidNumberSourceArgs(CompilationException): +class NumberSourceArgsError(CompilationError): def __init__(self, args, node): self.args = args self.node = node @@ -857,7 +778,7 @@ def __init__(self, args, node): super().__init__(msg=msg) -class RequiredVarNotFound(CompilationException): +class RequiredVarNotFoundError(CompilationError): def __init__(self, var_name: str, merged: Dict, node): self.var_name = var_name self.merged = merged @@ -877,14 +798,14 @@ def get_message(self) -> str: return msg -class PackageNotFoundForMacro(CompilationException): +class PackageNotFoundForMacroError(CompilationError): def __init__(self, package_name: str): self.package_name = package_name msg = f"Could not find package '{self.package_name}'" super().__init__(msg=msg) -class DisallowSecretEnvVar(ParsingException): +class SecretEnvVarLocationError(ParsingError): def __init__(self, env_var_name: str): self.env_var_name = env_var_name super().__init__(msg=self.get_message()) @@ -897,7 +818,7 @@ def get_message(self) -> str: return msg -class InvalidMacroArgType(CompilationException): +class MacroArgTypeError(CompilationError): def __init__(self, method_name: str, arg_name: str, got_value: Any, expected_type): self.method_name = method_name self.arg_name = arg_name @@ -915,7 +836,7 @@ def get_message(self) -> str: return msg -class InvalidBoolean(CompilationException): +class BooleanError(CompilationError): def __init__(self, return_value: Any, macro_name: str): self.return_value = return_value self.macro_name = macro_name @@ -929,7 +850,7 @@ def get_message(self) -> str: return msg -class RefInvalidArgs(CompilationException): +class RefArgsError(CompilationError): def __init__(self, node, args): self.node = node self.args = args @@ -940,7 +861,7 @@ def get_message(self) -> str: return msg -class MetricInvalidArgs(CompilationException): +class MetricArgsError(CompilationError): def __init__(self, node, args): self.node = node self.args = args @@ -951,7 +872,7 @@ def get_message(self) -> str: return msg -class RefBadContext(CompilationException): +class RefBadContextError(CompilationError): def __init__(self, node, args): self.node = node self.args = args @@ -980,7 +901,7 @@ def get_message(self) -> str: return msg -class InvalidDocArgs(CompilationException): +class DocArgsError(CompilationError): def __init__(self, node, args): self.node = node self.args = args @@ -991,8 +912,8 @@ def get_message(self) -> str: return msg -class DocTargetNotFound(CompilationException): - def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str]): +class 
DocTargetNotFoundError(CompilationError): + def __init__(self, node, target_doc_name: str, target_doc_package: Optional[str] = None): self.node = node self.target_doc_name = target_doc_name self.target_doc_package = target_doc_package @@ -1006,7 +927,7 @@ def get_message(self) -> str: return msg -class MacroInvalidDispatchArg(CompilationException): +class MacroDispatchArgError(CompilationError): def __init__(self, macro_name: str): self.macro_name = macro_name super().__init__(msg=self.get_message()) @@ -1025,7 +946,7 @@ def get_message(self) -> str: return msg -class DuplicateMacroName(CompilationException): +class DuplicateMacroNameError(CompilationError): def __init__(self, node_1, node_2, namespace: str): self.node_1 = node_1 self.node_2 = node_2 @@ -1051,7 +972,7 @@ def get_message(self) -> str: # parser level exceptions -class InvalidDictParse(ParsingException): +class DictParseError(ParsingError): def __init__(self, exc: ValidationError, node): self.exc = exc self.node = node @@ -1059,7 +980,7 @@ def __init__(self, exc: ValidationError, node): super().__init__(msg=msg) -class InvalidConfigUpdate(ParsingException): +class ConfigUpdateError(ParsingError): def __init__(self, exc: ValidationError, node): self.exc = exc self.node = node @@ -1067,7 +988,7 @@ def __init__(self, exc: ValidationError, node): super().__init__(msg=msg) -class PythonParsingException(ParsingException): +class PythonParsingError(ParsingError): def __init__(self, exc: SyntaxError, node): self.exc = exc self.node = node @@ -1079,7 +1000,7 @@ def get_message(self) -> str: return msg -class PythonLiteralEval(ParsingException): +class PythonLiteralEvalError(ParsingError): def __init__(self, exc: Exception, node): self.exc = exc self.node = node @@ -1095,14 +1016,14 @@ def get_message(self) -> str: return msg -class InvalidModelConfig(ParsingException): +class ModelConfigError(ParsingError): def __init__(self, exc: ValidationError, node): self.msg = self.validator_error_message(exc) self.node = node super().__init__(msg=self.msg) -class YamlParseListFailure(ParsingException): +class YamlParseListError(ParsingError): def __init__( self, path: str, @@ -1127,7 +1048,7 @@ def get_message(self) -> str: return msg -class YamlParseDictFailure(ParsingException): +class YamlParseDictError(ParsingError): def __init__( self, path: str, @@ -1152,8 +1073,13 @@ def get_message(self) -> str: return msg -class YamlLoadFailure(ParsingException): - def __init__(self, project_name: Optional[str], path: str, exc: ValidationException): +class YamlLoadError(ParsingError): + def __init__( + self, + path: str, + exc: DbtValidationError, + project_name: Optional[str] = None, + ): self.project_name = project_name self.path = path self.exc = exc @@ -1167,49 +1093,54 @@ def get_message(self) -> str: return msg -class InvalidTestConfig(ParsingException): +class TestConfigError(ParsingError): def __init__(self, exc: ValidationError, node): self.msg = self.validator_error_message(exc) self.node = node super().__init__(msg=self.msg) -class InvalidSchemaConfig(ParsingException): +class SchemaConfigError(ParsingError): def __init__(self, exc: ValidationError, node): self.msg = self.validator_error_message(exc) self.node = node super().__init__(msg=self.msg) -class InvalidSnapshopConfig(ParsingException): +class SnapshopConfigError(ParsingError): def __init__(self, exc: ValidationError, node): self.msg = self.validator_error_message(exc) self.node = node super().__init__(msg=self.msg) -class SameKeyNested(CompilationException): +class 
SameKeyNestedError(CompilationError): def __init__(self): msg = "Test cannot have the same key at the top-level and in config" super().__init__(msg=msg) -class TestArgIncludesModel(CompilationException): +class TestArgIncludesModelError(CompilationError): def __init__(self): msg = 'Test arguments include "model", which is a reserved argument' super().__init__(msg=msg) -class UnexpectedTestNamePattern(CompilationException): +class UnexpectedTestNamePatternError(CompilationError): def __init__(self, test_name: str): self.test_name = test_name msg = f"Test name string did not match expected pattern: {self.test_name}" super().__init__(msg=msg) -class CustomMacroPopulatingConfigValues(CompilationException): +class CustomMacroPopulatingConfigValueError(CompilationError): def __init__( - self, target_name: str, column_name: Optional[str], name: str, key: str, err_msg: str + self, + target_name: str, + name: str, + key: str, + err_msg: str, + column_name: Optional[str] = None, ): self.target_name = target_name self.column_name = column_name @@ -1239,21 +1170,21 @@ def get_message(self) -> str: return msg -class TagsNotListOfStrings(CompilationException): +class TagsNotListOfStringsError(CompilationError): def __init__(self, tags: Any): self.tags = tags msg = f"got {self.tags} ({type(self.tags)}) for tags, expected a list of strings" super().__init__(msg=msg) -class TagNotString(CompilationException): +class TagNotStringError(CompilationError): def __init__(self, tag: Any): self.tag = tag msg = f"got {self.tag} ({type(self.tag)}) for tag, expected a str" super().__init__(msg=msg) -class TestNameNotString(ParsingException): +class TestNameNotStringError(ParsingError): def __init__(self, test_name: Any): self.test_name = test_name super().__init__(msg=self.get_message()) @@ -1264,7 +1195,7 @@ def get_message(self) -> str: return msg -class TestArgsNotDict(ParsingException): +class TestArgsNotDictError(ParsingError): def __init__(self, test_args: Any): self.test_args = test_args super().__init__(msg=self.get_message()) @@ -1275,7 +1206,7 @@ def get_message(self) -> str: return msg -class TestDefinitionDictLength(ParsingException): +class TestDefinitionDictLengthError(ParsingError): def __init__(self, test): self.test = test super().__init__(msg=self.get_message()) @@ -1289,7 +1220,7 @@ def get_message(self) -> str: return msg -class TestInvalidType(ParsingException): +class TestTypeError(ParsingError): def __init__(self, test: Any): self.test = test super().__init__(msg=self.get_message()) @@ -1300,7 +1231,7 @@ def get_message(self) -> str: # This is triggered across multiple files -class EnvVarMissing(ParsingException): +class EnvVarMissingError(ParsingError): def __init__(self, var: str): self.var = var super().__init__(msg=self.get_message()) @@ -1310,7 +1241,7 @@ def get_message(self) -> str: return msg -class TargetNotFound(CompilationException): +class TargetNotFoundError(CompilationError): def __init__( self, node, @@ -1349,7 +1280,7 @@ def get_message(self) -> str: return msg -class DuplicateSourcePatchName(CompilationException): +class DuplicateSourcePatchNameError(CompilationError): def __init__(self, patch_1, patch_2): self.patch_1 = patch_1 self.patch_2 = patch_2 @@ -1371,7 +1302,7 @@ def get_message(self) -> str: return msg -class DuplicateMacroPatchName(CompilationException): +class DuplicateMacroPatchNameError(CompilationError): def __init__(self, patch_1, existing_patch_path): self.patch_1 = patch_1 self.existing_patch_path = existing_patch_path @@ -1392,7 +1323,7 @@ def 
get_message(self) -> str: # core level exceptions -class DuplicateAlias(AliasException): +class DuplicateAliasError(AliasError): def __init__(self, kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str): self.kwargs = kwargs self.aliases = aliases @@ -1409,9 +1340,7 @@ def get_message(self) -> str: # Postgres Exceptions - - -class UnexpectedDbReference(NotImplementedException): +class UnexpectedDbReferenceError(NotImplementedError): def __init__(self, adapter, database, expected): self.adapter = adapter self.database = database @@ -1423,7 +1352,7 @@ def get_message(self) -> str: return msg -class CrossDbReferenceProhibited(CompilationException): +class CrossDbReferenceProhibitedError(CompilationError): def __init__(self, adapter, exc_msg: str): self.adapter = adapter self.exc_msg = exc_msg @@ -1434,7 +1363,7 @@ def get_message(self) -> str: return msg -class IndexConfigNotDict(CompilationException): +class IndexConfigNotDictError(CompilationError): def __init__(self, raw_index: Any): self.raw_index = raw_index super().__init__(msg=self.get_message()) @@ -1448,7 +1377,7 @@ def get_message(self) -> str: return msg -class InvalidIndexConfig(CompilationException): +class IndexConfigError(CompilationError): def __init__(self, exc: TypeError): self.exc = exc super().__init__(msg=self.get_message()) @@ -1460,7 +1389,7 @@ def get_message(self) -> str: # adapters exceptions -class InvalidMacroResult(CompilationException): +class MacroResultError(CompilationError): def __init__(self, freshness_macro_name: str, table): self.freshness_macro_name = freshness_macro_name self.table = table @@ -1472,7 +1401,7 @@ def get_message(self) -> str: return msg -class SnapshotTargetNotSnapshotTable(CompilationException): +class SnapshotTargetNotSnapshotTableError(CompilationError): def __init__(self, missing: List): self.missing = missing super().__init__(msg=self.get_message()) @@ -1484,7 +1413,7 @@ def get_message(self) -> str: return msg -class SnapshotTargetIncomplete(CompilationException): +class SnapshotTargetIncompleteError(CompilationError): def __init__(self, extra: List, missing: List): self.extra = extra self.missing = missing @@ -1500,7 +1429,7 @@ def get_message(self) -> str: return msg -class RenameToNoneAttempted(CompilationException): +class RenameToNoneAttemptedError(CompilationError): def __init__(self, src_name: str, dst_name: str, name: str): self.src_name = src_name self.dst_name = dst_name @@ -1509,21 +1438,21 @@ def __init__(self, src_name: str, dst_name: str, name: str): super().__init__(msg=self.msg) -class NullRelationDropAttempted(CompilationException): +class NullRelationDropAttemptedError(CompilationError): def __init__(self, name: str): self.name = name self.msg = f"Attempted to drop a null relation for {self.name}" super().__init__(msg=self.msg) -class NullRelationCacheAttempted(CompilationException): +class NullRelationCacheAttemptedError(CompilationError): def __init__(self, name: str): self.name = name self.msg = f"Attempted to cache a null relation for {self.name}" super().__init__(msg=self.msg) -class InvalidQuoteConfigType(CompilationException): +class QuoteConfigTypeError(CompilationError): def __init__(self, quote_config: Any): self.quote_config = quote_config super().__init__(msg=self.get_message()) @@ -1536,7 +1465,7 @@ def get_message(self) -> str: return msg -class MultipleDatabasesNotAllowed(CompilationException): +class MultipleDatabasesNotAllowedError(CompilationError): def __init__(self, databases): self.databases = databases 
super().__init__(msg=self.get_message()) @@ -1546,14 +1475,14 @@ def get_message(self) -> str: return msg -class RelationTypeNull(CompilationException): +class RelationTypeNullError(CompilationError): def __init__(self, relation): self.relation = relation self.msg = f"Tried to drop relation {self.relation}, but its type is null." super().__init__(msg=self.msg) -class MaterializationNotAvailable(CompilationException): +class MaterializationNotAvailableError(CompilationError): def __init__(self, materialization, adapter_type: str): self.materialization = materialization self.adapter_type = adapter_type @@ -1564,7 +1493,7 @@ def get_message(self) -> str: return msg -class RelationReturnedMultipleResults(CompilationException): +class RelationReturnedMultipleResultsError(CompilationError): def __init__(self, kwargs: Mapping[str, Any], matches: List): self.kwargs = kwargs self.matches = matches @@ -1579,7 +1508,7 @@ def get_message(self) -> str: return msg -class ApproximateMatch(CompilationException): +class ApproximateMatchError(CompilationError): def __init__(self, target, relation): self.target = target self.relation = relation @@ -1597,8 +1526,7 @@ def get_message(self) -> str: return msg -# adapters exceptions -class UnexpectedNull(DatabaseException): +class UnexpectedNullError(DbtDatabaseError): def __init__(self, field_name: str, source): self.field_name = field_name self.source = source @@ -1609,7 +1537,7 @@ def __init__(self, field_name: str, source): super().__init__(msg) -class UnexpectedNonTimestamp(DatabaseException): +class UnexpectedNonTimestampError(DbtDatabaseError): def __init__(self, field_name: str, source, dt: Any): self.field_name = field_name self.source = source @@ -1622,7 +1550,7 @@ def __init__(self, field_name: str, source, dt: Any): # deps exceptions -class MultipleVersionGitDeps(DependencyException): +class MultipleVersionGitDepsError(DependencyError): def __init__(self, git: str, requested): self.git = git self.requested = requested @@ -1633,7 +1561,7 @@ def __init__(self, git: str, requested): super().__init__(msg) -class DuplicateProjectDependency(DependencyException): +class DuplicateProjectDependencyError(DependencyError): def __init__(self, project_name: str): self.project_name = project_name msg = ( @@ -1643,7 +1571,7 @@ def __init__(self, project_name: str): super().__init__(msg) -class DuplicateDependencyToRoot(DependencyException): +class DuplicateDependencyToRootError(DependencyError): def __init__(self, project_name: str): self.project_name = project_name msg = ( @@ -1654,7 +1582,7 @@ def __init__(self, project_name: str): super().__init__(msg) -class MismatchedDependencyTypes(DependencyException): +class MismatchedDependencyTypeError(DependencyError): def __init__(self, new, old): self.new = new self.old = old @@ -1665,7 +1593,7 @@ def __init__(self, new, old): super().__init__(msg) -class PackageVersionNotFound(DependencyException): +class PackageVersionNotFoundError(DependencyError): def __init__( self, package_name: str, @@ -1701,7 +1629,7 @@ def get_message(self) -> str: return msg -class PackageNotFound(DependencyException): +class PackageNotFoundError(DependencyError): def __init__(self, package_name: str): self.package_name = package_name msg = f"Package {self.package_name} was not found in the package index" @@ -1709,37 +1637,35 @@ def __init__(self, package_name: str): # config level exceptions - - -class ProfileConfigInvalid(DbtProfileError): +class ProfileConfigError(DbtProfileError): def __init__(self, exc: ValidationError): self.exc = exc 
msg = self.validator_error_message(self.exc) super().__init__(msg=msg) -class ProjectContractInvalid(DbtProjectError): +class ProjectContractError(DbtProjectError): def __init__(self, exc: ValidationError): self.exc = exc msg = self.validator_error_message(self.exc) super().__init__(msg=msg) -class ProjectContractBroken(DbtProjectError): +class ProjectContractBrokenError(DbtProjectError): def __init__(self, exc: ValidationError): self.exc = exc msg = self.validator_error_message(self.exc) super().__init__(msg=msg) -class ConfigContractBroken(DbtProjectError): +class ConfigContractBrokenError(DbtProjectError): def __init__(self, exc: ValidationError): self.exc = exc msg = self.validator_error_message(self.exc) super().__init__(msg=msg) -class NonUniquePackageName(CompilationException): +class NonUniquePackageNameError(CompilationError): def __init__(self, project_name: str): self.project_name = project_name super().__init__(msg=self.get_message()) @@ -1754,7 +1680,7 @@ def get_message(self) -> str: return msg -class UninstalledPackagesFound(CompilationException): +class UninstalledPackagesFoundError(CompilationError): def __init__( self, count_packages_specified: int, @@ -1777,7 +1703,7 @@ def get_message(self) -> str: return msg -class VarsArgNotYamlDict(CompilationException): +class VarsArgNotYamlDictError(CompilationError): def __init__(self, var_type): self.var_type = var_type super().__init__(msg=self.get_message()) @@ -1790,7 +1716,7 @@ def get_message(self) -> str: # contracts level -class UnrecognizedCredentialType(CompilationException): +class UnrecognizedCredentialTypeError(CompilationError): def __init__(self, typename: str, supported_types: List): self.typename = typename self.supported_types = supported_types @@ -1803,7 +1729,7 @@ def get_message(self) -> str: return msg -class DuplicateMacroInPackage(CompilationException): +class DuplicateMacroInPackageError(CompilationError): def __init__(self, macro, macro_mapping: Mapping): self.macro = macro self.macro_mapping = macro_mapping @@ -1832,7 +1758,7 @@ def get_message(self) -> str: return msg -class DuplicateMaterializationName(CompilationException): +class DuplicateMaterializationNameError(CompilationError): def __init__(self, macro, other_macro): self.macro = macro self.other_macro = other_macro @@ -1852,7 +1778,7 @@ def get_message(self) -> str: # jinja exceptions -class PatchTargetNotFound(CompilationException): +class PatchTargetNotFoundError(CompilationError): def __init__(self, patches: Dict): self.patches = patches super().__init__(msg=self.get_message()) @@ -1866,7 +1792,7 @@ def get_message(self) -> str: return msg -class MacroNotFound(CompilationException): +class MacroNotFoundError(CompilationError): def __init__(self, node, target_macro_id: str): self.node = node self.target_macro_id = target_macro_id @@ -1875,7 +1801,7 @@ def __init__(self, node, target_macro_id: str): super().__init__(msg=msg) -class MissingConfig(CompilationException): +class MissingConfigError(CompilationError): def __init__(self, unique_id: str, name: str): self.unique_id = unique_id self.name = name @@ -1885,7 +1811,7 @@ def __init__(self, unique_id: str, name: str): super().__init__(msg=msg) -class MissingMaterialization(CompilationException): +class MissingMaterializationError(CompilationError): def __init__(self, materialization, adapter_type): self.materialization = materialization self.adapter_type = adapter_type @@ -1902,7 +1828,7 @@ def get_message(self) -> str: return msg -class MissingRelation(CompilationException): +class 
MissingRelationError(CompilationError): def __init__(self, relation, model=None): self.relation = relation self.model = model @@ -1910,7 +1836,7 @@ def __init__(self, relation, model=None): super().__init__(msg=msg) -class AmbiguousAlias(CompilationException): +class AmbiguousAliasError(CompilationError): def __init__(self, node_1, node_2, duped_name=None): self.node_1 = node_1 self.node_2 = node_2 @@ -1931,7 +1857,7 @@ def get_message(self) -> str: return msg -class AmbiguousCatalogMatch(CompilationException): +class AmbiguousCatalogMatchError(CompilationError): def __init__(self, unique_id: str, match_1, match_2): self.unique_id = unique_id self.match_1 = match_1 @@ -1955,14 +1881,14 @@ def get_message(self) -> str: return msg -class CacheInconsistency(InternalException): +class CacheInconsistencyError(DbtInternalError): def __init__(self, msg: str): self.msg = msg formatted_msg = f"Cache inconsistency detected: {self.msg}" super().__init__(msg=formatted_msg) -class NewNameAlreadyInCache(CacheInconsistency): +class NewNameAlreadyInCacheError(CacheInconsistencyError): def __init__(self, old_key: str, new_key: str): self.old_key = old_key self.new_key = new_key @@ -1972,21 +1898,21 @@ def __init__(self, old_key: str, new_key: str): super().__init__(msg) -class ReferencedLinkNotCached(CacheInconsistency): +class ReferencedLinkNotCachedError(CacheInconsistencyError): def __init__(self, referenced_key: str): self.referenced_key = referenced_key msg = f"in add_link, referenced link key {self.referenced_key} not in cache!" super().__init__(msg) -class DependentLinkNotCached(CacheInconsistency): +class DependentLinkNotCachedError(CacheInconsistencyError): def __init__(self, dependent_key: str): self.dependent_key = dependent_key msg = f"in add_link, dependent link key {self.dependent_key} not in cache!" super().__init__(msg) -class TruncatedModelNameCausedCollision(CacheInconsistency): +class TruncatedModelNameCausedCollisionError(CacheInconsistencyError): def __init__(self, new_key, relations: Dict): self.new_key = new_key self.relations = relations @@ -2013,14 +1939,14 @@ def get_message(self) -> str: return msg -class NoneRelationFound(CacheInconsistency): +class NoneRelationFoundError(CacheInconsistencyError): def __init__(self): msg = "in get_relations, a None relation was found in the cache!" super().__init__(msg) # this is part of the context and also raised in dbt.contracts.relation.py -class DataclassNotDict(CompilationException): +class DataclassNotDictError(CompilationError): def __init__(self, obj: Any): self.obj = obj super().__init__(msg=self.get_message()) @@ -2034,7 +1960,7 @@ def get_message(self) -> str: return msg -class DependencyNotFound(CompilationException): +class DependencyNotFoundError(CompilationError): def __init__(self, node, node_description, required_pkg): self.node = node self.node_description = node_description @@ -2051,7 +1977,7 @@ def get_message(self) -> str: return msg -class DuplicatePatchPath(CompilationException): +class DuplicatePatchPathError(CompilationError): def __init__(self, patch_1, existing_patch_path): self.patch_1 = patch_1 self.existing_patch_path = existing_patch_path @@ -2073,8 +1999,8 @@ def get_message(self) -> str: return msg -# should this inherit ParsingException instead? -class DuplicateResourceName(CompilationException): +# should this inherit ParsingError instead? 
+class DuplicateResourceNameError(CompilationError): def __init__(self, node_1, node_2): self.node_1 = node_1 self.node_2 = node_2 @@ -2126,7 +2052,7 @@ def get_message(self) -> str: return msg -class InvalidPropertyYML(CompilationException): +class PropertyYMLError(CompilationError): def __init__(self, path: str, issue: str): self.path = path self.issue = issue @@ -2141,14 +2067,14 @@ def get_message(self) -> str: return msg -class PropertyYMLMissingVersion(InvalidPropertyYML): +class PropertyYMLMissingVersionError(PropertyYMLError): def __init__(self, path: str): self.path = path self.issue = f"the yml property file {self.path} is missing a version tag" super().__init__(self.path, self.issue) -class PropertyYMLVersionNotInt(InvalidPropertyYML): +class PropertyYMLVersionNotIntError(PropertyYMLError): def __init__(self, path: str, version: Any): self.path = path self.version = version @@ -2159,7 +2085,7 @@ def __init__(self, path: str, version: Any): super().__init__(self.path, self.issue) -class PropertyYMLInvalidTag(InvalidPropertyYML): +class PropertyYMLInvalidTagError(PropertyYMLError): def __init__(self, path: str, version: int): self.path = path self.version = version @@ -2167,7 +2093,7 @@ def __init__(self, path: str, version: int): super().__init__(self.path, self.issue) -class RelationWrongType(CompilationException): +class RelationWrongTypeError(CompilationError): def __init__(self, relation, expected_type, model=None): self.relation = relation self.expected_type = expected_type @@ -2185,6 +2111,83 @@ def get_message(self) -> str: return msg +# not modifying these since rpc should be deprecated soon +class UnknownAsyncIDException(Exception): + CODE = 10012 + MESSAGE = "RPC server got an unknown async ID" + + def __init__(self, task_id): + self.task_id = task_id + + def __str__(self): + return f"{self.MESSAGE}: {self.task_id}" + + +class RPCFailureResult(DbtRuntimeError): + CODE = 10002 + MESSAGE = "RPC execution error" + + +class RPCTimeoutException(DbtRuntimeError): + CODE = 10008 + MESSAGE = "RPC timeout error" + + def __init__(self, timeout: Optional[float] = None): + super().__init__(self.MESSAGE) + self.timeout = timeout + + def data(self): + result = super().data() + result.update( + { + "timeout": self.timeout, + "message": f"RPC timed out after {self.timeout}s", + } + ) + return result + + +class RPCKilledException(DbtRuntimeError): + CODE = 10009 + MESSAGE = "RPC process killed" + + def __init__(self, signum: int): + self.signum = signum + self.msg = f"RPC process killed by signal {self.signum}" + super().__init__(self.msg) + + def data(self): + return { + "signum": self.signum, + "message": self.msg, + } + + +class RPCCompiling(DbtRuntimeError): + CODE = 10010 + MESSAGE = 'RPC server is compiling the project, call the "status" method for' " compile status" + + def __init__(self, msg: str = None, node=None): + if msg is None: + msg = "compile in progress" + super().__init__(msg, node) + + +class RPCLoadException(DbtRuntimeError): + CODE = 10011 + MESSAGE = ( + 'RPC server failed to compile project, call the "status" method for' " compile status" + ) + + def __init__(self, cause: Dict[str, Any]): + self.cause = cause + self.msg = f'{self.MESSAGE}: {self.cause["message"]}' + super().__init__(self.msg) + + def data(self): + return {"cause": self.cause, "message": self.msg} + + # These are copies of what's in dbt/context/exceptions_jinja.py to not immediately break adapters # utilizing these functions as exceptions. These are direct copies to avoid circular imports. 
# They will be removed in 1 (or 2?) versions. Issue to be created to ensure it happens. @@ -2207,147 +2210,147 @@ def warn(msg, node=None): @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="MissingConfig"), + suggested_action=SUGGESTED_ACTION.format(exception="MissingConfigError"), reason=REASON, ) def missing_config(model, name) -> NoReturn: - raise MissingConfig(unique_id=model.unique_id, name=name) + raise MissingConfigError(unique_id=model.unique_id, name=name) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="MissingMaterialization"), + suggested_action=SUGGESTED_ACTION.format(exception="MissingMaterializationError"), reason=REASON, ) def missing_materialization(model, adapter_type) -> NoReturn: materialization = model.config.materialized - raise MissingMaterialization(materialization=materialization, adapter_type=adapter_type) + raise MissingMaterializationError(materialization=materialization, adapter_type=adapter_type) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="MissingRelation"), + suggested_action=SUGGESTED_ACTION.format(exception="MissingRelationError"), reason=REASON, ) def missing_relation(relation, model=None) -> NoReturn: - raise MissingRelation(relation, model) + raise MissingRelationError(relation, model) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousAlias"), + suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousAliasError"), reason=REASON, ) def raise_ambiguous_alias(node_1, node_2, duped_name=None) -> NoReturn: - raise AmbiguousAlias(node_1, node_2, duped_name) + raise AmbiguousAliasError(node_1, node_2, duped_name) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousCatalogMatch"), + suggested_action=SUGGESTED_ACTION.format(exception="AmbiguousCatalogMatchError"), reason=REASON, ) def raise_ambiguous_catalog_match(unique_id, match_1, match_2) -> NoReturn: - raise AmbiguousCatalogMatch(unique_id, match_1, match_2) + raise AmbiguousCatalogMatchError(unique_id, match_1, match_2) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="CacheInconsistency"), + suggested_action=SUGGESTED_ACTION.format(exception="CacheInconsistencyError"), reason=REASON, ) def raise_cache_inconsistent(message) -> NoReturn: - raise CacheInconsistency(message) + raise CacheInconsistencyError(message) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DataclassNotDict"), + suggested_action=SUGGESTED_ACTION.format(exception="DataclassNotDictError"), reason=REASON, ) def raise_dataclass_not_dict(obj) -> NoReturn: - raise DataclassNotDict(obj) + raise DataclassNotDictError(obj) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="CompilationException"), + suggested_action=SUGGESTED_ACTION.format(exception="CompilationError"), reason=REASON, ) def raise_compiler_error(msg, node=None) -> NoReturn: - raise CompilationException(msg, node) + raise CompilationError(msg, node) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DatabaseException"), + suggested_action=SUGGESTED_ACTION.format(exception="DbtDatabaseError"), reason=REASON, ) def raise_database_error(msg, node=None) -> NoReturn: - raise DatabaseException(msg, node) + raise DbtDatabaseError(msg, node) @deprecated( 
version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DependencyNotFound"), + suggested_action=SUGGESTED_ACTION.format(exception="DependencyNotFoundError"), reason=REASON, ) def raise_dep_not_found(node, node_description, required_pkg) -> NoReturn: - raise DependencyNotFound(node, node_description, required_pkg) + raise DependencyNotFoundError(node, node_description, required_pkg) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DependencyException"), + suggested_action=SUGGESTED_ACTION.format(exception="DependencyError"), reason=REASON, ) def raise_dependency_error(msg) -> NoReturn: - raise DependencyException(scrub_secrets(msg, env_secrets())) + raise DependencyError(scrub_secrets(msg, env_secrets())) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DuplicatePatchPath"), + suggested_action=SUGGESTED_ACTION.format(exception="DuplicatePatchPathError"), reason=REASON, ) def raise_duplicate_patch_name(patch_1, existing_patch_path) -> NoReturn: - raise DuplicatePatchPath(patch_1, existing_patch_path) + raise DuplicatePatchPathError(patch_1, existing_patch_path) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DuplicateResourceName"), + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateResourceNameError"), reason=REASON, ) def raise_duplicate_resource_name(node_1, node_2) -> NoReturn: - raise DuplicateResourceName(node_1, node_2) + raise DuplicateResourceNameError(node_1, node_2) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="InvalidPropertyYML"), + suggested_action=SUGGESTED_ACTION.format(exception="PropertyYMLError"), reason=REASON, ) def raise_invalid_property_yml_version(path, issue) -> NoReturn: - raise InvalidPropertyYML(path, issue) + raise PropertyYMLError(path, issue) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="NotImplementedException"), + suggested_action=SUGGESTED_ACTION.format(exception="NotImplementedError"), reason=REASON, ) def raise_not_implemented(msg) -> NoReturn: - raise NotImplementedException(msg) + raise NotImplementedError(msg) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="RelationWrongType"), + suggested_action=SUGGESTED_ACTION.format(exception="RelationWrongTypeError"), reason=REASON, ) def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: - raise RelationWrongType(relation, expected_type, model) + raise RelationWrongTypeError(relation, expected_type, model) # these were implemented in core so deprecating here by calling the new exception directly @@ -2355,81 +2358,81 @@ def relation_wrong_type(relation, expected_type, model=None) -> NoReturn: @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DuplicateAlias"), + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateAliasError"), reason=REASON, ) def raise_duplicate_alias( kwargs: Mapping[str, Any], aliases: Mapping[str, str], canonical_key: str ) -> NoReturn: - raise DuplicateAlias(kwargs, aliases, canonical_key) + raise DuplicateAliasError(kwargs, aliases, canonical_key) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DuplicateSourcePatchName"), + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateSourcePatchNameError"), reason=REASON, ) def 
raise_duplicate_source_patch_name(patch_1, patch_2): - raise DuplicateSourcePatchName(patch_1, patch_2) + raise DuplicateSourcePatchNameError(patch_1, patch_2) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroPatchName"), + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroPatchNameError"), reason=REASON, ) def raise_duplicate_macro_patch_name(patch_1, existing_patch_path): - raise DuplicateMacroPatchName(patch_1, existing_patch_path) + raise DuplicateMacroPatchNameError(patch_1, existing_patch_path) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroName"), + suggested_action=SUGGESTED_ACTION.format(exception="DuplicateMacroNameError"), reason=REASON, ) def raise_duplicate_macro_name(node_1, node_2, namespace) -> NoReturn: - raise DuplicateMacroName(node_1, node_2, namespace) + raise DuplicateMacroNameError(node_1, node_2, namespace) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="ApproximateMatch"), + suggested_action=SUGGESTED_ACTION.format(exception="ApproximateMatchError"), reason=REASON, ) def approximate_relation_match(target, relation): - raise ApproximateMatch(target, relation) + raise ApproximateMatchError(target, relation) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResults"), + suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResultsError"), reason=REASON, ) def get_relation_returned_multiple_results(kwargs, matches): - raise RelationReturnedMultipleResults(kwargs, matches) + raise RelationReturnedMultipleResultsError(kwargs, matches) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="OperationException"), + suggested_action=SUGGESTED_ACTION.format(exception="OperationError"), reason=REASON, ) def system_error(operation_name): - raise OperationException(operation_name) + raise OperationError(operation_name) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="InvalidMaterializationArg"), + suggested_action=SUGGESTED_ACTION.format(exception="MaterializationArgError"), reason=REASON, ) def invalid_materialization_argument(name, argument): - raise InvalidMaterializationArg(name, argument) + raise MaterializationArgError(name, argument) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="BadSpecException"), + suggested_action=SUGGESTED_ACTION.format(exception="BadSpecError"), reason=REASON, ) def bad_package_spec(repo, spec, error_message): @@ -2447,34 +2450,34 @@ def raise_git_cloning_error(error: CommandResultError) -> NoReturn: @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="GitCloningProblem"), + suggested_action=SUGGESTED_ACTION.format(exception="UnknownGitCloningProblemError"), reason=REASON, ) def raise_git_cloning_problem(repo) -> NoReturn: - raise GitCloningProblem(repo) + raise UnknownGitCloningProblemError(repo) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="MacroInvalidDispatchArg"), + suggested_action=SUGGESTED_ACTION.format(exception="MacroDispatchArgError"), reason=REASON, ) def macro_invalid_dispatch_arg(macro_name) -> NoReturn: - raise MacroInvalidDispatchArg(macro_name) + raise MacroDispatchArgError(macro_name) @deprecated(
version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="GraphDependencyNotFound"), + suggested_action=SUGGESTED_ACTION.format(exception="GraphDependencyNotFoundError"), reason=REASON, ) def dependency_not_found(node, dependency): - raise GraphDependencyNotFound(node, dependency) + raise GraphDependencyNotFoundError(node, dependency) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="TargetNotFound"), + suggested_action=SUGGESTED_ACTION.format(exception="TargetNotFoundError"), reason=REASON, ) def target_not_found( @@ -2484,7 +2487,7 @@ def target_not_found( target_package: Optional[str] = None, disabled: Optional[bool] = None, ) -> NoReturn: - raise TargetNotFound( + raise TargetNotFoundError( node=node, target_name=target_name, target_kind=target_kind, @@ -2495,141 +2498,151 @@ def target_not_found( @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="DocTargetNotFound"), + suggested_action=SUGGESTED_ACTION.format(exception="DocTargetNotFoundError"), reason=REASON, ) def doc_target_not_found( - model, target_doc_name: str, target_doc_package: Optional[str] + model, target_doc_name: str, target_doc_package: Optional[str] = None ) -> NoReturn: - raise DocTargetNotFound( + raise DocTargetNotFoundError( node=model, target_doc_name=target_doc_name, target_doc_package=target_doc_package ) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="InvalidDocArgs"), + suggested_action=SUGGESTED_ACTION.format(exception="DocArgsError"), reason=REASON, ) def doc_invalid_args(model, args) -> NoReturn: - raise InvalidDocArgs(node=model, args=args) + raise DocArgsError(node=model, args=args) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="RefBadContext"), + suggested_action=SUGGESTED_ACTION.format(exception="RefBadContextError"), reason=REASON, ) def ref_bad_context(model, args) -> NoReturn: - raise RefBadContext(node=model, args=args) + raise RefBadContextError(node=model, args=args) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="MetricInvalidArgs"), + suggested_action=SUGGESTED_ACTION.format(exception="MetricArgsError"), reason=REASON, ) def metric_invalid_args(model, args) -> NoReturn: - raise MetricInvalidArgs(node=model, args=args) + raise MetricArgsError(node=model, args=args) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="RefInvalidArgs"), + suggested_action=SUGGESTED_ACTION.format(exception="RefArgsError"), reason=REASON, ) def ref_invalid_args(model, args) -> NoReturn: - raise RefInvalidArgs(node=model, args=args) + raise RefArgsError(node=model, args=args) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="InvalidBoolean"), + suggested_action=SUGGESTED_ACTION.format(exception="BooleanError"), reason=REASON, ) def invalid_bool_error(got_value, macro_name) -> NoReturn: - raise InvalidBoolean(return_value=got_value, macro_name=macro_name) + raise BooleanError(return_value=got_value, macro_name=macro_name) @deprecated( version=DEPRECATION_VERSION, - suggested_action=SUGGESTED_ACTION.format(exception="InvalidMacroArgType"), + suggested_action=SUGGESTED_ACTION.format(exception="MacroArgTypeError"), reason=REASON, ) def invalid_type_error(method_name, arg_name, got_value, expected_type) -> NoReturn: - """Raise a CompilationException when an adapter method 
available to macros
+    """Raise a MacroArgTypeError when an adapter method available to macros
     has changed.
     """
-    raise InvalidMacroArgType(method_name, arg_name, got_value, expected_type)
+    raise MacroArgTypeError(method_name, arg_name, got_value, expected_type)


 @deprecated(
     version=DEPRECATION_VERSION,
-    suggested_action=SUGGESTED_ACTION.format(exception="DisallowSecretEnvVar"),
+    suggested_action=SUGGESTED_ACTION.format(exception="SecretEnvVarLocationError"),
     reason=REASON,
 )
 def disallow_secret_env_var(env_var_name) -> NoReturn:
     """Raise an error when a secret env var is referenced outside allowed rendering contexts"""
-    raise DisallowSecretEnvVar(env_var_name)
+    raise SecretEnvVarLocationError(env_var_name)


 @deprecated(
     version=DEPRECATION_VERSION,
-    suggested_action=SUGGESTED_ACTION.format(exception="ParsingException"),
+    suggested_action=SUGGESTED_ACTION.format(exception="ParsingError"),
     reason=REASON,
 )
 def raise_parsing_error(msg, node=None) -> NoReturn:
-    raise ParsingException(msg, node)
+    raise ParsingError(msg, node)


+# These are the exception functions that were not called within dbt-core but will remain
+# here, deprecated, to give adapters a chance to rework them
 @deprecated(
     version=DEPRECATION_VERSION,
-    suggested_action=SUGGESTED_ACTION.format(exception="CompilationException"),
+    suggested_action=SUGGESTED_ACTION.format(exception="UnrecognizedCredentialTypeError"),
     reason=REASON,
 )
 def raise_unrecognized_credentials_type(typename, supported_types):
-    raise UnrecognizedCredentialType(typename, supported_types)
+    raise UnrecognizedCredentialTypeError(typename, supported_types)


 @deprecated(
     version=DEPRECATION_VERSION,
-    suggested_action=SUGGESTED_ACTION.format(exception="CompilationException"),
+    suggested_action=SUGGESTED_ACTION.format(exception="PatchTargetNotFoundError"),
     reason=REASON,
 )
 def raise_patch_targets_not_found(patches):
-    raise PatchTargetNotFound(patches)
+    raise PatchTargetNotFoundError(patches)


 @deprecated(
     version=DEPRECATION_VERSION,
-    suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResults"),
+    suggested_action=SUGGESTED_ACTION.format(exception="RelationReturnedMultipleResultsError"),
     reason=REASON,
 )
 def multiple_matching_relations(kwargs, matches):
-    raise RelationReturnedMultipleResults(kwargs, matches)
+    raise RelationReturnedMultipleResultsError(kwargs, matches)


 @deprecated(
     version=DEPRECATION_VERSION,
-    suggested_action=SUGGESTED_ACTION.format(exception="MaterializationNotAvailable"),
+    suggested_action=SUGGESTED_ACTION.format(exception="MaterializationNotAvailableError"),
     reason=REASON,
 )
 def materialization_not_available(model, adapter_type):
     materialization = model.config.materialized
-    raise MaterializationNotAvailable(materialization=materialization, adapter_type=adapter_type)
+    raise MaterializationNotAvailableError(
+        materialization=materialization, adapter_type=adapter_type
+    )


 @deprecated(
     version=DEPRECATION_VERSION,
-    suggested_action=SUGGESTED_ACTION.format(exception="MacroNotFound"),
+    suggested_action=SUGGESTED_ACTION.format(exception="MacroNotFoundError"),
     reason=REASON,
 )
 def macro_not_found(model, target_macro_id):
-    raise MacroNotFound(node=model, target_macro_id=target_macro_id)
+    raise MacroNotFoundError(node=model, target_macro_id=target_macro_id)


 # adapters use this to format messages. it should be deprecated but live on for now
+# TODO: What should the message here be?
+@deprecated(
+    version=DEPRECATION_VERSION,
+    suggested_action="Format this message in the adapter",
+    reason="`validator_error_message` is now a method on DbtRuntimeError",
+)
 def validator_error_message(exc):
     """Given a dbt.dataclass_schema.ValidationError (which is basically a
     jsonschema.ValidationError), return the relevant parts as a string

diff --git a/core/dbt/graph/cli.py b/core/dbt/graph/cli.py
index 2ae0d814327..a5581ed1d78 100644
--- a/core/dbt/graph/cli.py
+++ b/core/dbt/graph/cli.py
@@ -7,7 +7,7 @@
 from typing import Dict, List, Optional, Tuple, Any, Union

 from dbt.contracts.selection import SelectorDefinition, SelectorFile
-from dbt.exceptions import InternalException, ValidationException
+from dbt.exceptions import DbtInternalError, DbtValidationError

 from .selector_spec import (
     SelectionUnion,
@@ -94,15 +94,15 @@ def parse_difference(

 def _get_list_dicts(dct: Dict[str, Any], key: str) -> List[RawDefinition]:
     result: List[RawDefinition] = []
     if key not in dct:
-        raise InternalException(f"Expected to find key {key} in dict, only found {list(dct)}")
+        raise DbtInternalError(f"Expected to find key {key} in dict, only found {list(dct)}")
     values = dct[key]
     if not isinstance(values, list):
-        raise ValidationException(f'Invalid value for key "{key}". Expected a list.')
+        raise DbtValidationError(f'Invalid value for key "{key}". Expected a list.')
     for value in values:
         if isinstance(value, dict):
             for value_key in value:
                 if not isinstance(value_key, str):
-                    raise ValidationException(
+                    raise DbtValidationError(
                         f'Expected all keys to "{key}" dict to be strings, '
                         f'but "{value_key}" is a "{type(value_key)}"'
                     )
@@ -110,7 +110,7 @@ def _get_list_dicts(dct: Dict[str, Any], key: str) -> List[RawDefinition]:
         elif isinstance(value, str):
             result.append(value)
         else:
-            raise ValidationException(
+            raise DbtValidationError(
                 f'Invalid value type {type(value)} in key "{key}", expected '
                 f"dict or str (value: {value})."
) @@ -140,7 +140,7 @@ def _parse_include_exclude_subdefs( # do not allow multiple exclude: defs at the same level if diff_arg is not None: yaml_sel_cfg = yaml.dump(definition) - raise ValidationException( + raise DbtValidationError( f"You cannot provide multiple exclude arguments to the " f"same selector set operator:\n{yaml_sel_cfg}" ) @@ -182,7 +182,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe key = list(definition)[0] value = definition[key] if not isinstance(key, str): - raise ValidationException( + raise DbtValidationError( f'Expected definition key to be a "str", got one of type ' f'"{type(key)}" ({key})' ) dct = { @@ -192,7 +192,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe elif definition.get("method") == "selector": sel_def = definition.get("value") if sel_def not in result: - raise ValidationException(f"Existing selector definition for {sel_def} not found.") + raise DbtValidationError(f"Existing selector definition for {sel_def} not found.") return result[definition["value"]]["definition"] elif "method" in definition and "value" in definition: dct = definition @@ -200,7 +200,7 @@ def parse_dict_definition(definition: Dict[str, Any], result={}) -> SelectionSpe diff_arg = _parse_exclusions(definition, result=result) dct = {k: v for k, v in dct.items() if k != "exclude"} else: - raise ValidationException( + raise DbtValidationError( f'Expected either 1 key or else "method" ' f'and "value" keys, but got {list(definition)}' ) @@ -226,7 +226,7 @@ def parse_from_definition( and len(definition) > 1 ): keys = ",".join(definition.keys()) - raise ValidationException( + raise DbtValidationError( f"Only a single 'union' or 'intersection' key is allowed " f"in a root level selector definition; found {keys}." 
) @@ -239,7 +239,7 @@ def parse_from_definition( elif isinstance(definition, dict): return parse_dict_definition(definition, result=result) else: - raise ValidationException( + raise DbtValidationError( f"Expected to find union, intersection, str or dict, instead " f"found {type(definition)}: {definition}" ) diff --git a/core/dbt/graph/graph.py b/core/dbt/graph/graph.py index 2dda596e073..9c20750cd54 100644 --- a/core/dbt/graph/graph.py +++ b/core/dbt/graph/graph.py @@ -2,7 +2,7 @@ from itertools import product import networkx as nx # type: ignore -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError UniqueId = NewType("UniqueId", str) @@ -27,7 +27,7 @@ def __iter__(self) -> Iterator[UniqueId]: def ancestors(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[UniqueId]: """Returns all nodes having a path to `node` in `graph`""" if not self.graph.has_node(node): - raise InternalException(f"Node {node} not found in the graph!") + raise DbtInternalError(f"Node {node} not found in the graph!") return { child for _, child in nx.bfs_edges(self.graph, node, reverse=True, depth_limit=max_depth) @@ -36,7 +36,7 @@ def ancestors(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[Uniq def descendants(self, node: UniqueId, max_depth: Optional[int] = None) -> Set[UniqueId]: """Returns all nodes reachable from `node` in `graph`""" if not self.graph.has_node(node): - raise InternalException(f"Node {node} not found in the graph!") + raise DbtInternalError(f"Node {node} not found in the graph!") return {child for _, child in nx.bfs_edges(self.graph, node, depth_limit=max_depth)} def select_childrens_parents(self, selected: Set[UniqueId]) -> Set[UniqueId]: diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index 8f9561c6519..fdae6327d0e 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -9,8 +9,8 @@ from dbt.events.types import SelectorReportInvalidSelector, NoNodesForSelectionCriteria from dbt.node_types import NodeType from dbt.exceptions import ( - InternalException, - InvalidSelectorException, + DbtInternalError, + InvalidSelectorError, ) from dbt.contracts.graph.nodes import GraphMemberNode from dbt.contracts.graph.manifest import Manifest @@ -78,7 +78,7 @@ def get_nodes_from_criteria( nodes = self.graph.nodes() try: collected = self.select_included(nodes, spec) - except InvalidSelectorException: + except InvalidSelectorError: valid_selectors = ", ".join(self.SELECTOR_METHODS) fire_event( SelectorReportInvalidSelector( @@ -183,7 +183,7 @@ def _is_match(self, unique_id: UniqueId) -> bool: elif unique_id in self.manifest.metrics: node = self.manifest.metrics[unique_id] else: - raise InternalException(f"Node {unique_id} not found in the manifest!") + raise DbtInternalError(f"Node {unique_id} not found in the manifest!") return self.node_is_match(node) def filter_selection(self, selected: Set[UniqueId]) -> Set[UniqueId]: diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py index c77625649bc..2c73d480dae 100644 --- a/core/dbt/graph/selector_methods.py +++ b/core/dbt/graph/selector_methods.py @@ -19,8 +19,8 @@ ) from dbt.contracts.state import PreviousState from dbt.exceptions import ( - InternalException, - RuntimeException, + DbtInternalError, + DbtRuntimeError, ) from dbt.node_types import NodeType @@ -207,7 +207,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu "`${{source_name}}.${{target_name}}`, or " 
"`${{package_name}}.${{source_name}}.${{target_name}}" ).format(selector) - raise RuntimeException(msg) + raise DbtRuntimeError(msg) for node, real_node in self.source_nodes(included_nodes): if target_package not in (real_node.package_name, SELECTOR_GLOB): @@ -234,7 +234,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu "the form ${{exposure_name}} or " "${{exposure_package.exposure_name}}" ).format(selector) - raise RuntimeException(msg) + raise DbtRuntimeError(msg) for node, real_node in self.exposure_nodes(included_nodes): if target_package not in (real_node.package_name, SELECTOR_GLOB): @@ -259,7 +259,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu "the form ${{metric_name}} or " "${{metric_package.metric_name}}" ).format(selector) - raise RuntimeException(msg) + raise DbtRuntimeError(msg) for node, real_node in self.metric_nodes(included_nodes): if target_package not in (real_node.package_name, SELECTOR_GLOB): @@ -367,7 +367,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu try: resource_type = NodeType(selector) except ValueError as exc: - raise RuntimeException(f'Invalid resource_type selector "{selector}"') from exc + raise DbtRuntimeError(f'Invalid resource_type selector "{selector}"') from exc for node, real_node in self.parsed_nodes(included_nodes): if real_node.resource_type == resource_type: yield node @@ -390,7 +390,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu elif selector in ("singular", "data"): search_type = SingularTestNode else: - raise RuntimeException( + raise DbtRuntimeError( f'Invalid test type selector {selector}: expected "generic" or ' '"singular"' ) @@ -407,7 +407,7 @@ def __init__(self, *args, **kwargs): def _macros_modified(self) -> List[str]: # we checked in the caller! 
if self.previous_state is None or self.previous_state.manifest is None: - raise InternalException("No comparison manifest in _macros_modified") + raise DbtInternalError("No comparison manifest in _macros_modified") old_macros = self.previous_state.manifest.macros new_macros = self.manifest.macros @@ -496,7 +496,7 @@ def check_new(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool: def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: if self.previous_state is None or self.previous_state.manifest is None: - raise RuntimeException("Got a state selector method, but no comparison manifest") + raise DbtRuntimeError("Got a state selector method, but no comparison manifest") state_checks = { # it's new if there is no old version @@ -514,7 +514,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu if selector in state_checks: checker = state_checks[selector] else: - raise RuntimeException( + raise DbtRuntimeError( f'Got an invalid selector "{selector}", expected one of ' f'"{list(state_checks)}"' ) @@ -538,7 +538,7 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu class ResultSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: if self.previous_state is None or self.previous_state.results is None: - raise InternalException("No comparison run_results") + raise DbtInternalError("No comparison run_results") matches = set( result.unique_id for result in self.previous_state.results if result.status == selector ) @@ -551,13 +551,11 @@ class SourceStatusSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: if self.previous_state is None or self.previous_state.sources is None: - raise InternalException( + raise DbtInternalError( "No previous state comparison freshness results in sources.json" ) elif self.previous_state.sources_current is None: - raise InternalException( - "No current state comparison freshness results in sources.json" - ) + raise DbtInternalError("No current state comparison freshness results in sources.json") current_state_sources = { result.unique_id: getattr(result, "max_loaded_at", 0) @@ -633,7 +631,7 @@ def __init__( def get_method(self, method: MethodName, method_arguments: List[str]) -> SelectorMethod: if method not in self.SELECTOR_METHODS: - raise InternalException( + raise DbtInternalError( f'Method name "{method}" is a valid node selection ' f"method name, but it is not handled" ) diff --git a/core/dbt/graph/selector_spec.py b/core/dbt/graph/selector_spec.py index 5b8e4560d5e..af7ae014163 100644 --- a/core/dbt/graph/selector_spec.py +++ b/core/dbt/graph/selector_spec.py @@ -7,7 +7,7 @@ from typing import Set, Iterator, List, Optional, Dict, Union, Any, Iterable, Tuple from .graph import UniqueId from .selector_methods import MethodName -from dbt.exceptions import RuntimeException, InvalidSelectorException +from dbt.exceptions import DbtRuntimeError, InvalidSelectorError RAW_SELECTOR_PATTERN = re.compile( @@ -47,7 +47,7 @@ def _match_to_int(match: Dict[str, str], key: str) -> Optional[int]: try: return int(raw) except ValueError as exc: - raise RuntimeException(f"Invalid node spec - could not handle parent depth {raw}") from exc + raise DbtRuntimeError(f"Invalid node spec - could not handle parent depth {raw}") from exc SelectionSpec = Union[ @@ -73,7 +73,7 @@ class SelectionCriteria: def __post_init__(self): if self.children and 
self.childrens_parents: - raise RuntimeException( + raise DbtRuntimeError( f'Invalid node spec {self.raw} - "@" prefix and "+" suffix ' "are incompatible" ) @@ -96,9 +96,7 @@ def parse_method(cls, groupdict: Dict[str, Any]) -> Tuple[MethodName, List[str]] try: method_name = MethodName(method_parts[0]) except ValueError as exc: - raise InvalidSelectorException( - f"'{method_parts[0]}' is not a valid method name" - ) from exc + raise InvalidSelectorError(f"'{method_parts[0]}' is not a valid method name") from exc method_arguments: List[str] = method_parts[1:] @@ -112,7 +110,7 @@ def selection_criteria_from_dict( indirect_selection: IndirectSelection = IndirectSelection.Eager, ) -> "SelectionCriteria": if "value" not in dct: - raise RuntimeException(f'Invalid node spec "{raw}" - no search value!') + raise DbtRuntimeError(f'Invalid node spec "{raw}" - no search value!') method_name, method_arguments = cls.parse_method(dct) parents_depth = _match_to_int(dct, "parents_depth") @@ -163,7 +161,7 @@ def from_single_spec( result = RAW_SELECTOR_PATTERN.match(raw) if result is None: # bad spec! - raise RuntimeException(f'Invalid selector spec "{raw}"') + raise DbtRuntimeError(f'Invalid selector spec "{raw}"') return cls.selection_criteria_from_dict( raw, result.groupdict(), indirect_selection=indirect_selection diff --git a/core/dbt/internal_deprecations.py b/core/dbt/internal_deprecations.py index e6154329ca7..fbc435026b6 100644 --- a/core/dbt/internal_deprecations.py +++ b/core/dbt/internal_deprecations.py @@ -2,18 +2,18 @@ from typing import Optional from dbt.events.functions import warn_or_error -from dbt.events.types import FunctionDeprecated +from dbt.events.types import InternalDeprecation def deprecated(suggested_action: str, version: str, reason: Optional[str]): def inner(func): @functools.wraps(func) def wrapped(*args, **kwargs): - function_name = func.__name__ + name = func.__name__ warn_or_error( - FunctionDeprecated( - function_name=function_name, + InternalDeprecation( + name=name, suggested_action=suggested_action, version=version, reason=reason, diff --git a/core/dbt/lib.py b/core/dbt/lib.py index f4b9ab5be0e..2726f101b00 100644 --- a/core/dbt/lib.py +++ b/core/dbt/lib.py @@ -4,7 +4,7 @@ from dbt.contracts.results import RunningStatus, collect_timing_info from dbt.events.functions import fire_event from dbt.events.types import NodeCompiling, NodeExecuting -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError from dbt import flags from dbt.task.sql import SqlCompileRunner from dataclasses import dataclass @@ -125,7 +125,7 @@ def get_task_by_type(type): elif type == "run_operation": return RunOperationTask - raise RuntimeException("not a valid task") + raise DbtRuntimeError("not a valid task") def create_task(type, args, manifest, config): diff --git a/core/dbt/main.py b/core/dbt/main.py index 1bdd59fef1f..1d2dad9b259 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -46,9 +46,9 @@ from dbt.config.profile import read_user_config from dbt.exceptions import ( Exception as dbtException, - InternalException, - NotImplementedException, - FailedToConnectException, + DbtInternalError, + NotImplementedError, + FailedToConnectError, ) @@ -92,7 +92,7 @@ def add_optional_argument_inverse( ): mutex_group = self.add_mutually_exclusive_group() if not name.startswith("--"): - raise InternalException( + raise DbtInternalError( 'cannot handle optional argument without "--" prefix: ' f'got "{name}"' ) if dest is None: @@ -207,7 +207,7 @@ def track_run(task): try: 
yield dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="ok") - except (NotImplementedException, FailedToConnectException) as e: + except (NotImplementedError, FailedToConnectError) as e: fire_event(MainEncounteredError(exc=str(e))) dbt.tracking.track_invocation_end(config=task.config, args=task.args, result_type="error") except Exception: @@ -220,7 +220,7 @@ def track_run(task): def run_from_args(parsed): log_cache_events(getattr(parsed, "log_cache_events", False)) - # this will convert DbtConfigErrors into RuntimeExceptions + # this will convert DbtConfigErrors into DbtRuntimeError # task could be any one of the task objects task = parsed.cls.from_args(args=parsed) diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 9c245214d83..1f01aff36f1 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -18,7 +18,7 @@ from dbt.contracts.graph.manifest import Manifest from dbt.contracts.graph.nodes import ManifestNode, BaseNode from dbt.contracts.graph.unparsed import UnparsedNode, Docs -from dbt.exceptions import InternalException, InvalidConfigUpdate, InvalidDictParse +from dbt.exceptions import DbtInternalError, ConfigUpdateError, DictParseError from dbt import hooks from dbt.node_types import NodeType, ModelLanguage from dbt.parser.search import FileBlock @@ -76,7 +76,7 @@ def __init__(self, config: RuntimeConfig, manifest: Manifest, component: str) -> root_project_name=config.project_name, ) if macro is None: - raise InternalException(f"No macro with name generate_{component}_name found") + raise DbtInternalError(f"No macro with name generate_{component}_name found") root_context = generate_generate_name_macro_context(macro, config, manifest) self.updater = MacroGenerator(macro, root_context) @@ -224,7 +224,7 @@ def _create_parsetime_node( original_file_path=block.path.original_file_path, raw_code=block.contents, ) - raise InvalidDictParse(exc, node=node) + raise DictParseError(exc, node=node) def _context_for(self, parsed_node: IntermediateNode, config: ContextConfig) -> Dict[str, Any]: return generate_parser_model_context(parsed_node, self.root_project, self.manifest, config) @@ -345,7 +345,7 @@ def initial_config(self, fqn: List[str]) -> ContextConfig: self.project.project_name, ) else: - raise InternalException( + raise DbtInternalError( f"Got an unexpected project version={config_version}, expected 2" ) @@ -363,7 +363,7 @@ def render_update(self, node: IntermediateNode, config: ContextConfig) -> None: self.update_parsed_node_config(node, config, context=context) except ValidationError as exc: # we got a ValidationError - probably bad types in config() - raise InvalidConfigUpdate(exc, node=node) from exc + raise ConfigUpdateError(exc, node=node) from exc def add_result_node(self, block: FileBlock, node: ManifestNode): if node.config.enabled: diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py index 822dd5b2d85..ea281e1c993 100644 --- a/core/dbt/parser/generic_test.py +++ b/core/dbt/parser/generic_test.py @@ -2,7 +2,7 @@ import jinja2 -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.clients import jinja from dbt.contracts.graph.nodes import GenericTestNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro @@ -51,14 +51,14 @@ def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macr ) if isinstance(t, jinja.BlockTag) ] - except ParsingException as exc: + except ParsingError as exc: exc.add_node(base_node) raise for block 
in blocks: try: ast = jinja.parse(block.full_block) - except ParsingException as e: + except ParsingError as e: e.add_node(base_node) raise @@ -68,7 +68,7 @@ def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macr if len(generic_test_nodes) != 1: # things have gone disastrously wrong, we thought we only # parsed one block! - raise ParsingException( + raise ParsingError( f"Found multiple generic tests in {block.full_block}, expected 1", node=base_node, ) diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index af0282c953f..206e9c51438 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -22,17 +22,17 @@ UnparsedExposure, ) from dbt.exceptions import ( - CustomMacroPopulatingConfigValues, - SameKeyNested, - TagNotString, - TagsNotListOfStrings, - TestArgIncludesModel, - TestArgsNotDict, - TestDefinitionDictLength, - TestInvalidType, - TestNameNotString, - UnexpectedTestNamePattern, - UndefinedMacroException, + CustomMacroPopulatingConfigValueError, + SameKeyNestedError, + TagNotStringError, + TagsNotListOfStringsError, + TestArgIncludesModelError, + TestArgsNotDictError, + TestDefinitionDictLengthError, + TestTypeError, + TestNameNotStringError, + UnexpectedTestNamePatternError, + UndefinedMacroError, ) from dbt.parser.search import FileBlock @@ -234,7 +234,7 @@ def __init__( test_name, test_args = self.extract_test_args(test, column_name) self.args: Dict[str, Any] = test_args if "model" in self.args: - raise TestArgIncludesModel() + raise TestArgIncludesModelError() self.package_name: str = package_name self.target: Testable = target @@ -242,7 +242,7 @@ def __init__( match = self.TEST_NAME_PATTERN.match(test_name) if match is None: - raise UnexpectedTestNamePattern(test_name) + raise UnexpectedTestNamePatternError(test_name) groups = match.groupdict() self.name: str = groups["test_name"] @@ -259,15 +259,15 @@ def __init__( value = self.args.pop(key, None) # 'modifier' config could be either top level arg or in config if value and "config" in self.args and key in self.args["config"]: - raise SameKeyNested() + raise SameKeyNestedError() if not value and "config" in self.args: value = self.args["config"].pop(key, None) if isinstance(value, str): try: value = get_rendered(value, render_ctx, native=True) - except UndefinedMacroException as e: - raise CustomMacroPopulatingConfigValues( + except UndefinedMacroError as e: + raise CustomMacroPopulatingConfigValueError( target_name=self.target.name, column_name=column_name, name=self.name, @@ -310,7 +310,7 @@ def _bad_type(self) -> TypeError: @staticmethod def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: if not isinstance(test, dict): - raise TestInvalidType(test) + raise TestTypeError(test) # If the test is a dictionary with top-level keys, the test name is "test_name" # and the rest are arguments @@ -324,13 +324,13 @@ def extract_test_args(test, name=None) -> Tuple[str, Dict[str, Any]]: else: test = list(test.items()) if len(test) != 1: - raise TestDefinitionDictLength(test) + raise TestDefinitionDictLengthError(test) test_name, test_args = test[0] if not isinstance(test_args, dict): - raise TestArgsNotDict(test_args) + raise TestArgsNotDictError(test_args) if not isinstance(test_name, str): - raise TestNameNotString(test_name) + raise TestNameNotStringError(test_name) test_args = deepcopy(test_args) if name is not None: test_args["column_name"] = name @@ -421,10 +421,10 @@ def tags(self) -> List[str]: if 
isinstance(tags, str): tags = [tags] if not isinstance(tags, list): - raise TagsNotListOfStrings(tags) + raise TagsNotListOfStringsError(tags) for tag in tags: if not isinstance(tag, str): - raise TagNotString(tag) + raise TagNotStringError(tag) return tags[:] def macro_name(self) -> str: diff --git a/core/dbt/parser/hooks.py b/core/dbt/parser/hooks.py index d05ea136dc5..d96257a0e71 100644 --- a/core/dbt/parser/hooks.py +++ b/core/dbt/parser/hooks.py @@ -4,7 +4,7 @@ from dbt.context.context_config import ContextConfig from dbt.contracts.files import FilePath from dbt.contracts.graph.nodes import HookNode -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.node_types import NodeType, RunHookType from dbt.parser.base import SimpleParser from dbt.parser.search import FileBlock @@ -46,7 +46,7 @@ def get_hook_defs(self) -> List[str]: elif self.hook_type == RunHookType.End: hooks = self.project.on_run_end else: - raise InternalException( + raise DbtInternalError( 'hook_type must be one of "{}" or "{}" (got {})'.format( RunHookType.Start, RunHookType.End, self.hook_type ) diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py index 7c5336b8ccf..1a9ee03d57d 100644 --- a/core/dbt/parser/macros.py +++ b/core/dbt/parser/macros.py @@ -6,7 +6,7 @@ from dbt.contracts.graph.unparsed import UnparsedMacro from dbt.contracts.graph.nodes import Macro from dbt.contracts.files import FilePath, SourceFile -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.events.functions import fire_event from dbt.events.types import MacroFileParse from dbt.node_types import NodeType @@ -56,14 +56,14 @@ def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]: ) if isinstance(t, jinja.BlockTag) ] - except ParsingException as exc: + except ParsingError as exc: exc.add_node(base_node) raise for block in blocks: try: ast = jinja.parse(block.full_block) - except ParsingException as e: + except ParsingError as e: e.add_node(base_node) raise @@ -72,7 +72,7 @@ def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]: if len(macro_nodes) != 1: # things have gone disastrously wrong, we thought we only # parsed one block! 
- raise ParsingException( + raise ParsingError( f"Found multiple macros in {block.full_block}, expected 1", node=base_node ) diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index 988c4539c9e..fbfada4fc2a 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -21,8 +21,8 @@ from dbt.helper_types import PathSet from dbt.events.functions import fire_event, get_invocation_id, warn_or_error from dbt.events.types import ( - PartialParsingExceptionProcessingFile, - PartialParsingException, + PartialParsingErrorProcessingFile, + PartialParsingError, PartialParsingSkipParsing, UnableToPartialParse, PartialParsingNotEnabled, @@ -61,7 +61,7 @@ ResultNode, ) from dbt.contracts.util import Writable -from dbt.exceptions import TargetNotFound, AmbiguousAlias +from dbt.exceptions import TargetNotFoundError, AmbiguousAliasError from dbt.parser.base import Parser from dbt.parser.analysis import AnalysisParser from dbt.parser.generic_test import GenericTestParser @@ -278,9 +278,9 @@ def load(self): source_file = self.manifest.files[file_id] if source_file: parse_file_type = source_file.parse_file_type - fire_event(PartialParsingExceptionProcessingFile(file=file_id)) + fire_event(PartialParsingErrorProcessingFile(file=file_id)) exc_info["parse_file_type"] = parse_file_type - fire_event(PartialParsingException(exc_info=exc_info)) + fire_event(PartialParsingError(exc_info=exc_info)) # Send event if dbt.tracking.active_user is not None: @@ -989,7 +989,7 @@ def invalid_target_fail_unless_test( ) ) else: - raise TargetNotFound( + raise TargetNotFoundError( node=node, target_name=target_name, target_kind=target_kind, @@ -1017,11 +1017,13 @@ def _check_resource_uniqueness( existing_node = names_resources.get(name) if existing_node is not None: - raise dbt.exceptions.DuplicateResourceName(existing_node, node) + raise dbt.exceptions.DuplicateResourceNameError(existing_node, node) existing_alias = alias_resources.get(full_node_name) if existing_alias is not None: - raise AmbiguousAlias(node_1=existing_alias, node_2=node, duped_name=full_node_name) + raise AmbiguousAliasError( + node_1=existing_alias, node_2=node, duped_name=full_node_name + ) names_resources[name] = node alias_resources[full_node_name] = node @@ -1113,7 +1115,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur elif len(ref) == 2: target_model_package, target_model_name = ref else: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Refs should always be 1 or 2 arguments - got {len(ref)}" ) @@ -1157,7 +1159,7 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: M elif len(ref) == 2: target_model_package, target_model_name = ref else: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Refs should always be 1 or 2 arguments - got {len(ref)}" ) @@ -1208,7 +1210,7 @@ def _process_metrics_for_node( elif len(metric) == 2: target_metric_package, target_metric_name = metric else: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Metric references should always be 1 or 2 arguments - got {len(metric)}" ) @@ -1253,7 +1255,7 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif elif len(ref) == 2: target_model_package, target_model_name = ref else: - raise dbt.exceptions.InternalException( + raise dbt.exceptions.DbtInternalError( f"Refs should always be 1 or 2 arguments - got {len(ref)}" ) diff --git a/core/dbt/parser/models.py 
b/core/dbt/parser/models.py index 39bb18be714..597200abba5 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -30,11 +30,11 @@ import ast from dbt.dataclass_schema import ValidationError from dbt.exceptions import ( - InvalidModelConfig, - ParsingException, - PythonLiteralEval, - PythonParsingException, - UndefinedMacroException, + ModelConfigError, + ParsingError, + PythonLiteralEvalError, + PythonParsingError, + UndefinedMacroError, ) dbt_function_key_words = set(["ref", "source", "config", "get"]) @@ -66,13 +66,13 @@ def visit_FunctionDef(self, node: ast.FunctionDef) -> None: def check_error(self, node): if self.num_model_def != 1: - raise ParsingException( + raise ParsingError( f"dbt allows exactly one model defined per python file, found {self.num_model_def}", node=node, ) if len(self.dbt_errors) != 0: - raise ParsingException("\n".join(self.dbt_errors), node=node) + raise ParsingError("\n".join(self.dbt_errors), node=node) class PythonParseVisitor(ast.NodeVisitor): @@ -96,7 +96,7 @@ def _safe_eval(self, node): try: return ast.literal_eval(node) except (SyntaxError, ValueError, TypeError, MemoryError, RecursionError) as exc: - raise PythonLiteralEval(exc, node=self.dbt_node) from exc + raise PythonLiteralEvalError(exc, node=self.dbt_node) from exc def _get_call_literals(self, node): # List of literals @@ -176,9 +176,9 @@ def verify_python_model_code(node): node, ) if rendered_python != node.raw_code: - raise ParsingException("") - except (UndefinedMacroException, ParsingException): - raise ParsingException("No jinja in python model code is allowed", node=node) + raise ParsingError("") + except (UndefinedMacroError, ParsingError): + raise ParsingError("No jinja in python model code is allowed", node=node) class ModelParser(SimpleSQLParser[ModelNode]): @@ -202,7 +202,7 @@ def parse_python_model(self, node, config, context): try: tree = ast.parse(node.raw_code, filename=node.original_file_path) except SyntaxError as exc: - raise PythonParsingException(exc, node=node) from exc + raise PythonParsingError(exc, node=node) from exc # Only parse if AST tree has instructions in body if tree.body: @@ -219,12 +219,12 @@ def parse_python_model(self, node, config, context): if func == "get": num_args = len(args) if num_args == 0: - raise ParsingException( + raise ParsingError( "dbt.config.get() requires at least one argument", node=node, ) if num_args > 2: - raise ParsingException( + raise ParsingError( f"dbt.config.get() takes at most 2 arguments ({num_args} given)", node=node, ) @@ -255,7 +255,7 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None: except ValidationError as exc: # we got a ValidationError - probably bad types in config() - raise InvalidModelConfig(exc, node=node) from exc + raise ModelConfigError(exc, node=node) from exc return elif not flags.STATIC_PARSER: diff --git a/core/dbt/parser/read_files.py b/core/dbt/parser/read_files.py index ccb6b1b0790..531e5f39560 100644 --- a/core/dbt/parser/read_files.py +++ b/core/dbt/parser/read_files.py @@ -12,7 +12,7 @@ ) from dbt.parser.schemas import yaml_from_file, schema_file_keys, check_format_version -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.parser.search import filesystem_search from typing import Optional @@ -75,21 +75,21 @@ def validate_yaml(file_path, dct): f"The schema file at {file_path} is " f"invalid because the value of '{key}' is not a list" ) - raise ParsingException(msg) + raise ParsingError(msg) for element in dct[key]: if not 
isinstance(element, dict): msg = ( f"The schema file at {file_path} is " f"invalid because a list element for '{key}' is not a dictionary" ) - raise ParsingException(msg) + raise ParsingError(msg) if "name" not in element: msg = ( f"The schema file at {file_path} is " f"invalid because a list element for '{key}' does not have a " "name attribute." ) - raise ParsingException(msg) + raise ParsingError(msg) # Special processing for big seed files diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 32bfbb559a1..482eb5b6e35 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -50,22 +50,22 @@ UnparsedSourceDefinition, ) from dbt.exceptions import ( - CompilationException, - DuplicateMacroPatchName, - DuplicatePatchPath, - DuplicateSourcePatchName, - JSONValidationException, - InternalException, - InvalidSchemaConfig, - InvalidTestConfig, - ParsingException, - PropertyYMLInvalidTag, - PropertyYMLMissingVersion, - PropertyYMLVersionNotInt, - ValidationException, - YamlLoadFailure, - YamlParseDictFailure, - YamlParseListFailure, + CompilationError, + DuplicateMacroPatchNameError, + DuplicatePatchPathError, + DuplicateSourcePatchNameError, + JSONValidationError, + DbtInternalError, + SchemaConfigError, + TestConfigError, + ParsingError, + PropertyYMLInvalidTagError, + PropertyYMLMissingVersionError, + PropertyYMLVersionNotIntError, + DbtValidationError, + YamlLoadError, + YamlParseDictError, + YamlParseListError, ) from dbt.events.functions import warn_or_error from dbt.events.types import WrongResourceSchemaFile, NoNodeForYamlKey, MacroNotFoundForPatch @@ -102,8 +102,10 @@ def yaml_from_file(source_file: SchemaSourceFile) -> Dict[str, Any]: try: # source_file.contents can sometimes be None return load_yaml_text(source_file.contents or "", source_file.path) - except ValidationException as e: - raise YamlLoadFailure(source_file.project_name, source_file.path.relative_path, e) + except DbtValidationError as e: + raise YamlLoadError( + project_name=source_file.project_name, path=source_file.path.relative_path, exc=e + ) class ParserRef: @@ -255,7 +257,7 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List original_file_path=target.original_file_path, raw_code=raw_code, ) - raise InvalidTestConfig(exc, node) + raise TestConfigError(exc, node) # lots of time spent in this method def _parse_generic_test( @@ -278,20 +280,20 @@ def _parse_generic_test( self.store_env_vars(target, schema_file_id, self.schema_yaml_vars.env_vars) self.schema_yaml_vars.env_vars = {} - except ParsingException as exc: + except ParsingError as exc: context = _trimmed(str(target)) msg = "Invalid test config given in {}:\n\t{}\n\t@: {}".format( target.original_file_path, exc.msg, context ) - raise ParsingException(msg) from exc + raise ParsingError(msg) from exc - except CompilationException as exc: + except CompilationError as exc: context = _trimmed(str(target)) msg = ( "Invalid generic test configuration given in " f"{target.original_file_path}: \n{exc.msg}\n\t@: {context}" ) - raise CompilationException(msg) from exc + raise CompilationError(msg) from exc original_name = os.path.basename(target.original_file_path) compiled_path = get_pseudo_test_path(builder.compiled_name, original_name) @@ -397,7 +399,7 @@ def render_test_update(self, node, config, builder, schema_file_id): # env_vars should have been updated in the context env_var method except ValidationError as exc: # we got a ValidationError - probably bad types in config() - raise 
InvalidSchemaConfig(exc, node=node) from exc + raise SchemaConfigError(exc, node=node) from exc def parse_node(self, block: GenericTestBlock) -> GenericTestNode: """In schema parsing, we rewrite most of the part of parse_node that @@ -537,16 +539,16 @@ def parse_file(self, block: FileBlock, dct: Dict = None) -> None: def check_format_version(file_path, yaml_dct) -> None: if "version" not in yaml_dct: - raise PropertyYMLMissingVersion(file_path) + raise PropertyYMLMissingVersionError(file_path) version = yaml_dct["version"] # if it's not an integer, the version is malformed, or not # set. Either way, only 'version: 2' is supported. if not isinstance(version, int): - raise PropertyYMLVersionNotInt(file_path, version) + raise PropertyYMLVersionNotIntError(file_path, version) if version != 2: - raise PropertyYMLInvalidTag(file_path, version) + raise PropertyYMLInvalidTagError(file_path, version) Parsed = TypeVar("Parsed", UnpatchedSourceDefinition, ParsedNodePatch, ParsedMacroPatch) @@ -594,7 +596,7 @@ def root_project(self): def get_key_dicts(self) -> Iterable[Dict[str, Any]]: data = self.yaml.data.get(self.key, []) if not isinstance(data, list): - raise ParsingException( + raise ParsingError( "{} must be a list, got {} instead: ({})".format( self.key, type(data), _trimmed(str(data)) ) @@ -607,12 +609,10 @@ def get_key_dicts(self) -> Iterable[Dict[str, Any]]: # check that entry is a dict and that all dict values # are strings if coerce_dict_str(entry) is None: - raise YamlParseListFailure( - path, self.key, data, "expected a dict with string keys" - ) + raise YamlParseListError(path, self.key, data, "expected a dict with string keys") if "name" not in entry: - raise ParsingException("Entry did not contain a name") + raise ParsingError("Entry did not contain a name") # Render the data (except for tests and descriptions). # See the SchemaYamlRenderer @@ -631,8 +631,8 @@ def render_entry(self, dct): try: # This does a deep_map which will fail if there are circular references dct = self.renderer.render_data(dct) - except ParsingException as exc: - raise ParsingException( + except ParsingError as exc: + raise ParsingError( f"Failed to render {self.yaml.file.path.original_file_path} from " f"project {self.project.project_name}: {exc}" ) from exc @@ -655,8 +655,8 @@ def _target_from_dict(self, cls: Type[T], data: Dict[str, Any]) -> T: try: cls.validate(data) return cls.from_dict(data) - except (ValidationError, JSONValidationException) as exc: - raise YamlParseDictFailure(path, self.key, data, exc) + except (ValidationError, JSONValidationError) as exc: + raise YamlParseDictError(path, self.key, data, exc) # The other parse method returns TestBlocks. This one doesn't. 
# This takes the yaml dictionaries in 'sources' keys and uses them @@ -677,7 +677,7 @@ def parse(self) -> List[TestBlock]: # source patches must be unique key = (patch.overrides, patch.name) if key in self.manifest.source_patches: - raise DuplicateSourcePatchName(patch, self.manifest.source_patches[key]) + raise DuplicateSourcePatchNameError(patch, self.manifest.source_patches[key]) self.manifest.source_patches[key] = patch source_file.source_patches.append(key) else: @@ -780,8 +780,8 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]: self.normalize_meta_attribute(data, path) self.normalize_docs_attribute(data, path) node = self._target_type().from_dict(data) - except (ValidationError, JSONValidationException) as exc: - raise YamlParseDictFailure(path, self.key, data, exc) + except (ValidationError, JSONValidationError) as exc: + raise YamlParseDictError(path, self.key, data, exc) else: yield node @@ -790,7 +790,7 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]: def normalize_attribute(self, data, path, attribute): if attribute in data: if "config" in data and attribute in data["config"]: - raise ParsingException( + raise ParsingError( f""" In {path}: found {attribute} dictionary in 'config' dictionary and as top-level key. Remove the top-level key and define it under 'config' dictionary only. @@ -858,7 +858,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: elif patch.yaml_key == "analyses": unique_id = self.manifest.analysis_lookup.get_unique_id(patch.name, None) else: - raise InternalException( + raise DbtInternalError( f"Unexpected yaml_key {patch.yaml_key} for patch in " f"file {source_file.path.original_file_path}" ) @@ -877,7 +877,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: "unique id cannot be enabled in the schema file. They must be enabled " "in `dbt_project.yml` or in the sql files." ) - raise ParsingException(msg) + raise ParsingError(msg) # all nodes in the disabled dict have the same unique_id so just grab the first one # to append with the uniqe id @@ -905,7 +905,7 @@ def parse_patch(self, block: TargetBlock[NodeTarget], refs: ParserRef) -> None: if node: if node.patch_path: package_name, existing_file_path = node.patch_path.split("://") - raise DuplicatePatchPath(patch, existing_file_path) + raise DuplicatePatchPathError(patch, existing_file_path) source_file.append_patch(patch.yaml_key, node.unique_id) # re-calculate the node config with the patch config. 
Always do this @@ -961,7 +961,7 @@ def parse_patch(self, block: TargetBlock[UnparsedMacroUpdate], refs: ParserRef) return if macro.patch_path: package_name, existing_file_path = macro.patch_path.split("://") - raise DuplicateMacroPatchName(patch, existing_file_path) + raise DuplicateMacroPatchNameError(patch, existing_file_path) source_file.macro_patches[patch.name] = unique_id macro.patch(patch) @@ -997,7 +997,7 @@ def parse_exposure(self, unparsed: UnparsedExposure): ) if not isinstance(config, ExposureConfig): - raise InternalException( + raise DbtInternalError( f"Calculated a {type(config)} for an exposure, but expected an ExposureConfig" ) @@ -1063,8 +1063,8 @@ def parse(self): try: UnparsedExposure.validate(data) unparsed = UnparsedExposure.from_dict(data) - except (ValidationError, JSONValidationException) as exc: - raise YamlParseDictFailure(self.yaml.path, self.key, data, exc) + except (ValidationError, JSONValidationError) as exc: + raise YamlParseDictError(self.yaml.path, self.key, data, exc) self.parse_exposure(unparsed) @@ -1100,7 +1100,7 @@ def parse_metric(self, unparsed: UnparsedMetric): ) if not isinstance(config, MetricConfig): - raise InternalException( + raise DbtInternalError( f"Calculated a {type(config)} for a metric, but expected a MetricConfig" ) @@ -1180,6 +1180,6 @@ def parse(self): UnparsedMetric.validate(data) unparsed = UnparsedMetric.from_dict(data) - except (ValidationError, JSONValidationException) as exc: - raise YamlParseDictFailure(self.yaml.path, self.key, data, exc) + except (ValidationError, JSONValidationError) as exc: + raise YamlParseDictError(self.yaml.path, self.key, data, exc) self.parse_metric(unparsed) diff --git a/core/dbt/parser/search.py b/core/dbt/parser/search.py index f8ccc974be4..75e7fa6636c 100644 --- a/core/dbt/parser/search.py +++ b/core/dbt/parser/search.py @@ -7,7 +7,7 @@ from dbt.clients.system import find_matching from dbt.config import Project from dbt.contracts.files import FilePath, AnySourceFile -from dbt.exceptions import ParsingException, InternalException +from dbt.exceptions import ParsingError, DbtInternalError # What's the point of wrapping a SourceFile with this class? 
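
Taken together, these renames mean any adapter that imports the old exception names will fail at import time once it runs against a dbt-core with this series applied. The compatibility shim below is a minimal sketch for adapter code, not part of the patch; it assumes only the renamed classes shown in the hunks above and their pre-rename counterparts:

    # Hedged compatibility sketch for adapter code (not part of this patch).
    try:
        # dbt-core with this rename series applied
        from dbt.exceptions import DbtInternalError, DbtRuntimeError, ParsingError
    except ImportError:
        # older dbt-core, before the rename
        from dbt.exceptions import (
            InternalException as DbtInternalError,
            RuntimeException as DbtRuntimeError,
            ParsingException as ParsingError,
        )

Aliasing the old names to the new ones keeps the rest of the adapter's except clauses unchanged during the transition.
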
@@ -73,7 +73,7 @@ def filesystem_search( file_path_list = [] for result in find_matching(root, relative_dirs, ext, ignore_spec): if "searched_path" not in result or "relative_path" not in result: - raise InternalException("Invalid result from find_matching: {}".format(result)) + raise DbtInternalError("Invalid result from find_matching: {}".format(result)) file_match = FilePath( searched_path=result["searched_path"], relative_path=result["relative_path"], @@ -113,7 +113,7 @@ def extract_blocks(self, source_file: FileBlock) -> Iterable[BlockTag]: assert isinstance(block, BlockTag) yield block - except ParsingException as exc: + except ParsingError as exc: if exc.node is None: exc.add_node(source_file) raise diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py index dffc7d90641..72aec4ee976 100644 --- a/core/dbt/parser/snapshots.py +++ b/core/dbt/parser/snapshots.py @@ -4,7 +4,7 @@ from dbt.dataclass_schema import ValidationError from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode -from dbt.exceptions import InvalidSnapshopConfig +from dbt.exceptions import SnapshopConfigError from dbt.node_types import NodeType from dbt.parser.base import SQLParser from dbt.parser.search import BlockContents, BlockSearcher, FileBlock @@ -68,7 +68,7 @@ def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode: self.set_snapshot_attributes(parsed_node) return parsed_node except ValidationError as exc: - raise InvalidSnapshopConfig(exc, node) + raise SnapshopConfigError(exc, node) def parse_file(self, file_block: FileBlock) -> None: blocks = BlockSearcher( diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index cc9acea98c3..098ebde09c6 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -26,7 +26,7 @@ ) from dbt.events.functions import warn_or_error from dbt.events.types import UnusedTables -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.node_types import NodeType from dbt.parser.schemas import SchemaParser, ParserRef @@ -150,7 +150,7 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition: ) if not isinstance(config, SourceConfig): - raise InternalException( + raise DbtInternalError( f"Calculated a {type(config)} for a source, but expected a SourceConfig" ) diff --git a/core/dbt/parser/sql.py b/core/dbt/parser/sql.py index 82d09c12d6b..98e28aadc19 100644 --- a/core/dbt/parser/sql.py +++ b/core/dbt/parser/sql.py @@ -5,7 +5,7 @@ from dbt.contracts.graph.manifest import SourceFile from dbt.contracts.graph.nodes import SqlNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.macros import MacroParser @@ -35,7 +35,7 @@ def resource_type(self) -> NodeType: def get_compiled_path(block: FileBlock): # we do it this way to make mypy happy if not isinstance(block, SqlBlock): - raise InternalException( + raise DbtInternalError( "While parsing SQL operation, got an actual file block instead of " "an SQL block: {}".format(block) ) diff --git a/core/dbt/semver.py b/core/dbt/semver.py index 7f8913c3600..24f00b333a1 100644 --- a/core/dbt/semver.py +++ b/core/dbt/semver.py @@ -5,7 +5,7 @@ from packaging import version as packaging_version -from dbt.exceptions import VersionsNotCompatibleException +from dbt.exceptions import VersionsNotCompatibleError import 
dbt.utils from dbt.dataclass_schema import dbtClassMixin, StrEnum @@ -94,7 +94,7 @@ def from_version_string(cls, version_string): match = _VERSION_REGEX.match(version_string) if not match: - raise dbt.exceptions.SemverException( + raise dbt.exceptions.SemverError( f'"{version_string}" is not a valid semantic version.' ) @@ -222,7 +222,7 @@ def _try_combine_exact(self, a, b): if a.compare(b) == 0: return a else: - raise VersionsNotCompatibleException() + raise VersionsNotCompatibleError() def _try_combine_lower_bound_with_exact(self, lower, exact): comparison = lower.compare(exact) @@ -230,7 +230,7 @@ def _try_combine_lower_bound_with_exact(self, lower, exact): if comparison < 0 or (comparison == 0 and lower.matcher == Matchers.GREATER_THAN_OR_EQUAL): return exact - raise VersionsNotCompatibleException() + raise VersionsNotCompatibleError() def _try_combine_lower_bound(self, a, b): if b.is_unbounded: @@ -258,7 +258,7 @@ def _try_combine_upper_bound_with_exact(self, upper, exact): if comparison > 0 or (comparison == 0 and upper.matcher == Matchers.LESS_THAN_OR_EQUAL): return exact - raise VersionsNotCompatibleException() + raise VersionsNotCompatibleError() def _try_combine_upper_bound(self, a, b): if b.is_unbounded: @@ -291,7 +291,7 @@ def reduce(self, other): end = self._try_combine_upper_bound(self.end, other.end) if start.compare(end) > 0: - raise VersionsNotCompatibleException() + raise VersionsNotCompatibleError() return VersionRange(start=start, end=end) @@ -379,8 +379,8 @@ def reduce_versions(*args): for version_specifier in version_specifiers: to_return = to_return.reduce(version_specifier.to_range()) - except VersionsNotCompatibleException: - raise VersionsNotCompatibleException( + except VersionsNotCompatibleError: + raise VersionsNotCompatibleError( "Could not find a satisfactory version from options: {}".format([str(a) for a in args]) ) @@ -394,7 +394,7 @@ def versions_compatible(*args): try: reduce_versions(*args) return True - except VersionsNotCompatibleException: + except VersionsNotCompatibleError: return False diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index b7ababdd067..e13f963cc7b 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -16,10 +16,10 @@ RunningStatus, ) from dbt.exceptions import ( - NotImplementedException, - CompilationException, - RuntimeException, - InternalException, + NotImplementedError, + CompilationError, + DbtRuntimeError, + DbtInternalError, ) from dbt.logger import log_manager from dbt.events.functions import fire_event @@ -27,7 +27,7 @@ LogDbtProjectError, LogDbtProfileError, CatchableExceptionOnRun, - InternalExceptionOnRun, + InternalErrorOnRun, GenericExceptionOnRun, NodeConnectionReleaseError, LogDebugStackTrace, @@ -99,17 +99,17 @@ def from_args(cls, args): fire_event(LogDbtProjectError(exc=str(exc))) tracking.track_invalid_invocation(args=args, result_type=exc.result_type) - raise dbt.exceptions.RuntimeException("Could not run dbt") from exc + raise dbt.exceptions.DbtRuntimeError("Could not run dbt") from exc except dbt.exceptions.DbtProfileError as exc: all_profile_names = list(read_profiles(flags.PROFILES_DIR).keys()) fire_event(LogDbtProfileError(exc=str(exc), profiles=all_profile_names)) tracking.track_invalid_invocation(args=args, result_type=exc.result_type) - raise dbt.exceptions.RuntimeException("Could not run dbt") from exc + raise dbt.exceptions.DbtRuntimeError("Could not run dbt") from exc return cls(args, config) @abstractmethod def run(self): - raise dbt.exceptions.NotImplementedException("Not 
Implemented") + raise dbt.exceptions.NotImplementedError("Not Implemented") def interpret_results(self, results): return True @@ -123,7 +123,7 @@ def get_nearest_project_dir(args): if os.path.exists(project_file): return args.project_dir else: - raise dbt.exceptions.RuntimeException( + raise dbt.exceptions.DbtRuntimeError( "fatal: Invalid --project-dir flag. Not a dbt project. " "Missing dbt_project.yml file" ) @@ -137,7 +137,7 @@ def get_nearest_project_dir(args): return cwd cwd = os.path.dirname(cwd) - raise dbt.exceptions.RuntimeException( + raise dbt.exceptions.DbtRuntimeError( "fatal: Not a dbt project (or any of the parent directories). " "Missing dbt_project.yml file" ) @@ -328,7 +328,7 @@ def _handle_catchable_exception(self, e, ctx): return str(e) def _handle_internal_exception(self, e, ctx): - fire_event(InternalExceptionOnRun(build_path=self.node.build_path, exc=str(e))) + fire_event(InternalErrorOnRun(build_path=self.node.build_path, exc=str(e))) return str(e) def _handle_generic_exception(self, e, ctx): @@ -344,10 +344,10 @@ def _handle_generic_exception(self, e, ctx): return str(e) def handle_exception(self, e, ctx): - catchable_errors = (CompilationException, RuntimeException) + catchable_errors = (CompilationError, DbtRuntimeError) if isinstance(e, catchable_errors): error = self._handle_catchable_exception(e, ctx) - elif isinstance(e, InternalException): + elif isinstance(e, DbtInternalError): error = self._handle_internal_exception(e, ctx) else: error = self._handle_generic_exception(e, ctx) @@ -402,16 +402,16 @@ def _safe_release_connection(self): return None def before_execute(self): - raise NotImplementedException() + raise NotImplementedError() def execute(self, compiled_node, manifest): - raise NotImplementedException() + raise NotImplementedError() def run(self, compiled_node, manifest): return self.execute(compiled_node, manifest) def after_execute(self, result): - raise NotImplementedException() + raise NotImplementedError() def _skip_caused_by_ephemeral_failure(self): if self.skip_cause is None or self.skip_cause.node is None: @@ -437,7 +437,7 @@ def on_skip(self): ) print_run_result_error(result=self.skip_cause, newline=False) if self.skip_cause is None: # mypy appeasement - raise InternalException( + raise DbtInternalError( "Skip cause not set but skip was somehow caused by an ephemeral failure" ) # set an error so dbt will exit with an error code diff --git a/core/dbt/task/build.py b/core/dbt/task/build.py index aabc561bd7c..8a5dc39c9b7 100644 --- a/core/dbt/task/build.py +++ b/core/dbt/task/build.py @@ -5,7 +5,7 @@ from dbt.adapters.factory import get_adapter from dbt.contracts.results import NodeStatus -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.graph import ResourceTypeSelector from dbt.node_types import NodeType from dbt.task.test import TestSelector @@ -44,7 +44,7 @@ def resource_types(self): def get_node_selector(self) -> ResourceTypeSelector: if self.manifest is None or self.graph is None: - raise InternalException("manifest and graph must be set to get node selection") + raise DbtInternalError("manifest and graph must be set to get node selection") resource_types = self.resource_types @@ -66,7 +66,7 @@ def get_runner_type(self, node): def compile_manifest(self): if self.manifest is None: - raise InternalException("compile_manifest called before manifest was loaded") + raise DbtInternalError("compile_manifest called before manifest was loaded") adapter = get_adapter(self.config) compiler = 
adapter.get_compiler()
 self.graph = compiler.compile(self.manifest, add_test_edges=True)
diff --git a/core/dbt/task/compile.py b/core/dbt/task/compile.py
index 740d35d37e9..995063491f6 100644
--- a/core/dbt/task/compile.py
+++ b/core/dbt/task/compile.py
@@ -6,7 +6,7 @@
 from dbt.contracts.graph.manifest import WritableManifest
 from dbt.contracts.results import RunStatus, RunResult
-from dbt.exceptions import InternalException, RuntimeException
+from dbt.exceptions import DbtInternalError, DbtRuntimeError
 from dbt.graph import ResourceTypeSelector
 from dbt.events.functions import fire_event
 from dbt.events.types import CompileComplete
@@ -43,7 +43,7 @@ def raise_on_first_error(self):
 def get_node_selector(self) -> ResourceTypeSelector:
 if self.manifest is None or self.graph is None:
- raise InternalException("manifest and graph must be set to get perform node selection")
+ raise DbtInternalError("manifest and graph must be set to perform node selection")
 return ResourceTypeSelector(
 graph=self.graph,
 manifest=self.manifest,
@@ -63,12 +63,12 @@ def _get_deferred_manifest(self) -> Optional[WritableManifest]:
 state = self.previous_state
 if state is None:
- raise RuntimeException(
+ raise DbtRuntimeError(
 "Received a --defer argument, but no value was provided to --state"
 )
 if state.manifest is None:
- raise RuntimeException(f'Could not find manifest in --state path: "{self.args.state}"')
+ raise DbtRuntimeError(f'Could not find manifest in --state path: "{self.args.state}"')
 return state.manifest
 def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
@@ -76,7 +76,7 @@ def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]):
 if deferred_manifest is None:
 return
 if self.manifest is None:
- raise InternalException(
+ raise DbtInternalError(
 "Expected to defer to manifest, but there is no runtime manifest to defer from!"
 )
 self.manifest.merge_from_artifact(
diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py
index c4898b779fa..819bc4164a3 100644
--- a/core/dbt/task/freshness.py
+++ b/core/dbt/task/freshness.py
@@ -15,7 +15,7 @@
 SourceFreshnessResult,
 FreshnessStatus,
 )
-from dbt.exceptions import RuntimeException, InternalException
+from dbt.exceptions import DbtRuntimeError, DbtInternalError
 from dbt.events.functions import fire_event
 from dbt.events.types import (
 FreshnessCheckComplete,
@@ -33,7 +33,7 @@ class FreshnessRunner(BaseRunner):
 def on_skip(self):
- raise RuntimeException("Freshness: nodes cannot be skipped!")
+ raise DbtRuntimeError("Freshness: nodes cannot be skipped!")
 def before_execute(self):
 description = "freshness of {0.source_name}.{0.name}".format(self.node)
@@ -100,7 +100,7 @@ def execute(self, compiled_node, manifest):
 # therefore loaded_at_field should be a str. If this invariant is
 # broken, raise!
 if compiled_node.loaded_at_field is None:
- raise InternalException(
+ raise DbtInternalError(
 "Got to execute for source freshness of a source that has no loaded_at_field!"
 )
@@ -132,7 +132,7 @@ def execute(self, compiled_node, manifest):
 def compile(self, manifest):
 if self.node.resource_type != NodeType.Source:
 # should be unreachable...
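# --- editor's aside (not part of the patch) ----------------------------------
# The rename makes the existing division of labor easier to see. A minimal
# sketch of the convention, inferred from the call sites in this diff; the
# conditions below are illustrative, not dbt's actual code:
#
#     if self.manifest is None:                  # broken dbt invariant
#         raise DbtInternalError("manifest was None")      # "this is our bug"
#     if not os.path.exists("dbt_project.yml"):  # user-correctable situation
#         raise DbtRuntimeError("fatal: Not a dbt project")
# ------------------------------------------------------------------------------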
- raise RuntimeException("fresnhess runner: got a non-Source")
+ raise DbtRuntimeError("freshness runner: got a non-Source")
 # we don't do anything interesting when we compile a source node
 return self.node
@@ -162,7 +162,7 @@ def raise_on_first_error(self):
 def get_node_selector(self):
 if self.manifest is None or self.graph is None:
- raise InternalException("manifest and graph must be set to get perform node selection")
+ raise DbtInternalError("manifest and graph must be set to perform node selection")
 return FreshnessSelector(
 graph=self.graph,
 manifest=self.manifest,
diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py
index 87723a530a1..19fa4c1bde9 100644
--- a/core/dbt/task/generate.py
+++ b/core/dbt/task/generate.py
@@ -22,7 +22,7 @@
 ColumnMetadata,
 CatalogArtifact,
 )
-from dbt.exceptions import InternalException, AmbiguousCatalogMatch
+from dbt.exceptions import DbtInternalError, AmbiguousCatalogMatchError
 from dbt.include.global_project import DOCS_INDEX_FILE_PATH
 from dbt.events.functions import fire_event
 from dbt.events.types import (
@@ -81,7 +81,7 @@ def get_table(self, data: PrimitiveDict) -> CatalogTable:
 str(data["table_name"]),
 )
 except KeyError as exc:
- raise dbt.exceptions.CompilationException(
+ raise dbt.exceptions.CompilationError(
 "Catalog information missing required key {} (got {})".format(exc, data)
 )
 table: CatalogTable
@@ -119,7 +119,7 @@ def make_unique_id_map(
 unique_ids = source_map.get(table.key(), set())
 for unique_id in unique_ids:
 if unique_id in sources:
- raise AmbiguousCatalogMatch(
+ raise AmbiguousCatalogMatchError(
 unique_id,
 sources[unique_id].to_dict(omit_none=True),
 table.to_dict(omit_none=True),
@@ -201,7 +201,7 @@ def get_unique_id_mapping(
 class GenerateTask(CompileTask):
 def _get_manifest(self) -> Manifest:
 if self.manifest is None:
- raise InternalException("manifest should not be None in _get_manifest")
+ raise DbtInternalError("manifest should not be None in _get_manifest")
 return self.manifest
 def run(self) -> CatalogArtifact:
@@ -232,7 +232,7 @@ def run(self) -> CatalogArtifact:
 shutil.copytree(asset_path, to_asset_path)
 if self.manifest is None:
- raise InternalException("self.manifest was None in run!")
+ raise DbtInternalError("self.manifest was None in run!")
 adapter = get_adapter(self.config)
 with adapter.connection_named("generate_catalog"):
diff --git a/core/dbt/task/init.py b/core/dbt/task/init.py
index b1769d2e729..f3a7dd28e75 100644
--- a/core/dbt/task/init.py
+++ b/core/dbt/task/init.py
@@ -252,7 +252,7 @@ def run(self):
 try:
 move_to_nearest_project_dir(self.args)
 in_project = True
- except dbt.exceptions.RuntimeException:
+ except dbt.exceptions.DbtRuntimeError:
 in_project = False
 if in_project:
diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py
index 49fb07b359a..fa8d3ccd8d2 100644
--- a/core/dbt/task/list.py
+++ b/core/dbt/task/list.py
@@ -7,7 +7,7 @@
 from dbt.node_types import NodeType
 from dbt.events.functions import warn_or_error
 from dbt.events.types import NoNodesSelected
-from dbt.exceptions import RuntimeException, InternalException
+from dbt.exceptions import DbtRuntimeError, DbtInternalError
 from dbt.logger import log_manager
 from dbt.events.eventmgr import EventLevel
@@ -44,9 +44,9 @@ def __init__(self, args, config):
 super().__init__(args, config)
 if self.args.models:
 if self.args.select:
- raise RuntimeException('"models" and "select" are mutually exclusive arguments')
+ raise DbtRuntimeError('"models" and "select" are mutually exclusive arguments')
 if self.args.resource_types:
- raise RuntimeException(
+ raise DbtRuntimeError(
 '"models" and "resource_type" are mutually exclusive '
 "arguments"
 )
@@ -72,7 +72,7 @@ def _iterate_selected_nodes(self):
 warn_or_error(NoNodesSelected())
 return
 if self.manifest is None:
- raise InternalException("manifest is None in _iterate_selected_nodes")
+ raise DbtInternalError("manifest is None in _iterate_selected_nodes")
 for node in nodes:
 if node in self.manifest.nodes:
 yield self.manifest.nodes[node]
@@ -83,7 +83,7 @@ def _iterate_selected_nodes(self):
 elif node in self.manifest.metrics:
 yield self.manifest.metrics[node]
 else:
- raise RuntimeException(
+ raise DbtRuntimeError(
 f'Got an unexpected result from node selection: "{node}"'
 f"Expected a source or a node!"
 )
@@ -143,7 +143,7 @@ def run(self):
 elif output == "path":
 generator = self.generate_paths
 else:
- raise InternalException("Invalid output {}".format(output))
+ raise DbtInternalError("Invalid output {}".format(output))
 return self.output_results(generator())
@@ -185,7 +185,7 @@ def defer_to_manifest(self, adapter, selected_uids):
 def get_node_selector(self):
 if self.manifest is None or self.graph is None:
- raise InternalException("manifest and graph must be set to get perform node selection")
+ raise DbtInternalError("manifest and graph must be set to perform node selection")
 if self.resource_types == [NodeType.Test]:
 return TestSelector(
 graph=self.graph,
diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py
index ff468f4dd41..145225be9d5 100644
--- a/core/dbt/task/run.py
+++ b/core/dbt/task/run.py
@@ -21,11 +21,11 @@
 from dbt.contracts.graph.nodes import HookNode, ResultNode
 from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus, BaseResult
 from dbt.exceptions import (
- CompilationException,
- InternalException,
- MissingMaterialization,
- RuntimeException,
- ValidationException,
+ CompilationError,
+ DbtInternalError,
+ MissingMaterializationError,
+ DbtRuntimeError,
+ DbtValidationError,
 )
 from dbt.events.functions import fire_event, get_invocation_id
 from dbt.events.types import (
@@ -106,7 +106,7 @@ def get_hook(source, index):
 def track_model_run(index, num_nodes, run_model_result):
 if tracking.active_user is None:
- raise InternalException("cannot track model run with no active user")
+ raise DbtInternalError("cannot track model run with no active user")
 invocation_id = get_invocation_id()
 tracking.track_model_run(
 {
@@ -135,14 +135,14 @@ def _validate_materialization_relations_dict(inp: Dict[Any, Any], model) -> List
 'Invalid return value from materialization, "relations" '
 "not found, got keys: {}".format(list(inp))
 )
- raise CompilationException(msg, node=model) from None
+ raise CompilationError(msg, node=model) from None
 if not isinstance(relations_value, list):
 msg = (
 'Invalid return value from materialization, "relations" '
 "not a list, got: {}".format(relations_value)
 )
- raise CompilationException(msg, node=model) from None
+ raise CompilationError(msg, node=model) from None
 relations: List[BaseRelation] = []
 for relation in relations_value:
@@ -151,7 +151,7 @@ def _validate_materialization_relations_dict(inp: Dict[Any, Any], model) -> List
 "Invalid return value from materialization, "
 '"relations" contains non-Relation: {}'.format(relation)
 )
- raise CompilationException(msg, node=model)
+ raise CompilationError(msg, node=model)
 assert isinstance(relation, BaseRelation)
 relations.append(relation)
@@ -213,7 +213,7 @@ def after_execute(self, result):
 def _build_run_model_result(self, model, context):
context["load_result"]("main") if not result: - raise RuntimeException("main is not being called during running model") + raise DbtRuntimeError("main is not being called during running model") adapter_response = {} if isinstance(result.response, dbtClassMixin): adapter_response = result.response.to_dict(omit_none=True) @@ -234,7 +234,7 @@ def _materialization_relations(self, result: Any, model) -> List[BaseRelation]: 'The materialization ("{}") did not explicitly return a ' "list of relations to add to the cache.".format(str(model.get_materialization())) ) - raise CompilationException(msg, node=model) + raise CompilationError(msg, node=model) if isinstance(result, dict): return _validate_materialization_relations_dict(result, model) @@ -243,7 +243,7 @@ def _materialization_relations(self, result: Any, model) -> List[BaseRelation]: "Invalid return value from materialization, expected a dict " 'with key "relations", got: {}'.format(str(result)) ) - raise CompilationException(msg, node=model) + raise CompilationError(msg, node=model) def execute(self, model, manifest): context = generate_runtime_model_context(model, self.config, manifest) @@ -253,12 +253,12 @@ def execute(self, model, manifest): ) if materialization_macro is None: - raise MissingMaterialization( + raise MissingMaterializationError( materialization=model.get_materialization(), adapter_type=self.adapter.type() ) if "config" not in context: - raise InternalException( + raise DbtInternalError( "Invalid materialization context generated, missing config: {}".format(context) ) context_config = context["config"] @@ -267,7 +267,7 @@ def execute(self, model, manifest): model_lang_supported = model.language in materialization_macro.supported_languages if mat_has_supported_langs and not model_lang_supported: str_langs = [str(lang) for lang in materialization_macro.supported_languages] - raise ValidationException( + raise DbtValidationError( f'Materialization "{materialization_macro.name}" only supports languages {str_langs}; ' f'got "{model.language}"' ) @@ -315,7 +315,7 @@ def _hook_keyfunc(self, hook: HookNode) -> Tuple[str, Optional[int]]: def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]: if self.manifest is None: - raise InternalException("self.manifest was None in get_hooks_by_type") + raise DbtInternalError("self.manifest was None in get_hooks_by_type") nodes = self.manifest.nodes.values() # find all hooks defined in the manifest (could be multiple projects) @@ -395,7 +395,7 @@ def safe_run_hooks( ) -> None: try: self.run_hooks(adapter, hook_type, extra_context) - except RuntimeException as exc: + except DbtRuntimeError as exc: fire_event(DatabaseErrorRunningHook(hook_type=hook_type.value)) self.node_results.append( BaseResult( @@ -457,7 +457,7 @@ def after_run(self, adapter, results): def get_node_selector(self) -> ResourceTypeSelector: if self.manifest is None or self.graph is None: - raise InternalException("manifest and graph must be set to get perform node selection") + raise DbtInternalError("manifest and graph must be set to get perform node selection") return ResourceTypeSelector( graph=self.graph, manifest=self.manifest, diff --git a/core/dbt/task/run_operation.py b/core/dbt/task/run_operation.py index e510c70c37d..63384f1c21f 100644 --- a/core/dbt/task/run_operation.py +++ b/core/dbt/task/run_operation.py @@ -10,7 +10,7 @@ from dbt.adapters.factory import get_adapter from dbt.config.utils import parse_cli_vars from dbt.contracts.results import RunOperationResultsArtifact -from dbt.exceptions import 
InternalException +from dbt.exceptions import DbtInternalError from dbt.events.functions import fire_event from dbt.events.types import ( RunningOperationCaughtError, @@ -34,7 +34,7 @@ def _get_kwargs(self) -> Dict[str, Any]: def compile_manifest(self) -> None: if self.manifest is None: - raise InternalException("manifest was None in compile_manifest") + raise DbtInternalError("manifest was None in compile_manifest") def _run_unsafe(self) -> agate.Table: adapter = get_adapter(self.config) diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 7143c286675..58504332ad2 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -44,10 +44,10 @@ from dbt.contracts.results import NodeStatus, RunExecutionResult, RunningStatus from dbt.contracts.state import PreviousState from dbt.exceptions import ( - InternalException, - NotImplementedException, - RuntimeException, - FailFastException, + DbtInternalError, + NotImplementedError, + DbtRuntimeError, + FailFastError, ) from dbt.graph import GraphQueue, NodeSelector, SelectionSpec, parse_difference, Graph @@ -83,7 +83,7 @@ def load_manifest(self): def compile_manifest(self): if self.manifest is None: - raise InternalException("compile_manifest called before manifest was loaded") + raise DbtInternalError("compile_manifest called before manifest was loaded") # we cannot get adapter in init since it will break rpc #5579 adapter = get_adapter(self.config) @@ -150,7 +150,7 @@ def get_selection_spec(self) -> SelectionSpec: @abstractmethod def get_node_selector(self) -> NodeSelector: - raise NotImplementedException(f"get_node_selector not implemented for task {type(self)}") + raise NotImplementedError(f"get_node_selector not implemented for task {type(self)}") @abstractmethod def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]): @@ -164,7 +164,7 @@ def get_graph_queue(self) -> GraphQueue: def _runtime_initialize(self): super()._runtime_initialize() if self.manifest is None or self.graph is None: - raise InternalException("_runtime_initialize never loaded the manifest and graph!") + raise DbtInternalError("_runtime_initialize never loaded the manifest and graph!") self.job_queue = self.get_graph_queue() @@ -176,7 +176,7 @@ def _runtime_initialize(self): elif uid in self.manifest.sources: self._flattened_nodes.append(self.manifest.sources[uid]) else: - raise InternalException( + raise DbtInternalError( f"Node selection returned {uid}, expected a node or a source" ) @@ -186,7 +186,7 @@ def raise_on_first_error(self): return False def get_runner_type(self, node): - raise NotImplementedException("Not Implemented") + raise NotImplementedError("Not Implemented") def result_path(self): return os.path.join(self.config.target_path, RESULT_FILE_NAME) @@ -246,7 +246,7 @@ def call_runner(self, runner): fail_fast = flags.FAIL_FAST if result.status in (NodeStatus.Error, NodeStatus.Fail) and fail_fast: - self._raise_next_tick = FailFastException( + self._raise_next_tick = FailFastError( msg="Failing early due to test failure or runtime error", result=result, node=getattr(result, "node", None), @@ -255,7 +255,7 @@ def call_runner(self, runner): # if we raise inside a thread, it'll just get silently swallowed. # stash the error message we want here, and it will check the # next 'tick' - should be soon since our thread is about to finish! 
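# --- editor's aside (not part of the patch) ----------------------------------
# The comment above describes the "stash now, raise on the main thread" pattern
# that FailFastError and DbtRuntimeError flow through. A compressed sketch;
# callback and _raise_set_error are real method names from this file, but their
# bodies are simplified here:
#
#     def callback(self, result):    # runs on a worker thread; an exception
#         # raised here would be silently swallowed by the pool
#         if result.status in (NodeStatus.Error, NodeStatus.Fail):
#             self._raise_next_tick = FailFastError(msg=result.message)
#
#     def _raise_set_error(self):    # polled from the main thread each tick
#         if self._raise_next_tick is not None:
#             raise self._raise_next_tick
# ------------------------------------------------------------------------------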
- self._raise_next_tick = RuntimeException(result.message)
+ self._raise_next_tick = DbtRuntimeError(result.message)
 return result
@@ -280,7 +280,7 @@ def _raise_set_error(self):
 def run_queue(self, pool):
 """Given a pool, submit jobs from the queue to the pool."""
 if self.job_queue is None:
- raise InternalException("Got to run_queue with no job queue set")
+ raise DbtInternalError("Got to run_queue with no job queue set")
 def callback(result):
 """Note: mark_done, at a minimum, must happen here or dbt will
@@ -289,7 +289,7 @@ def callback(result):
 self._handle_result(result)
 if self.job_queue is None:
- raise InternalException("Got to run_queue callback with no job queue set")
+ raise DbtInternalError("Got to run_queue callback with no job queue set")
 self.job_queue.mark_done(result.node.unique_id)
 while not self.job_queue.empty():
@@ -331,7 +331,7 @@ def _handle_result(self, result):
 node = result.node
 if self.manifest is None:
- raise InternalException("manifest was None in _handle_result")
+ raise DbtInternalError("manifest was None in _handle_result")
 if isinstance(node, SourceDefinition):
 self.manifest.update_source(node)
@@ -387,7 +387,7 @@ def execute_nodes(self):
 try:
 self.run_queue(pool)
- except FailFastException as failure:
+ except FailFastError as failure:
 self._cancel_connections(pool)
 print_run_result_error(failure.result)
 raise
@@ -404,7 +404,7 @@ def execute_nodes(self):
 def _mark_dependent_errors(self, node_id, result, cause):
 if self.graph is None:
- raise InternalException("graph is None in _mark_dependent_errors")
+ raise DbtInternalError("graph is None in _mark_dependent_errors")
 for dep_node_id in self.graph.get_dependent_nodes(node_id):
 self._skipped_children[dep_node_id] = cause
@@ -458,7 +458,7 @@ def run(self):
 self._runtime_initialize()
 if self._flattened_nodes is None:
- raise InternalException("after _runtime_initialize, _flattened_nodes was still None")
+ raise DbtInternalError("after _runtime_initialize, _flattened_nodes was still None")
 if len(self._flattened_nodes) == 0:
 with TextOnly():
@@ -514,7 +514,7 @@ def interpret_results(cls, results):
 def get_model_schemas(self, adapter, selected_uids: Iterable[str]) -> Set[BaseRelation]:
 if self.manifest is None:
- raise InternalException("manifest was None in get_model_schemas")
+ raise DbtInternalError("manifest was None in get_model_schemas")
 result: Set[BaseRelation] = set()
 for node in self.manifest.nodes.values():
diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py
index 564a55b1577..58b6aa25bda 100644
--- a/core/dbt/task/seed.py
+++ b/core/dbt/task/seed.py
@@ -6,7 +6,7 @@
 )
 from dbt.contracts.results import RunStatus
-from dbt.exceptions import InternalException
+from dbt.exceptions import DbtInternalError
 from dbt.graph import ResourceTypeSelector
 from dbt.logger import TextOnly
 from dbt.events.functions import fire_event
@@ -73,7 +73,7 @@ def raise_on_first_error(self):
 def get_node_selector(self):
 if self.manifest is None or self.graph is None:
- raise InternalException("manifest and graph must be set to get perform node selection")
+ raise DbtInternalError("manifest and graph must be set to perform node selection")
 return ResourceTypeSelector(
 graph=self.graph,
 manifest=self.manifest,
diff --git a/core/dbt/task/snapshot.py b/core/dbt/task/snapshot.py
index 8de99864b96..f5e8a549bb2 100644
--- a/core/dbt/task/snapshot.py
+++ b/core/dbt/task/snapshot.py
@@ -1,6 +1,6 @@
 from .run import ModelRunner, RunTask
-from dbt.exceptions import InternalException
+from dbt.exceptions import DbtInternalError
 from dbt.events.functions import fire_event
 from dbt.events.base_types import EventLevel
 from dbt.events.types import LogSnapshotResult
@@ -37,7 +37,7 @@ def raise_on_first_error(self):
 def get_node_selector(self):
 if self.manifest is None or self.graph is None:
- raise InternalException("manifest and graph must be set to get perform node selection")
+ raise DbtInternalError("manifest and graph must be set to perform node selection")
 return ResourceTypeSelector(
 graph=self.graph,
 manifest=self.manifest,
diff --git a/core/dbt/task/sql.py b/core/dbt/task/sql.py
index 4a267bd91bf..4f662383d74 100644
--- a/core/dbt/task/sql.py
+++ b/core/dbt/task/sql.py
@@ -25,7 +25,7 @@ def __init__(self, config, adapter, node, node_index, num_nodes):
 def handle_exception(self, e, ctx):
 fire_event(SQLRunnerException(exc=str(e), exc_info=traceback.format_exc()))
 if isinstance(e, dbt.exceptions.Exception):
- if isinstance(e, dbt.exceptions.RuntimeException):
+ if isinstance(e, dbt.exceptions.DbtRuntimeError):
 e.add_node(ctx.node)
 return e
@@ -51,7 +51,7 @@ def error_result(self, node, error, start_time, timing_info):
 raise error
 def ephemeral_result(self, node, start_time, timing_info):
- raise dbt.exceptions.NotImplementedException("cannot execute ephemeral nodes remotely!")
+ raise dbt.exceptions.NotImplementedError("cannot execute ephemeral nodes remotely!")
 class SqlCompileRunner(GenericSqlRunner[RemoteCompileResult]):
diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py
index b55eed940ac..48422b5e726 100644
--- a/core/dbt/task/test.py
+++ b/core/dbt/task/test.py
@@ -22,9 +22,9 @@
 LogStartLine,
 )
 from dbt.exceptions import (
- InternalException,
- InvalidBoolean,
- MissingMaterialization,
+ DbtInternalError,
+ BooleanError,
+ MissingMaterializationError,
 )
 from dbt.graph import (
 ResourceTypeSelector,
@@ -51,7 +51,7 @@ def convert_bool_type(field) -> bool:
 try:
 return bool(strtobool(field))  # type: ignore
 except ValueError:
- raise InvalidBoolean(field, "get_test_sql")
+ raise BooleanError(field, "get_test_sql")
 # need this so we catch both true bools and 0/1
 return bool(field)
@@ -101,10 +101,10 @@ def execute_test(
 )
 if materialization_macro is None:
- raise MissingMaterialization(materialization=test.get_materialization(), adapter_type=self.adapter.type())
+ raise MissingMaterializationError(materialization=test.get_materialization(), adapter_type=self.adapter.type())
 if "config" not in context:
- raise InternalException(
+ raise DbtInternalError(
 "Invalid materialization context generated, missing config: {}".format(context)
 )
@@ -118,14 +118,14 @@ def execute_test(
 table = result["table"]
 num_rows = len(table.rows)
 if num_rows != 1:
- raise InternalException(
+ raise DbtInternalError(
 f"dbt internally failed to execute {test.unique_id}: "
 f"Returned {num_rows} rows, but expected "
 f"1 row"
 )
 num_cols = len(table.columns)
 if num_cols != 3:
- raise InternalException(
+ raise DbtInternalError(
 f"dbt internally failed to execute {test.unique_id}: "
 f"Returned {num_cols} columns, but expected "
 f"3 columns"
@@ -203,7 +203,7 @@ def raise_on_first_error(self):
 def get_node_selector(self) -> TestSelector:
 if self.manifest is None or self.graph is None:
- raise InternalException("manifest and graph must be set to get perform node selection")
+ raise DbtInternalError("manifest and graph must be set to perform node selection")
 return TestSelector(
 graph=self.graph,
 manifest=self.manifest,
diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py
index 2d7ae5ded67..9fb34ff59a4 100644
--- a/core/dbt/tests/fixtures/project.py
+++ b/core/dbt/tests/fixtures/project.py
@@ -6,7 +6,7 @@
 import warnings
 import yaml
-from dbt.exceptions import CompilationException, DatabaseException
+from dbt.exceptions import CompilationError, DbtDatabaseError
 import dbt.flags as flags
 from dbt.config.runtime import RuntimeConfig
 from dbt.adapters.factory import get_adapter, register_adapter, reset_adapters, get_adapter_by_type
@@ -494,10 +494,10 @@ def project(
 # a `load_dependencies` method.
 # Macros gets executed as part of drop_scheme in core/dbt/adapters/sql/impl.py. When
 # the macros have errors (which is what we're actually testing for...) they end up
- # throwing CompilationExceptions or DatabaseExceptions
+ # throwing CompilationErrors or DbtDatabaseErrors
 try:
 project.drop_test_schema()
- except (KeyError, AttributeError, CompilationException, DatabaseException):
+ except (KeyError, AttributeError, CompilationError, DbtDatabaseError):
 pass
 os.chdir(orig_cwd)
 cleanup_event_logger()
diff --git a/core/dbt/utils.py b/core/dbt/utils.py
index 6afe9d1e26d..e9c4677130d 100644
--- a/core/dbt/utils.py
+++ b/core/dbt/utils.py
@@ -15,7 +15,7 @@
 from pathlib import PosixPath, WindowsPath
 from contextlib import contextmanager
-from dbt.exceptions import ConnectionException, DuplicateAlias
+from dbt.exceptions import ConnectionError, DuplicateAliasError
 from dbt.events.functions import fire_event
 from dbt.events.types import RetryExternalCall, RecordRetryException
 from dbt import flags
@@ -92,13 +92,13 @@ def get_model_name_or_none(model):
 def get_dbt_macro_name(name):
 if name is None:
- raise dbt.exceptions.InternalException("Got None for a macro name!")
+ raise dbt.exceptions.DbtInternalError("Got None for a macro name!")
 return f"{MACRO_PREFIX}{name}"
 def get_dbt_docs_name(name):
 if name is None:
- raise dbt.exceptions.InternalException("Got None for a doc name!")
+ raise dbt.exceptions.DbtInternalError("Got None for a doc name!")
 return f"{DOCS_PREFIX}{name}"
@@ -228,7 +228,7 @@ def deep_map_render(func: Callable[[Any, Tuple[Union[str, int], ...]], Any], val
 return _deep_map_render(func, value, ())
 except RuntimeError as exc:
 if "maximum recursion depth exceeded" in str(exc):
- raise dbt.exceptions.RecursionException("Cycle detected in deep_map_render")
+ raise dbt.exceptions.RecursionError("Cycle detected in deep_map_render")
 raise
@@ -365,7 +365,7 @@ def translate_mapping(self, kwargs: Mapping[str, Any]) -> Dict[str, Any]:
 for key, value in kwargs.items():
 canonical_key = self.aliases.get(key, key)
 if canonical_key in result:
- raise DuplicateAlias(kwargs, self.aliases, canonical_key)
+ raise DuplicateAliasError(kwargs, self.aliases, canonical_key)
 result[canonical_key] = self.translate_value(value)
 return result
@@ -385,7 +385,7 @@ def translate(self, value: Mapping[str, Any]) -> Dict[str, Any]:
 return self.translate_mapping(value)
 except RuntimeError as exc:
 if "maximum recursion depth exceeded" in str(exc):
- raise dbt.exceptions.RecursionException(
+ raise dbt.exceptions.RecursionError(
 "Cycle detected in a value passed to translate!"
 )
 raise
@@ -403,7 +403,7 @@ def translate_aliases(
 :returns: A dict containing all the values in kwargs
 referenced by their canonical key.
- :raises: `AliasException`, if a canonical key is defined more than once.
+ :raises: `AliasError`, if a canonical key is defined more than once.
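 Example (editor's illustration, not from the patch; names and values
 are made up):

     translate_aliases({"pass": "secret"}, {"pass": "password"})
     # -> {"password": "secret"}
     translate_aliases({"pass": "a", "password": "b"}, {"pass": "password"})
     # -> raises DuplicateAliasError: both keys map to the canonical "password"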
""" translator = Translator(aliases, recurse) return translator.translate(kwargs) @@ -624,7 +624,7 @@ def _connection_exception_retry(fn, max_attempts: int, attempt: int = 0): time.sleep(1) return _connection_exception_retry(fn, max_attempts, attempt + 1) else: - raise ConnectionException("External connection exception occurred: " + str(exc)) + raise ConnectionError("External connection exception occurred: " + str(exc)) # This is used to serialize the args in the run_results and in the logs. diff --git a/plugins/postgres/dbt/adapters/postgres/connections.py b/plugins/postgres/dbt/adapters/postgres/connections.py index df24b0f9118..afa74a46339 100644 --- a/plugins/postgres/dbt/adapters/postgres/connections.py +++ b/plugins/postgres/dbt/adapters/postgres/connections.py @@ -73,19 +73,19 @@ def exception_handler(self, sql): logger.debug("Failed to release connection!") pass - raise dbt.exceptions.DatabaseException(str(e).strip()) from e + raise dbt.exceptions.DbtDatabaseError(str(e).strip()) from e except Exception as e: logger.debug("Error running SQL: {}", sql) logger.debug("Rolling back transaction.") self.rollback_if_open() - if isinstance(e, dbt.exceptions.RuntimeException): + if isinstance(e, dbt.exceptions.DbtRuntimeError): # during a sql query, an internal to dbt exception was raised. # this sounds a lot like a signal handler and probably has # useful information, so raise it without modification. raise - raise dbt.exceptions.RuntimeException(e) from e + raise dbt.exceptions.DbtRuntimeError(e) from e @classmethod def open(cls, connection): diff --git a/plugins/postgres/dbt/adapters/postgres/impl.py b/plugins/postgres/dbt/adapters/postgres/impl.py index 78b86234eae..9a5d5d3f8f6 100644 --- a/plugins/postgres/dbt/adapters/postgres/impl.py +++ b/plugins/postgres/dbt/adapters/postgres/impl.py @@ -9,11 +9,11 @@ from dbt.adapters.postgres import PostgresRelation from dbt.dataclass_schema import dbtClassMixin, ValidationError from dbt.exceptions import ( - CrossDbReferenceProhibited, - IndexConfigNotDict, - InvalidIndexConfig, - RuntimeException, - UnexpectedDbReference, + CrossDbReferenceProhibitedError, + IndexConfigNotDictError, + IndexConfigError, + DbtRuntimeError, + UnexpectedDbReferenceError, ) import dbt.utils @@ -46,9 +46,9 @@ def parse(cls, raw_index) -> Optional["PostgresIndexConfig"]: cls.validate(raw_index) return cls.from_dict(raw_index) except ValidationError as exc: - raise InvalidIndexConfig(exc) + raise IndexConfigError(exc) except TypeError: - raise IndexConfigNotDict(raw_index) + raise IndexConfigNotDictError(raw_index) @dataclass @@ -74,7 +74,7 @@ def verify_database(self, database): database = database.strip('"') expected = self.config.credentials.database if database.lower() != expected.lower(): - raise UnexpectedDbReference(self.type(), database, expected) + raise UnexpectedDbReferenceError(self.type(), database, expected) # return an empty string on success so macros can call this return "" @@ -107,8 +107,8 @@ def _get_catalog_schemas(self, manifest): schemas = super()._get_catalog_schemas(manifest) try: return schemas.flatten() - except RuntimeException as exc: - raise CrossDbReferenceProhibited(self.type(), exc.msg) + except DbtRuntimeError as exc: + raise CrossDbReferenceProhibitedError(self.type(), exc.msg) def _link_cached_relations(self, manifest): schemas: Set[str] = set() diff --git a/plugins/postgres/dbt/adapters/postgres/relation.py b/plugins/postgres/dbt/adapters/postgres/relation.py index 0f3296c1818..43c8c724a74 100644 --- 
a/plugins/postgres/dbt/adapters/postgres/relation.py +++ b/plugins/postgres/dbt/adapters/postgres/relation.py @@ -1,7 +1,7 @@ from dbt.adapters.base import Column from dataclasses import dataclass from dbt.adapters.base.relation import BaseRelation -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError @dataclass(frozen=True, eq=False, repr=False) @@ -14,7 +14,7 @@ def __post_init__(self): and self.type is not None and len(self.identifier) > self.relation_max_name_length() ): - raise RuntimeException( + raise DbtRuntimeError( f"Relation name '{self.identifier}' " f"is longer than {self.relation_max_name_length()} characters" ) diff --git a/test/integration/035_docs_blocks_tests/test_docs_blocks.py b/test/integration/035_docs_blocks_tests/test_docs_blocks.py index dacddf394f9..f37c8e677ac 100644 --- a/test/integration/035_docs_blocks_tests/test_docs_blocks.py +++ b/test/integration/035_docs_blocks_tests/test_docs_blocks.py @@ -122,7 +122,7 @@ def test_postgres_alternative_docs_path(self): @use_profile('postgres') def test_postgres_alternative_docs_path_missing(self): self.use_default_project({"docs-paths": [self.dir("not-docs")]}) - with self.assertRaises(dbt.exceptions.CompilationException): + with self.assertRaises(dbt.exceptions.CompilationError): self.run_dbt() @@ -142,7 +142,7 @@ def models(self): @use_profile('postgres') def test_postgres_missing_doc_ref(self): # The run should fail since we could not find the docs reference. - with self.assertRaises(dbt.exceptions.CompilationException): + with self.assertRaises(dbt.exceptions.CompilationError): self.run_dbt() @@ -162,7 +162,7 @@ def models(self): @use_profile('postgres') def test_postgres_invalid_doc_ref(self): # The run should fail since we could not find the docs reference. 
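# (Editor's aside, not part of the patch: the parse-time failure this test
# exercises is a description pointing at a docs block that was never defined,
# e.g. a schema.yml entry like
#
#     description: "{{ doc('missing_block') }}"   # 'missing_block' is made up
#
# Resolving the doc() call fails during parsing and now surfaces as
# CompilationError rather than CompilationException.)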
- with self.assertRaises(dbt.exceptions.CompilationException): + with self.assertRaises(dbt.exceptions.CompilationError): self.run_dbt(expect_pass=False) class TestDuplicateDocsBlock(DBTIntegrationTest): @@ -180,5 +180,5 @@ def models(self): @use_profile('postgres') def test_postgres_duplicate_doc_ref(self): - with self.assertRaises(dbt.exceptions.CompilationException): + with self.assertRaises(dbt.exceptions.CompilationError): self.run_dbt(expect_pass=False) diff --git a/test/integration/062_defer_state_tests/test_defer_state.py b/test/integration/062_defer_state_tests/test_defer_state.py index d48d84aae46..593dc034036 100644 --- a/test/integration/062_defer_state_tests/test_defer_state.py +++ b/test/integration/062_defer_state_tests/test_defer_state.py @@ -80,7 +80,7 @@ def run_and_snapshot_defer(self): results = self.run_dbt(['snapshot']) # no state, snapshot fails - with pytest.raises(dbt.exceptions.RuntimeException): + with pytest.raises(dbt.exceptions.DbtRuntimeError): results = self.run_dbt(['snapshot', '--state', 'state', '--defer']) # copy files diff --git a/test/integration/062_defer_state_tests/test_modified_state.py b/test/integration/062_defer_state_tests/test_modified_state.py index 5f64cd66ae1..085faf11d5b 100644 --- a/test/integration/062_defer_state_tests/test_modified_state.py +++ b/test/integration/062_defer_state_tests/test_modified_state.py @@ -6,7 +6,7 @@ import pytest -from dbt.exceptions import CompilationException, IncompatibleSchemaException +from dbt.exceptions import CompilationError, IncompatibleSchemaError class TestModifiedState(DBTIntegrationTest): @@ -95,7 +95,7 @@ def test_postgres_changed_seed_contents_state(self): assert len(results) == 1 assert results[0] == 'test.seed' - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: self.run_dbt(['--warn-error', 'ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) assert '>1MB' in str(exc.value) @@ -206,6 +206,6 @@ def test_postgres_changed_exposure(self): @use_profile('postgres') def test_postgres_previous_version_manifest(self): # This tests that a different schema version in the file throws an error - with self.assertRaises(IncompatibleSchemaException) as exc: + with self.assertRaises(IncompatibleSchemaError) as exc: results = self.run_dbt(['ls', '-s', 'state:modified', '--state', './previous_state']) self.assertEqual(exc.CODE, 10014) diff --git a/test/integration/062_defer_state_tests/test_run_results_state.py b/test/integration/062_defer_state_tests/test_run_results_state.py index 4f59c6faa75..58215009ad7 100644 --- a/test/integration/062_defer_state_tests/test_run_results_state.py +++ b/test/integration/062_defer_state_tests/test_run_results_state.py @@ -6,8 +6,6 @@ import pytest -from dbt.exceptions import CompilationException - class TestRunResultsState(DBTIntegrationTest): @property diff --git a/test/integration/068_partial_parsing_tests/test_partial_parsing.py b/test/integration/068_partial_parsing_tests/test_partial_parsing.py index fce32b42cf1..d411a738602 100644 --- a/test/integration/068_partial_parsing_tests/test_partial_parsing.py +++ b/test/integration/068_partial_parsing_tests/test_partial_parsing.py @@ -1,4 +1,4 @@ -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError from dbt.contracts.graph.manifest import Manifest from dbt.contracts.files import ParseFileType from dbt.contracts.results import TestStatus @@ -144,7 +144,7 @@ def test_postgres_pp_models(self): # 
referred to in schema file self.copy_file('test-files/models-schema2.yml', 'models/schema.yml') self.rm_file('models/model_three.sql') - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) # Put model back again @@ -212,7 +212,7 @@ def test_postgres_pp_models(self): # Remove the macro self.rm_file('macros/my_macro.sql') - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) # put back macro file, got back to schema file with no macro @@ -310,7 +310,7 @@ def test_postgres_pp_sources(self): # remove sources schema file self.rm_file(normalize('models/sources.yml')) - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): results = self.run_dbt(["--partial-parse", "run"]) # put back sources and add an exposures file @@ -319,7 +319,7 @@ def test_postgres_pp_sources(self): # remove seed referenced in exposures file self.rm_file(normalize('seeds/raw_customers.csv')) - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): results = self.run_dbt(["--partial-parse", "run"]) # put back seed and remove depends_on from exposure @@ -333,7 +333,7 @@ def test_postgres_pp_sources(self): # Change seed name to wrong name self.copy_file('test-files/schema-sources5.yml', 'models/sources.yml') - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) # Put back seed name to right name diff --git a/test/integration/068_partial_parsing_tests/test_pp_metrics.py b/test/integration/068_partial_parsing_tests/test_pp_metrics.py index b9cbc69e3aa..5debe6d2b85 100644 --- a/test/integration/068_partial_parsing_tests/test_pp_metrics.py +++ b/test/integration/068_partial_parsing_tests/test_pp_metrics.py @@ -1,4 +1,4 @@ -from dbt.exceptions import CompilationException, UndefinedMacroException +from dbt.exceptions import CompilationError from dbt.contracts.graph.manifest import Manifest from dbt.contracts.files import ParseFileType from dbt.contracts.results import TestStatus @@ -99,8 +99,8 @@ def test_postgres_metrics(self): # Then delete a metric self.copy_file('test-files/people_metrics3.yml', 'models/people_metrics.yml') - with self.assertRaises(CompilationException): - # We use "parse" here and not "run" because we're checking that the CompilationException + with self.assertRaises(CompilationError): + # We use "parse" here and not "run" because we're checking that the CompilationError # occurs at parse time, not compilation results = self.run_dbt(["parse"]) diff --git a/test/integration/068_partial_parsing_tests/test_pp_vars.py b/test/integration/068_partial_parsing_tests/test_pp_vars.py index e5f0752f6a9..a73bfc43fa3 100644 --- a/test/integration/068_partial_parsing_tests/test_pp_vars.py +++ b/test/integration/068_partial_parsing_tests/test_pp_vars.py @@ -1,4 +1,4 @@ -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import ParsingError from dbt.constants import SECRET_ENV_PREFIX from dbt.contracts.graph.manifest import Manifest from dbt.contracts.files import ParseFileType @@ -58,7 +58,7 @@ def test_postgres_env_vars_models(self): # copy a file with an env_var call without an env_var self.copy_file('test-files/env_var_model.sql', 'models/env_var_model.sql') - with self.assertRaises(ParsingException): + with 
self.assertRaises(ParsingError): results = self.run_dbt(["--partial-parse", "run"]) # set the env var @@ -84,7 +84,7 @@ def test_postgres_env_vars_models(self): # set an env_var in a schema file self.copy_file('test-files/env_var_schema.yml', 'models/schema.yml') self.copy_file('test-files/env_var_model_one.sql', 'models/model_one.sql') - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): results = self.run_dbt(["--partial-parse", "run"]) # actually set the env_var @@ -139,7 +139,7 @@ def test_postgres_env_vars_models(self): # Delete database env var del os.environ['ENV_VAR_DATABASE'] - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): results = self.run_dbt(["--partial-parse", "run"]) os.environ['ENV_VAR_DATABASE'] = 'test_dbt' @@ -149,7 +149,7 @@ def test_postgres_env_vars_models(self): results = self.run_dbt(["--partial-parse", "run"]) # Add source test using test_color and an env_var for color self.copy_file('test-files/env_var_schema2.yml', 'models/schema.yml') - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): results = self.run_dbt(["--partial-parse", "run"]) os.environ['ENV_VAR_COLOR'] = 'green' results = self.run_dbt(["--partial-parse", "run"]) diff --git a/test/unit/test_adapter_connection_manager.py b/test/unit/test_adapter_connection_manager.py index 47db6b67ab0..b270f6a5d19 100644 --- a/test/unit/test_adapter_connection_manager.py +++ b/test/unit/test_adapter_connection_manager.py @@ -64,7 +64,7 @@ def test_retry_connection_fails_unhandled(self): * The Connection state should be "fail" and the handle None. * The resulting attempt count should be 1 as we are not explicitly configured to handle a ValueError. - * retry_connection should raise a FailedToConnectException with the Exception message. + * retry_connection should raise a FailedToConnectError with the Exception message. """ conn = self.postgres_connection attempts = 0 @@ -75,7 +75,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "Something went horribly wrong" + dbt.exceptions.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( @@ -99,7 +99,7 @@ def test_retry_connection_fails_handled(self): As a result: * The Connection state should be "fail" and the handle None. * The resulting attempt count should be 2 as we are configured to handle a ValueError. - * retry_connection should raise a FailedToConnectException with the Exception message. + * retry_connection should raise a FailedToConnectError with the Exception message. """ conn = self.postgres_connection attempts = 0 @@ -110,7 +110,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "Something went horribly wrong" + dbt.exceptions.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( @@ -173,7 +173,7 @@ def test_retry_connection_attempts(self): * The Connection state should be "fail" and the handle None, as connect never returns. * The resulting attempt count should be 11 as we are configured to handle a ValueError. - * retry_connection should raise a FailedToConnectException with the Exception message. + * retry_connection should raise a FailedToConnectError with the Exception message. 
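 A compressed sketch of the loop under test (editor's illustration; the
 real BaseConnectionManager.retry_connection differs in detail):

     while True:
         try:
             connection.handle = connect()
             connection.state = "open"
             return connection
         except retryable_exceptions as e:
             attempt += 1
             if attempt > retry_limit:
                 connection.state = "fail"
                 raise FailedToConnectError(str(e))
             time.sleep(retry_timeout)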
""" conn = self.postgres_connection attempts = 0 @@ -185,7 +185,7 @@ def connect(): raise ValueError("Something went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "Something went horribly wrong" + dbt.exceptions.FailedToConnectError, "Something went horribly wrong" ): BaseConnectionManager.retry_connection( conn, @@ -208,7 +208,7 @@ def test_retry_connection_fails_handling_all_exceptions(self): * The Connection state should be "fail" and the handle None, as connect never returns. * The resulting attempt count should be 11 as we are configured to handle all Exceptions. - * retry_connection should raise a FailedToConnectException with the Exception message. + * retry_connection should raise a FailedToConnectError with the Exception message. """ conn = self.postgres_connection attempts = 0 @@ -220,7 +220,7 @@ def connect(): raise TypeError("An unhandled thing went horribly wrong") with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "An unhandled thing went horribly wrong" + dbt.exceptions.FailedToConnectError, "An unhandled thing went horribly wrong" ): BaseConnectionManager.retry_connection( conn, @@ -338,7 +338,7 @@ def connect(): return True with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, "retry_limit cannot be negative" + dbt.exceptions.FailedToConnectError, "retry_limit cannot be negative" ): BaseConnectionManager.retry_connection( conn, @@ -365,7 +365,7 @@ def connect(): for retry_timeout in [-10, -2.5, lambda _: -100, lambda _: -10.1]: with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, + dbt.exceptions.FailedToConnectError, "retry_timeout cannot be negative or return a negative time", ): BaseConnectionManager.retry_connection( @@ -392,7 +392,7 @@ def connect(): return True with self.assertRaisesRegex( - dbt.exceptions.FailedToConnectException, + dbt.exceptions.FailedToConnectError, "retry_limit cannot be negative", ): BaseConnectionManager.retry_connection( diff --git a/test/unit/test_cache.py b/test/unit/test_cache.py index f69b4783ee1..3f9c6e4f6bf 100644 --- a/test/unit/test_cache.py +++ b/test/unit/test_cache.py @@ -121,7 +121,7 @@ def test_dest_exists_error(self): self.cache.add(bar) self.assert_relations_exist('DBT', 'schema', 'foo', 'bar') - with self.assertRaises(dbt.exceptions.InternalException): + with self.assertRaises(dbt.exceptions.DbtInternalError): self.cache.rename(foo, bar) self.assert_relations_exist('DBT', 'schema', 'foo', 'bar') diff --git a/test/unit/test_config.py b/test/unit/test_config.py index 9cdc248b7ed..4c1707d28b9 100644 --- a/test/unit/test_config.py +++ b/test/unit/test_config.py @@ -928,7 +928,7 @@ def test_run_operation_task(self): def test_run_operation_task_with_bad_path(self): self.args.project_dir = 'bad_path' - with self.assertRaises(dbt.exceptions.RuntimeException): + with self.assertRaises(dbt.exceptions.DbtRuntimeError): new_task = RunOperationTask.from_args(self.args) diff --git a/test/unit/test_context.py b/test/unit/test_context.py index a567e032f55..34c8562402f 100644 --- a/test/unit/test_context.py +++ b/test/unit/test_context.py @@ -89,7 +89,7 @@ def test_var_not_defined(self): var = providers.RuntimeVar(self.context, self.config, self.model) self.assertEqual(var("foo", "bar"), "bar") - with self.assertRaises(dbt.exceptions.CompilationException): + with self.assertRaises(dbt.exceptions.CompilationError): var("foo") def test_parser_var_default_something(self): @@ -464,7 +464,7 @@ def test_macro_namespace_duplicates(config_postgres, 
manifest_fx): mn.add_macros(manifest_fx.macros.values(), {}) # same pkg, same name: error - with pytest.raises(dbt.exceptions.CompilationException): + with pytest.raises(dbt.exceptions.CompilationError): mn.add_macro(mock_macro("macro_a", "root"), {}) # different pkg, same name: no error diff --git a/test/unit/test_core_dbt_utils.py b/test/unit/test_core_dbt_utils.py index 1deb8a77552..546e4f6ca00 100644 --- a/test/unit/test_core_dbt_utils.py +++ b/test/unit/test_core_dbt_utils.py @@ -2,7 +2,7 @@ import tarfile import unittest -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionError from dbt.utils import _connection_exception_retry as connection_exception_retry @@ -19,7 +19,7 @@ def test_connection_exception_retry_success_requests_exception(self): def test_connection_exception_retry_max(self): Counter._reset() - with self.assertRaises(ConnectionException): + with self.assertRaises(ConnectionError): connection_exception_retry(lambda: Counter._add_with_exception(), 5) self.assertEqual(6, counter) # 6 = original attempt plus 5 retries diff --git a/test/unit/test_deps.py b/test/unit/test_deps.py index 650722ef6f4..27c6f66e015 100644 --- a/test/unit/test_deps.py +++ b/test/unit/test_deps.py @@ -133,7 +133,7 @@ def test_resolve_fail(self): self.assertEqual(c.git, 'http://example.com') self.assertEqual(c.revisions, ['0.0.1', '0.0.2']) - with self.assertRaises(dbt.exceptions.DependencyException): + with self.assertRaises(dbt.exceptions.DependencyError): c.resolved() def test_default_revision(self): @@ -264,7 +264,7 @@ def test_resolve_missing_package(self): package='dbt-labs-test/b', version='0.1.2' )) - with self.assertRaises(dbt.exceptions.DependencyException) as exc: + with self.assertRaises(dbt.exceptions.DependencyError) as exc: a.resolved() msg = 'Package dbt-labs-test/b was not found in the package index' @@ -276,7 +276,7 @@ def test_resolve_missing_version(self): version='0.1.4' )) - with self.assertRaises(dbt.exceptions.DependencyException) as exc: + with self.assertRaises(dbt.exceptions.DependencyError) as exc: a.resolved() msg = ( "Could not find a matching compatible version for package " @@ -298,7 +298,7 @@ def test_resolve_conflict(self): b = RegistryUnpinnedPackage.from_contract(b_contract) c = a.incorporate(b) - with self.assertRaises(dbt.exceptions.DependencyException) as exc: + with self.assertRaises(dbt.exceptions.DependencyError) as exc: c.resolved() msg = ( "Version error for package dbt-labs-test/a: Could not " diff --git a/test/unit/test_exceptions.py b/test/unit/test_exceptions.py index 6a47255e13c..e66e913b1a6 100644 --- a/test/unit/test_exceptions.py +++ b/test/unit/test_exceptions.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import raise_duplicate_macro_name, CompilationException +from dbt.exceptions import raise_duplicate_macro_name, CompilationError from .utils import MockMacro @@ -8,7 +8,7 @@ def test_raise_duplicate_macros_different_package(): macro_1 = MockMacro(package='dbt', name='some_macro') macro_2 = MockMacro(package='dbt-myadapter', name='some_macro') - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: raise_duplicate_macro_name( node_1=macro_1, node_2=macro_2, @@ -24,7 +24,7 @@ def test_raise_duplicate_macros_same_package(): macro_1 = MockMacro(package='dbt', name='some_macro') macro_2 = MockMacro(package='dbt', name='some_macro') - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: raise_duplicate_macro_name( node_1=macro_1, 
node_2=macro_2, diff --git a/test/unit/test_graph_selection.py b/test/unit/test_graph_selection.py index a0da5b490e9..4c40c1dff82 100644 --- a/test/unit/test_graph_selection.py +++ b/test/unit/test_graph_selection.py @@ -200,5 +200,5 @@ def test_parse_specs(spec, parents, parents_depth, children, children_depth, fil @pytest.mark.parametrize('invalid', invalid_specs, ids=lambda k: str(k)) def test_invalid_specs(invalid): - with pytest.raises(dbt.exceptions.RuntimeException): + with pytest.raises(dbt.exceptions.DbtRuntimeError): graph_selector.SelectionCriteria.from_single_spec(invalid) diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py index 7532302784f..769199e841f 100644 --- a/test/unit/test_graph_selector_methods.py +++ b/test/unit/test_graph_selector_methods.py @@ -898,11 +898,11 @@ def test_select_state_no_change(manifest, previous_state): def test_select_state_nothing(manifest, previous_state): previous_state.manifest = None method = statemethod(manifest, previous_state) - with pytest.raises(dbt.exceptions.RuntimeException) as exc: + with pytest.raises(dbt.exceptions.DbtRuntimeError) as exc: search_manifest_using_method(manifest, method, 'modified') assert 'no comparison manifest' in str(exc.value) - with pytest.raises(dbt.exceptions.RuntimeException) as exc: + with pytest.raises(dbt.exceptions.DbtRuntimeError) as exc: search_manifest_using_method(manifest, method, 'new') assert 'no comparison manifest' in str(exc.value) diff --git a/test/unit/test_graph_selector_spec.py b/test/unit/test_graph_selector_spec.py index 68c8611ccac..d72325affc2 100644 --- a/test/unit/test_graph_selector_spec.py +++ b/test/unit/test_graph_selector_spec.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError from dbt.graph.selector_spec import ( SelectionCriteria, SelectionIntersection, @@ -111,10 +111,10 @@ def test_raw_parse_weird(): def test_raw_parse_invalid(): - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): SelectionCriteria.from_single_spec('invalid_method:something') - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): SelectionCriteria.from_single_spec('@foo+') diff --git a/test/unit/test_jinja.py b/test/unit/test_jinja.py index 6b8c939de64..5213f8d7d8c 100644 --- a/test/unit/test_jinja.py +++ b/test/unit/test_jinja.py @@ -6,7 +6,7 @@ from dbt.clients.jinja import get_rendered from dbt.clients.jinja import get_template from dbt.clients.jinja import extract_toplevel_blocks -from dbt.exceptions import CompilationException, JinjaRenderingException +from dbt.exceptions import CompilationError, JinjaRenderingError @contextmanager @@ -55,12 +55,12 @@ def expected_id(arg): ( '''foo: "{{ 'bar' | as_bool }}"''', returns('bar'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ 'bar' | as_number }}"''', returns('bar'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ 'bar' | as_native }}"''', @@ -116,7 +116,7 @@ def expected_id(arg): ( '''foo: "{{ 1 | as_bool }}"''', returns('1'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ 1 | as_number }}"''', @@ -136,7 +136,7 @@ def expected_id(arg): ( '''foo: "{{ '1' | as_bool }}"''', returns('1'), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ '1' | as_number }}"''', @@ -171,7 +171,7 @@ def expected_id(arg): ( '''foo: "{{ True | as_number }}"''', returns('True'), - 
raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ True | as_native }}"''', @@ -197,7 +197,7 @@ def expected_id(arg): ( '''foo: "{{ true | as_number }}"''', returns("True"), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ true | as_native }}"''', @@ -254,7 +254,7 @@ def expected_id(arg): ( '''foo: "{{ True | as_number }}"''', returns("True"), - raises(JinjaRenderingException), + raises(JinjaRenderingError), ), ( '''foo: "{{ True | as_native }}"''', @@ -552,24 +552,24 @@ def test_materialization_parse(self): def test_nested_not_ok(self): # we don't allow nesting same blocks body = '{% myblock a %} {% myblock b %} {% endmyblock %} {% endmyblock %}' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body, allowed_blocks={'myblock'}) def test_incomplete_block_failure(self): fullbody = '{% myblock foo %} {% endmyblock %}' for length in range(len('{% myblock foo %}'), len(fullbody)-1): body = fullbody[:length] - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body, allowed_blocks={'myblock'}) def test_wrong_end_failure(self): body = '{% myblock foo %} {% endotherblock %}' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body, allowed_blocks={'myblock', 'otherblock'}) def test_comment_no_end_failure(self): body = '{# ' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body) def test_comment_only(self): @@ -698,7 +698,7 @@ def test_unclosed_model_quotes(self): def test_if(self): # if you conditionally define your macros/models, don't body = '{% if true %}{% macro my_macro() %} adsf {% endmacro %}{% endif %}' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body) def test_if_innocuous(self): @@ -710,7 +710,7 @@ def test_if_innocuous(self): def test_for(self): # no for-loops over macros. 
body = '{% for x in range(10) %}{% macro my_macro() %} adsf {% endmacro %}{% endfor %}' - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): extract_toplevel_blocks(body) def test_for_innocuous(self): @@ -722,19 +722,19 @@ def test_for_innocuous(self): def test_endif(self): body = '{% snapshot foo %}select * from thing{% endsnapshot%}{% endif %}' - with self.assertRaises(CompilationException) as err: + with self.assertRaises(CompilationError) as err: extract_toplevel_blocks(body) self.assertIn('Got an unexpected control flow end tag, got endif but never saw a preceeding if (@ 1:53)', str(err.exception)) def test_if_endfor(self): body = '{% if x %}...{% endfor %}{% endif %}' - with self.assertRaises(CompilationException) as err: + with self.assertRaises(CompilationError) as err: extract_toplevel_blocks(body) self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 1:13)', str(err.exception)) def test_if_endfor_newlines(self): body = '{% if x %}\n ...\n {% endfor %}\n{% endif %}' - with self.assertRaises(CompilationException) as err: + with self.assertRaises(CompilationError) as err: extract_toplevel_blocks(body) self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 3:4)', str(err.exception)) diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py index 38e439a696f..0699253417b 100644 --- a/test/unit/test_parser.py +++ b/test/unit/test_parser.py @@ -18,7 +18,7 @@ ModelNode, Macro, DependsOn, SingularTestNode, SnapshotNode, AnalysisNode, UnpatchedSourceDefinition ) -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError from dbt.node_types import NodeType from dbt.parser import ( ModelParser, MacroParser, SingularTestParser, GenericTestParser, @@ -664,7 +664,7 @@ def test_basic(self): def test_sql_model_parse_error(self): block = self.file_block_for(sql_model_parse_error, 'nested/model_1.sql') - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): self.parser.parse_file(block) def test_python_model_parse(self): @@ -724,31 +724,31 @@ def test_python_model_config_with_defaults(self): def test_python_model_single_argument(self): block = self.file_block_for(python_model_single_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_no_argument(self): block = self.file_block_for(python_model_no_argument, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_incorrect_argument_name(self): block = self.file_block_for(python_model_incorrect_argument_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_multiple_models(self): block = self.file_block_for(python_model_multiple_models, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_incorrect_function_name(self): block = 
self.file_block_for(python_model_incorrect_function_name, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_empty_file(self): @@ -759,13 +759,13 @@ def test_python_model_empty_file(self): def test_python_model_multiple_returns(self): block = self.file_block_for(python_model_multiple_returns, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_no_return(self): block = self.file_block_for(python_model_no_return, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_single_return(self): @@ -776,7 +776,7 @@ def test_python_model_single_return(self): def test_python_model_incorrect_ref(self): block = self.file_block_for(python_model_incorrect_ref, 'nested/py_model.py') self.parser.manifest.files[block.file.file_id] = block.file - with self.assertRaises(ParsingException): + with self.assertRaises(ParsingError): self.parser.parse_file(block) def test_python_model_default_materialization(self): @@ -1027,7 +1027,7 @@ def file_block_for(self, data, filename): def test_parse_error(self): block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', 'nested/snap_1.sql') - with self.assertRaises(CompilationException): + with self.assertRaises(CompilationError): self.parser.parse_file(block) def test_single_block(self): diff --git a/test/unit/test_postgres_adapter.py b/test/unit/test_postgres_adapter.py index 06a2ed7c497..0d56ff9ff63 100644 --- a/test/unit/test_postgres_adapter.py +++ b/test/unit/test_postgres_adapter.py @@ -12,7 +12,7 @@ from dbt.contracts.files import FileHash from dbt.contracts.graph.manifest import ManifestStateCheck from dbt.clients import agate_helper -from dbt.exceptions import ValidationException, DbtConfigError +from dbt.exceptions import DbtValidationError, DbtConfigError from psycopg2 import extensions as psycopg2_extensions from psycopg2 import DatabaseError @@ -58,8 +58,8 @@ def adapter(self): def test_acquire_connection_validations(self, psycopg2): try: connection = self.adapter.acquire_connection('dummy') - except ValidationException as e: - self.fail('got ValidationException: {}'.format(str(e))) + except DbtValidationError as e: + self.fail('got DbtValidationError: {}'.format(str(e))) except BaseException as e: self.fail('acquiring connection failed with unknown exception: {}' .format(str(e))) diff --git a/test/unit/test_registry_get_request_exception.py b/test/unit/test_registry_get_request_exception.py index 44033fe0546..3029971cad4 100644 --- a/test/unit/test_registry_get_request_exception.py +++ b/test/unit/test_registry_get_request_exception.py @@ -1,9 +1,9 @@ import unittest -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionError from dbt.clients.registry import _get_with_retries class testRegistryGetRequestException(unittest.TestCase): def test_registry_request_error_catching(self): # using non routable IP to test connection error logic in the _get_with_retries function - self.assertRaises(ConnectionException, _get_with_retries, '', 'http://0.0.0.0') + self.assertRaises(ConnectionError, _get_with_retries, 
'', 'http://0.0.0.0') diff --git a/test/unit/test_semver.py b/test/unit/test_semver.py index eff7603a2f6..b36c403e3a7 100644 --- a/test/unit/test_semver.py +++ b/test/unit/test_semver.py @@ -2,7 +2,7 @@ import itertools from typing import List -from dbt.exceptions import VersionsNotCompatibleException +from dbt.exceptions import VersionsNotCompatibleError from dbt.semver import VersionSpecifier, UnboundedVersionSpecifier, \ VersionRange, reduce_versions, versions_compatible, \ resolve_to_specific_version, filter_installable @@ -40,7 +40,7 @@ def assertVersionSetResult(self, inputs, output_range): def assertInvalidVersionSet(self, inputs): for permutation in itertools.permutations(inputs): - with self.assertRaises(VersionsNotCompatibleException): + with self.assertRaises(VersionsNotCompatibleError): reduce_versions(*permutation) def test__versions_compatible(self): diff --git a/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py b/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py index b764568fe16..053fcc506c8 100644 --- a/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py +++ b/tests/adapter/dbt/tests/adapter/query_comment/test_query_comment.py @@ -1,6 +1,6 @@ import pytest import json -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError from dbt.version import __version__ as dbt_version from dbt.tests.util import run_dbt_and_capture from dbt.tests.adapter.query_comment.fixtures import MACROS__MACRO_SQL, MODELS__X_SQL @@ -77,7 +77,7 @@ def project_config_update(self): return {"query-comment": "{{ invalid_query_header() }}"} def run_assert_comments(self): - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): self.run_get_json(expect_pass=False) diff --git a/tests/functional/artifacts/test_override.py b/tests/functional/artifacts/test_override.py index 46a037bdcc5..a7b689a3670 100644 --- a/tests/functional/artifacts/test_override.py +++ b/tests/functional/artifacts/test_override.py @@ -1,6 +1,6 @@ import pytest from dbt.tests.util import run_dbt -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError model_sql = """ select 1 as id @@ -30,6 +30,6 @@ def test_override_used( results = run_dbt(["run"]) assert len(results) == 1 # this should pick up our failure macro and raise a compilation exception - with pytest.raises(CompilationException) as excinfo: + with pytest.raises(CompilationError) as excinfo: run_dbt(["--warn-error", "docs", "generate"]) assert "rejected: no catalogs for you" in str(excinfo.value) diff --git a/tests/functional/artifacts/test_previous_version_state.py b/tests/functional/artifacts/test_previous_version_state.py index a7a7ed5417c..84fd8bab360 100644 --- a/tests/functional/artifacts/test_previous_version_state.py +++ b/tests/functional/artifacts/test_previous_version_state.py @@ -2,7 +2,7 @@ import os import shutil from dbt.tests.util import run_dbt -from dbt.exceptions import IncompatibleSchemaException +from dbt.exceptions import IncompatibleSchemaError from dbt.contracts.graph.manifest import WritableManifest # This is a *very* simple project, with just one model in it. 
@@ -84,7 +84,7 @@ def compare_previous_state( results = run_dbt(cli_args, expect_pass=expect_pass) assert len(results) == 0 else: - with pytest.raises(IncompatibleSchemaException): + with pytest.raises(IncompatibleSchemaError): run_dbt(cli_args, expect_pass=expect_pass) def test_compare_state_current(self, project): diff --git a/tests/functional/basic/test_invalid_reference.py b/tests/functional/basic/test_invalid_reference.py index 8a516027940..1c54d1b906a 100644 --- a/tests/functional/basic/test_invalid_reference.py +++ b/tests/functional/basic/test_invalid_reference.py @@ -1,6 +1,6 @@ import pytest from dbt.tests.util import run_dbt -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError descendant_sql = """ @@ -24,5 +24,5 @@ def models(): def test_undefined_value(project): # Tests that a project with an invalid reference fails - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["compile"]) diff --git a/tests/functional/configs/test_configs.py b/tests/functional/configs/test_configs.py index 489b60fbbb1..97e29362d4b 100644 --- a/tests/functional/configs/test_configs.py +++ b/tests/functional/configs/test_configs.py @@ -3,7 +3,7 @@ import pytest import os -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.tests.util import run_dbt, update_config_file, write_file, check_relations_equal from tests.functional.configs.fixtures import BaseConfigProject, simple_snapshot @@ -109,7 +109,7 @@ def test_snapshots_materialization_proj_config(self, project): snapshots_dir = os.path.join(project.project_root, "snapshots") write_file(simple_snapshot, snapshots_dir, "mysnapshot.sql") - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt() diff --git a/tests/functional/configs/test_configs_in_schema_files.py b/tests/functional/configs/test_configs_in_schema_files.py index 0d702615474..a04b9ed43aa 100644 --- a/tests/functional/configs/test_configs_in_schema_files.py +++ b/tests/functional/configs/test_configs_in_schema_files.py @@ -2,7 +2,7 @@ from dbt.tests.util import run_dbt, get_manifest, check_relations_equal, write_file -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError models_alt__schema_yml = """ version: 2 @@ -242,11 +242,11 @@ def test_config_layering( # copy a schema file with multiple metas # shutil.copyfile('extra-alt/untagged.yml', 'models-alt/untagged.yml') write_file(extra_alt__untagged_yml, project.project_root, "models", "untagged.yml") - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) # copy a schema file with config key in top-level of test and in config dict # shutil.copyfile('extra-alt/untagged2.yml', 'models-alt/untagged.yml') write_file(extra_alt__untagged2_yml, project.project_root, "models", "untagged.yml") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["run"]) diff --git a/tests/functional/configs/test_disabled_model.py b/tests/functional/configs/test_disabled_model.py index 5ca56512e14..4b6e74adffd 100644 --- a/tests/functional/configs/test_disabled_model.py +++ b/tests/functional/configs/test_disabled_model.py @@ -2,7 +2,7 @@ from hologram import ValidationError from dbt.tests.util import run_dbt, get_manifest -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError from 
tests.functional.configs.fixtures import ( schema_all_disabled_yml, @@ -47,7 +47,7 @@ def models(self): } def test_disabled_config(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "which is disabled" @@ -209,7 +209,7 @@ def models(self): } def test_disabled_config(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "Found 3 matching disabled nodes for model 'my_model_2'" diff --git a/tests/functional/configs/test_unused_configs.py b/tests/functional/configs/test_unused_configs.py index 7796472fea9..1bc887b03f1 100644 --- a/tests/functional/configs/test_unused_configs.py +++ b/tests/functional/configs/test_unused_configs.py @@ -1,7 +1,7 @@ import pytest from dbt.tests.util import run_dbt -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError seeds__seed_csv = """id,value 4,2 @@ -41,7 +41,7 @@ def test_warn_unused_configuration_paths( self, project, ): - with pytest.raises(CompilationException) as excinfo: + with pytest.raises(CompilationError) as excinfo: run_dbt(["--warn-error", "seed"]) assert "Configuration paths exist" in str(excinfo.value) diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py index 529087c851a..1e741a2b283 100644 --- a/tests/functional/context_methods/test_builtin_functions.py +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -3,7 +3,7 @@ import os from dbt.tests.util import run_dbt, run_dbt_and_capture, write_file -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError macros__validate_set_sql = """ {% macro validate_set() %} @@ -142,9 +142,9 @@ class TestContextBuiltinExceptions: # Assert compilation errors are raised with _strict equivalents def test_builtin_function_exception(self, project): write_file(models__set_exception_sql, project.project_root, "models", "raise.sql") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["compile"]) write_file(models__zip_exception_sql, project.project_root, "models", "raise.sql") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["compile"]) diff --git a/tests/functional/context_methods/test_cli_vars.py b/tests/functional/context_methods/test_cli_vars.py index 3e548b6f402..353d96d777b 100644 --- a/tests/functional/context_methods/test_cli_vars.py +++ b/tests/functional/context_methods/test_cli_vars.py @@ -5,7 +5,7 @@ from dbt.tests.util import run_dbt, get_artifact, write_config_file from dbt.tests.fixtures.project import write_project_files -from dbt.exceptions import RuntimeException, CompilationException +from dbt.exceptions import DbtRuntimeError, CompilationError models_complex__schema_yml = """ @@ -114,7 +114,7 @@ def test_cli_vars_in_profile(self, project, dbt_profile_data): profile = dbt_profile_data profile["test"]["outputs"]["default"]["host"] = "{{ var('db_host') }}" write_config_file(profile, project.profiles_dir, "profiles.yml") - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): results = run_dbt(["run"]) results = run_dbt(["run", "--vars", "db_host: localhost"]) assert len(results) == 1 @@ -148,7 +148,7 @@ def 
test_cli_vars_in_packages(self, project, packages_config): write_config_file(packages, project.project_root, "packages.yml") # Without vars args deps fails - with pytest.raises(RuntimeException): + with pytest.raises(DbtRuntimeError): run_dbt(["deps"]) # With vars arg deps succeeds @@ -200,7 +200,7 @@ def test_vars_in_selectors(self, project): # Update the selectors.yml file to have a var write_config_file(var_selectors_yml, project.project_root, "selectors.yml") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["run"]) # Var in cli_vars works diff --git a/tests/functional/context_methods/test_secret_env_vars.py b/tests/functional/context_methods/test_secret_env_vars.py index 9cd4c2eacac..710c104f551 100644 --- a/tests/functional/context_methods/test_secret_env_vars.py +++ b/tests/functional/context_methods/test_secret_env_vars.py @@ -2,7 +2,7 @@ import os from dbt.constants import SECRET_ENV_PREFIX -from dbt.exceptions import ParsingException, InternalException +from dbt.exceptions import ParsingError, DbtInternalError from tests.functional.context_methods.first_dependency import FirstDependencyProject from dbt.tests.util import run_dbt, run_dbt_and_capture @@ -30,7 +30,7 @@ def models(self): return {"context.sql": secret_bad__context_sql} def test_disallow_secret(self, project): - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["compile"]) @@ -130,7 +130,7 @@ def packages(self): } def test_fail_clone_with_scrubbing(self, project): - with pytest.raises(InternalException) as excinfo: + with pytest.raises(DbtInternalError) as excinfo: _, log_output = run_dbt_and_capture(["deps"]) assert "abc123" not in str(excinfo.value) @@ -149,7 +149,7 @@ def packages(self): } def test_fail_clone_with_scrubbing(self, project): - with pytest.raises(InternalException) as excinfo: + with pytest.raises(DbtInternalError) as excinfo: _, log_output = run_dbt_and_capture(["deps"]) # we should not see any manipulated form of the secret value (abc123) here diff --git a/tests/functional/context_methods/test_var_in_generate_name.py b/tests/functional/context_methods/test_var_in_generate_name.py index 5025cb8fede..2bbba457e58 100644 --- a/tests/functional/context_methods/test_var_in_generate_name.py +++ b/tests/functional/context_methods/test_var_in_generate_name.py @@ -1,7 +1,7 @@ import pytest from dbt.tests.util import run_dbt, update_config_file -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError model_sql = """ select 1 as id @@ -27,7 +27,7 @@ def models(self): def test_generate_schema_name_var(self, project): # var isn't set, so generate_name macro fails - with pytest.raises(CompilationException) as excinfo: + with pytest.raises(CompilationError) as excinfo: run_dbt(["compile"]) assert "Required var 'somevar' not found in config" in str(excinfo.value) diff --git a/tests/functional/dependencies/test_local_dependency.py b/tests/functional/dependencies/test_local_dependency.py index 3e0bc5efdb7..13605028519 100644 --- a/tests/functional/dependencies/test_local_dependency.py +++ b/tests/functional/dependencies/test_local_dependency.py @@ -184,7 +184,7 @@ def models(self): def test_missing_dependency(self, project): # dbt should raise a runtime exception - with pytest.raises(dbt.exceptions.RuntimeException): + with pytest.raises(dbt.exceptions.DbtRuntimeError): run_dbt(["compile"]) @@ -335,12 +335,12 @@ def prepare_dependencies(self, project): ) def test_local_dependency_same_name(self, 
prepare_dependencies, project): - with pytest.raises(dbt.exceptions.DependencyException): + with pytest.raises(dbt.exceptions.DependencyError): run_dbt(["deps"], expect_pass=False) def test_local_dependency_same_name_sneaky(self, prepare_dependencies, project): shutil.copytree("duplicate_dependency", "./dbt_packages/duplicate_dependency") - with pytest.raises(dbt.exceptions.CompilationException): + with pytest.raises(dbt.exceptions.CompilationError): run_dbt(["compile"]) # needed to avoid compilation errors from duplicate package names in test autocleanup diff --git a/tests/functional/deprecations/test_deprecations.py b/tests/functional/deprecations/test_deprecations.py index fc76289b2ee..a70b3687c69 100644 --- a/tests/functional/deprecations/test_deprecations.py +++ b/tests/functional/deprecations/test_deprecations.py @@ -63,7 +63,7 @@ def test_data_path(self, project): def test_data_path_fail(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt(["--warn-error", "debug"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "The `data-paths` config has been renamed" @@ -107,7 +107,7 @@ def test_package_path(self, project): def test_package_path_not_set(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt(["--warn-error", "clean"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "path has changed from `dbt_modules` to `dbt_packages`." @@ -134,7 +134,7 @@ def test_package_redirect(self, project): def test_package_redirect_fail(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt(["--warn-error", "deps"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "The `fishtown-analytics/dbt_utils` package is deprecated in favor of `dbt-labs/dbt_utils`" @@ -159,7 +159,7 @@ def test_metric_handle_rename(self, project): def test_metric_handle_rename_fail(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: # turn off partial parsing to ensure that the metric is re-parsed run_dbt(["--warn-error", "--no-partial-parse", "parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace @@ -182,7 +182,7 @@ def test_exposure_name(self, project): def test_exposure_name_fail(self, project): deprecations.reset_deprecations() assert deprecations.active_deprecations == set() - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt(["--warn-error", "--no-partial-parse", "parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace expected_msg = "Starting in v1.3, the 'name' of an exposure should contain only letters, numbers, and underscores." 
diff --git a/tests/functional/duplicates/test_duplicate_analysis.py b/tests/functional/duplicates/test_duplicate_analysis.py index e9050860ad9..44dc4c6f167 100644 --- a/tests/functional/duplicates/test_duplicate_analysis.py +++ b/tests/functional/duplicates/test_duplicate_analysis.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -27,7 +27,7 @@ def analyses(self): def test_duplicate_model_enabled(self, project): message = "dbt found two analyses with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace assert message in exc_str diff --git a/tests/functional/duplicates/test_duplicate_exposure.py b/tests/functional/duplicates/test_duplicate_exposure.py index 6035da7c110..140db21cd07 100644 --- a/tests/functional/duplicates/test_duplicate_exposure.py +++ b/tests/functional/duplicates/test_duplicate_exposure.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -26,6 +26,6 @@ def models(self): def test_duplicate_exposure(self, project): message = "dbt found two exposures with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert message in str(exc.value) diff --git a/tests/functional/duplicates/test_duplicate_macro.py b/tests/functional/duplicates/test_duplicate_macro.py index 1fc7282808f..35b843f5891 100644 --- a/tests/functional/duplicates/test_duplicate_macro.py +++ b/tests/functional/duplicates/test_duplicate_macro.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -43,7 +43,7 @@ def macros(self): def test_duplicate_macros(self, project): message = 'dbt found two macros named "some_macro" in the project' - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["parse"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace assert message in exc_str @@ -64,7 +64,7 @@ def macros(self): def test_duplicate_macros(self, project): message = 'dbt found two macros named "some_macro" in the project' - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace assert message in exc_str diff --git a/tests/functional/duplicates/test_duplicate_metric.py b/tests/functional/duplicates/test_duplicate_metric.py index e40295278b9..f8beca39c24 100644 --- a/tests/functional/duplicates/test_duplicate_metric.py +++ b/tests/functional/duplicates/test_duplicate_metric.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -46,6 +46,6 @@ def models(self): def test_duplicate_metric(self, project): message = "dbt found two metrics with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert message in str(exc.value) diff --git a/tests/functional/duplicates/test_duplicate_model.py b/tests/functional/duplicates/test_duplicate_model.py index fbcd1b79671..7a53fd6de63 100644 --- 
a/tests/functional/duplicates/test_duplicate_model.py +++ b/tests/functional/duplicates/test_duplicate_model.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException, DuplicateResourceName +from dbt.exceptions import CompilationError, DuplicateResourceNameError from dbt.tests.fixtures.project import write_project_files from dbt.tests.util import run_dbt, get_manifest @@ -54,7 +54,7 @@ def models(self): def test_duplicate_model_enabled(self, project): message = "dbt found two models with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) exc_str = " ".join(str(exc.value).split()) # flatten all whitespace assert message in exc_str @@ -108,7 +108,7 @@ def packages(self): def test_duplicate_model_enabled_across_packages(self, project): run_dbt(["deps"]) message = "dbt found two models with the name" - with pytest.raises(DuplicateResourceName) as exc: + with pytest.raises(DuplicateResourceNameError) as exc: run_dbt(["run"]) assert message in str(exc.value) diff --git a/tests/functional/duplicates/test_duplicate_source.py b/tests/functional/duplicates/test_duplicate_source.py index 181aaf5d18e..1100345aabc 100644 --- a/tests/functional/duplicates/test_duplicate_source.py +++ b/tests/functional/duplicates/test_duplicate_source.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt @@ -22,6 +22,6 @@ def models(self): def test_duplicate_source_enabled(self, project): message = "dbt found two sources with the name" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert message in str(exc.value) diff --git a/tests/functional/fail_fast/test_fail_fast_run.py b/tests/functional/fail_fast/test_fail_fast_run.py index 3ea3c4bc0f0..5c0c8cf849d 100644 --- a/tests/functional/fail_fast/test_fail_fast_run.py +++ b/tests/functional/fail_fast/test_fail_fast_run.py @@ -2,7 +2,7 @@ from dbt.tests.util import run_dbt from tests.functional.fail_fast.fixtures import models, project_files # noqa: F401 -from dbt.exceptions import FailFastException +from dbt.exceptions import FailFastError def check_audit_table(project, count=1): @@ -43,7 +43,7 @@ def test_fail_fast_run( self, project, ): - with pytest.raises(FailFastException): + with pytest.raises(FailFastError): run_dbt(["run", "--threads", "1", "--fail-fast"]) check_audit_table(project) @@ -62,6 +62,6 @@ def test_fail_fast_run_user_config( self, project, ): - with pytest.raises(FailFastException): + with pytest.raises(FailFastError): run_dbt(["run", "--threads", "1"]) check_audit_table(project) diff --git a/tests/functional/hooks/test_model_hooks.py b/tests/functional/hooks/test_model_hooks.py index 79f3632bd8e..99a05c9c895 100644 --- a/tests/functional/hooks/test_model_hooks.py +++ b/tests/functional/hooks/test_model_hooks.py @@ -2,7 +2,7 @@ from pathlib import Path -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import ( run_dbt, @@ -422,7 +422,7 @@ def models(self): return {"hooks.sql": models__hooks_error} def test_run_duplicate_hook_defs(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "pre_hook" in str(exc.value) assert "pre-hook" in str(exc.value) diff --git a/tests/functional/invalid_model_tests/test_invalid_models.py 
b/tests/functional/invalid_model_tests/test_invalid_models.py index 29739dcac20..09db17bc325 100644 --- a/tests/functional/invalid_model_tests/test_invalid_models.py +++ b/tests/functional/invalid_model_tests/test_invalid_models.py @@ -1,6 +1,6 @@ import pytest -from dbt.exceptions import CompilationException, ParsingException +from dbt.exceptions import CompilationError, ParsingError from dbt.tests.util import ( run_dbt, @@ -129,7 +129,7 @@ def models(self): } def test_view_disabled(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["seed"]) assert "enabled" in str(exc.value) @@ -146,7 +146,7 @@ def models(self): } def test_referencing_disabled_model(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "which is disabled" in str(exc.value) @@ -160,7 +160,7 @@ def models(self): return {"models__dependent_on_view.sql": models__dependent_on_view} def test_models_not_found(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "which was not found" in str(exc.value) @@ -176,7 +176,7 @@ def models(self): return {"models__with_bad_macro.sql": models__with_bad_macro} def test_with_invalid_macro_call(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert "macro 'dbt_macro__some_macro' takes no keyword argument 'invalid'" in str( @@ -207,7 +207,7 @@ def project_config_update(self): } def test_postgres_source_disabled(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "which is disabled" in str(exc.value) @@ -221,7 +221,7 @@ def models(self): return {"models__referencing_disabled_source.sql": models__referencing_disabled_source} def test_source_missing(self, project): - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "which was not found" in str(exc.value) diff --git a/tests/functional/macros/test_macros.py b/tests/functional/macros/test_macros.py index 899be2453b1..e7f25acab3a 100644 --- a/tests/functional/macros/test_macros.py +++ b/tests/functional/macros/test_macros.py @@ -97,7 +97,7 @@ def macros(self): return {"my_macros.sql": macros__no_default_macros} def test_invalid_macro(self, project): - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt() assert "In dispatch: No macro named 'dispatch_to_nowhere' found" in str(exc.value) @@ -213,7 +213,7 @@ def macros(self): return {"macro.sql": macros__deprecated_adapter_macro} def test_invalid_macro(self, project): - with pytest.raises(dbt.exceptions.CompilationException) as exc: + with pytest.raises(dbt.exceptions.CompilationError) as exc: run_dbt() assert 'The "adapter_macro" macro has been deprecated' in str(exc.value) diff --git a/tests/functional/materializations/test_incremental.py b/tests/functional/materializations/test_incremental.py index f6ec8b2a3e9..7e8df9ea6f1 100644 --- a/tests/functional/materializations/test_incremental.py +++ b/tests/functional/materializations/test_incremental.py @@ -1,6 +1,6 @@ import pytest from dbt.tests.util import run_dbt, get_manifest -from dbt.exceptions import RuntimeException +from dbt.exceptions import DbtRuntimeError from dbt.context.providers import generate_runtime_model_context @@ 
-43,10 +43,10 @@ def test_basic(project): assert type(macro_func).__name__ == "MacroGenerator" # These two incremental strategies are not valid for Postgres - with pytest.raises(RuntimeException) as excinfo: + with pytest.raises(DbtRuntimeError) as excinfo: macro_func = project.adapter.get_incremental_strategy_macro(context, "merge") assert "merge" in str(excinfo.value) - with pytest.raises(RuntimeException) as excinfo: + with pytest.raises(DbtRuntimeError) as excinfo: macro_func = project.adapter.get_incremental_strategy_macro(context, "insert_overwrite") assert "insert_overwrite" in str(excinfo.value) diff --git a/tests/functional/metrics/test_metric_configs.py b/tests/functional/metrics/test_metric_configs.py index 88c39e0537d..d81c97f79a6 100644 --- a/tests/functional/metrics/test_metric_configs.py +++ b/tests/functional/metrics/test_metric_configs.py @@ -1,7 +1,7 @@ import pytest from hologram import ValidationError from dbt.contracts.graph.model_config import MetricConfig -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError from dbt.tests.util import run_dbt, update_config_file, get_manifest @@ -106,7 +106,7 @@ def test_metrics_all_configs(self, project): assert config_test_table == pytest.expected_config -# Test CompilationException if a model references a disabled metric +# Test CompilationError if a model references a disabled metric class TestDisabledMetricRef(MetricConfigTests): @pytest.fixture(scope="class") def models(self): @@ -134,7 +134,7 @@ def test_disabled_metric_ref_model(self, project): } update_config_file(new_enabled_config, project.project_root, "dbt_project.yml") - with pytest.raises(CompilationException): + with pytest.raises(CompilationError): run_dbt(["parse"]) diff --git a/tests/functional/metrics/test_metrics.py b/tests/functional/metrics/test_metrics.py index de8c022f3d3..10e34770cf1 100644 --- a/tests/functional/metrics/test_metrics.py +++ b/tests/functional/metrics/test_metrics.py @@ -1,7 +1,7 @@ import pytest from dbt.tests.util import run_dbt, get_manifest -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from tests.functional.metrics.fixtures import ( @@ -85,14 +85,14 @@ def models(self): "people.sql": models_people_sql, } - # tests that we get a ParsingException with an invalid model ref, where + # tests that we get a ParsingError with an invalid model ref, where # the model name does not have quotes def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -104,14 +104,14 @@ def models(self): "people.sql": models_people_sql, } - # tests that we get a ParsingException with an invalid model ref, where + # tests that we get a ParsingError with an invalid model ref, where # the model name does not have quotes def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -123,13 +123,13 @@ def models(self): "people.sql": models_people_sql, } - # tests that we get a ParsingException with a missing expression + # tests that we get a ParsingError with a missing expression def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -142,7 +142,7 @@ def models(self): } def test_names_with_spaces(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["run"]) 
assert "cannot contain spaces" in str(exc.value) @@ -156,7 +156,7 @@ def models(self): } def test_names_with_special_char(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["run"]) assert "must contain only letters, numbers and underscores" in str(exc.value) @@ -170,7 +170,7 @@ def models(self): } def test_names_with_leading_number(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["run"]) assert "must begin with a letter" in str(exc.value) @@ -184,7 +184,7 @@ def models(self): } def test_long_name(self, project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["run"]) assert "cannot contain more than 250 characters" in str(exc.value) @@ -198,7 +198,7 @@ def models(self): } def test_invalid_derived_metrics(self, project): - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -294,14 +294,14 @@ def models(self): "people.sql": models_people_sql, } - # Tests that we get a ParsingException with an invalid metric definition. + # Tests that we get a ParsingError with an invalid metric definition. # This metric definition is missing timestamp but HAS a time_grains property def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) @@ -313,12 +313,12 @@ def models(self): "people.sql": models_people_sql, } - # Tests that we get a ParsingException with an invalid metric definition. + # Tests that we get a ParsingError with an invalid metric definition. # This metric definition is missing timestamp but HAS a window property def test_simple_metric( self, project, ): # initial run - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt(["run"]) diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py index 44a6696931b..36495fd7020 100644 --- a/tests/functional/schema_tests/test_schema_v2_tests.py +++ b/tests/functional/schema_tests/test_schema_v2_tests.py @@ -95,7 +95,7 @@ alt_local_utils__macros__type_timestamp_sql, all_quotes_schema__schema_yml, ) -from dbt.exceptions import ParsingException, CompilationException, DuplicateResourceName +from dbt.exceptions import ParsingError, CompilationError, DuplicateResourceNameError from dbt.contracts.results import TestStatus @@ -410,7 +410,7 @@ def test_malformed_schema_will_break_run( self, project, ): - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): run_dbt() @@ -904,7 +904,7 @@ def test_generic_test_collision( project, ): """These tests collide, since only the configs differ""" - with pytest.raises(DuplicateResourceName) as exc: + with pytest.raises(DuplicateResourceNameError) as exc: run_dbt() assert "dbt found two tests with the name" in str(exc.value) @@ -922,7 +922,7 @@ def test_generic_test_config_custom_macros( project, ): """This test has a reference to a custom macro its configs""" - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt() assert "Invalid generic test configuration" in str(exc) @@ -987,7 +987,7 @@ def test_invalid_schema_file( self, project, ): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt() assert re.search(r"'models' is not a list", str(exc)) diff --git 
a/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py b/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py index 33e6b61aebc..dfb51f7992e 100644 --- a/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py +++ b/tests/functional/simple_snapshot/test_missing_strategy_snapshot.py @@ -1,6 +1,6 @@ import pytest from dbt.tests.util import run_dbt -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from tests.functional.simple_snapshot.fixtures import ( models__schema_yml, models__ref_snapshot_sql, @@ -43,7 +43,7 @@ def macros(): def test_missing_strategy(project): - with pytest.raises(ParsingException) as exc: + with pytest.raises(ParsingError) as exc: run_dbt(["compile"], expect_pass=False) assert "Snapshots must be configured with a 'strategy'" in str(exc.value) diff --git a/tests/functional/source_overrides/test_source_overrides_duplicate_model.py b/tests/functional/source_overrides/test_source_overrides_duplicate_model.py index cd35fd6f7c2..e3cdebe4794 100644 --- a/tests/functional/source_overrides/test_source_overrides_duplicate_model.py +++ b/tests/functional/source_overrides/test_source_overrides_duplicate_model.py @@ -1,5 +1,5 @@ import os -from dbt.exceptions import CompilationException +from dbt.exceptions import CompilationError import pytest from dbt.tests.util import run_dbt @@ -56,7 +56,7 @@ def project_config_update(self): def test_source_duplicate_overrides(self, project): run_dbt(["deps"]) - with pytest.raises(CompilationException) as exc: + with pytest.raises(CompilationError) as exc: run_dbt(["compile"]) assert "dbt found two schema.yml entries for the same source named" in str(exc.value) diff --git a/tests/functional/sources/test_simple_source.py b/tests/functional/sources/test_simple_source.py index 0c69f859b6b..cd08647f367 100644 --- a/tests/functional/sources/test_simple_source.py +++ b/tests/functional/sources/test_simple_source.py @@ -1,7 +1,7 @@ import os import pytest import yaml -from dbt.exceptions import ParsingException +from dbt.exceptions import ParsingError from dbt.tests.util import ( run_dbt, @@ -164,7 +164,7 @@ def models(self): } def test_malformed_schema_will_break_run(self, project): - with pytest.raises(ParsingException): + with pytest.raises(ParsingError): self.run_dbt_with_vars(project, ["seed"]) diff --git a/tests/functional/sources/test_source_fresher_state.py b/tests/functional/sources/test_source_fresher_state.py index 362f9a816c0..a97694a9c5a 100644 --- a/tests/functional/sources/test_source_fresher_state.py +++ b/tests/functional/sources/test_source_fresher_state.py @@ -4,7 +4,7 @@ import pytest from datetime import datetime, timedelta -from dbt.exceptions import InternalException +from dbt.exceptions import DbtInternalError from dbt.tests.util import AnyStringWith, AnyFloat @@ -619,7 +619,7 @@ class TestSourceFresherNoPreviousState(SuccessfulSourceFreshnessTest): def test_intentional_failure_no_previous_state(self, project): self.run_dbt_with_vars(project, ["run"]) # TODO add the current and previous but with previous as null - with pytest.raises(InternalException) as excinfo: + with pytest.raises(DbtInternalError) as excinfo: self.run_dbt_with_vars( project, ["run", "-s", "source_status:fresher", "--defer", "--state", "previous_state"], @@ -641,7 +641,7 @@ def test_intentional_failure_no_previous_state(self, project): copy_to_previous_state() assert previous_state_results[0].max_loaded_at is not None - with pytest.raises(InternalException) as excinfo: + with 
pytest.raises(DbtInternalError) as excinfo: self.run_dbt_with_vars( project, ["run", "-s", "source_status:fresher", "--defer", "--state", "previous_state"], diff --git a/tests/unit/test_connection_retries.py b/tests/unit/test_connection_retries.py index 8b031ce5ab4..9076adb7ef9 100644 --- a/tests/unit/test_connection_retries.py +++ b/tests/unit/test_connection_retries.py @@ -1,7 +1,7 @@ import functools import pytest from requests.exceptions import RequestException -from dbt.exceptions import ConnectionException +from dbt.exceptions import ConnectionError from dbt.utils import _connection_exception_retry @@ -28,7 +28,7 @@ class TestMaxRetries: def test_no_retry(self): fn_to_retry = functools.partial(no_success_fn) - with pytest.raises(ConnectionException): + with pytest.raises(ConnectionError): _connection_exception_retry(fn_to_retry, 3) diff --git a/tests/unit/test_deprecations.py b/tests/unit/test_deprecations.py index df7a43c867a..3f03e3e35a5 100644 --- a/tests/unit/test_deprecations.py +++ b/tests/unit/test_deprecations.py @@ -12,7 +12,7 @@ def to_be_decorated(): # simpletest that the return value is not modified -def test_deprecated(): +def test_deprecated_func(): assert(hasattr(to_be_decorated, '__wrapped__')) assert(to_be_decorated() == 5) @@ -36,7 +36,7 @@ def is_deprecated(self, func): def test_missing_config(self): func = dbt.exceptions.missing_config - exception = dbt.exceptions.MissingConfig + exception = dbt.exceptions.MissingConfigError model = argparse.Namespace() model.unique_id = '' name = "" @@ -49,7 +49,7 @@ def test_missing_config(self): def test_missing_materialization(self): func = dbt.exceptions.missing_materialization - exception = dbt.exceptions.MissingMaterialization + exception = dbt.exceptions.MissingMaterializationError model = argparse.Namespace() model.config = argparse.Namespace() model.config.materialized = '' @@ -63,7 +63,7 @@ def test_missing_materialization(self): def test_missing_relation(self): func = dbt.exceptions.missing_relation - exception = dbt.exceptions.MissingRelation + exception = dbt.exceptions.MissingRelationError relation = "" self.is_deprecated(func) @@ -74,7 +74,7 @@ def test_missing_relation(self): def test_raise_ambiguous_alias(self): func = dbt.exceptions.raise_ambiguous_alias - exception = dbt.exceptions.AmbiguousAlias + exception = dbt.exceptions.AmbiguousAliasError node_1 = argparse.Namespace() node_1.unique_id = "" node_1.original_file_path = "" @@ -91,7 +91,7 @@ def test_raise_ambiguous_alias(self): def test_raise_ambiguous_catalog_match(self): func = dbt.exceptions.raise_ambiguous_catalog_match - exception = dbt.exceptions.AmbiguousCatalogMatch + exception = dbt.exceptions.AmbiguousCatalogMatchError unique_id = "" match_1 = {"metadata": {"schema": ""}} match_2 = {"metadata": {"schema": ""}} @@ -104,7 +104,7 @@ def test_raise_ambiguous_catalog_match(self): def test_raise_cache_inconsistent(self): func = dbt.exceptions.raise_cache_inconsistent - exception = dbt.exceptions.CacheInconsistency + exception = dbt.exceptions.CacheInconsistencyError msg = "" self.is_deprecated(func) @@ -115,7 +115,7 @@ def test_raise_cache_inconsistent(self): def test_raise_dataclass_not_dict(self): func = dbt.exceptions.raise_dataclass_not_dict - exception = dbt.exceptions.DataclassNotDict + exception = dbt.exceptions.DataclassNotDictError obj = "" self.is_deprecated(func) @@ -126,7 +126,7 @@ def test_raise_dataclass_not_dict(self): def test_raise_compiler_error(self): func = dbt.exceptions.raise_compiler_error - exception = 
dbt.exceptions.CompilationException + exception = dbt.exceptions.CompilationError msg = "" self.is_deprecated(func) @@ -137,7 +137,7 @@ def test_raise_compiler_error(self): def test_raise_database_error(self): func = dbt.exceptions.raise_database_error - exception = dbt.exceptions.DatabaseException + exception = dbt.exceptions.DbtDatabaseError msg = "" self.is_deprecated(func) @@ -148,7 +148,7 @@ def test_raise_database_error(self): def test_raise_dep_not_found(self): func = dbt.exceptions.raise_dep_not_found - exception = dbt.exceptions.DependencyNotFound + exception = dbt.exceptions.DependencyNotFoundError node = "" node_description = "" required_pkg = "" @@ -161,7 +161,7 @@ def test_raise_dep_not_found(self): def test_raise_dependency_error(self): func = dbt.exceptions.raise_dependency_error - exception = dbt.exceptions.DependencyException + exception = dbt.exceptions.DependencyError msg = "" self.is_deprecated(func) @@ -172,7 +172,7 @@ def test_raise_dependency_error(self): def test_raise_duplicate_patch_name(self): func = dbt.exceptions.raise_duplicate_patch_name - exception = dbt.exceptions.DuplicatePatchPath + exception = dbt.exceptions.DuplicatePatchPathError patch_1 = argparse.Namespace() patch_1.name = "" patch_1.original_file_path = "" @@ -186,7 +186,7 @@ def test_raise_duplicate_patch_name(self): def test_raise_duplicate_resource_name(self): func = dbt.exceptions.raise_duplicate_resource_name - exception = dbt.exceptions.DuplicateResourceName + exception = dbt.exceptions.DuplicateResourceNameError node_1 = argparse.Namespace() node_1.name = "" node_1.resource_type = NodeType('model') @@ -207,7 +207,7 @@ def test_raise_duplicate_resource_name(self): def test_raise_invalid_property_yml_version(self): func = dbt.exceptions.raise_invalid_property_yml_version - exception = dbt.exceptions.InvalidPropertyYML + exception = dbt.exceptions.PropertyYMLError path = "" issue = "" @@ -219,7 +219,7 @@ def test_raise_invalid_property_yml_version(self): def test_raise_not_implemented(self): func = dbt.exceptions.raise_not_implemented - exception = dbt.exceptions.NotImplementedException + exception = dbt.exceptions.NotImplementedError msg = "" self.is_deprecated(func) @@ -230,7 +230,7 @@ def test_raise_not_implemented(self): def test_relation_wrong_type(self): func = dbt.exceptions.relation_wrong_type - exception = dbt.exceptions.RelationWrongType + exception = dbt.exceptions.RelationWrongTypeError relation = argparse.Namespace() relation.type = "" @@ -244,7 +244,7 @@ def test_relation_wrong_type(self): def test_raise_duplicate_alias(self): func = dbt.exceptions.raise_duplicate_alias - exception = dbt.exceptions.DuplicateAlias + exception = dbt.exceptions.DuplicateAliasError kwargs = {"": ""} aliases = {"": ""} canonical_key = "" @@ -257,7 +257,7 @@ def test_raise_duplicate_alias(self): def test_raise_duplicate_source_patch_name(self): func = dbt.exceptions.raise_duplicate_source_patch_name - exception = dbt.exceptions.DuplicateSourcePatchName + exception = dbt.exceptions.DuplicateSourcePatchNameError patch_1 = argparse.Namespace() patch_1.name = "" patch_1.path = "" @@ -273,7 +273,7 @@ def test_raise_duplicate_source_patch_name(self): def test_raise_duplicate_macro_patch_name(self): func = dbt.exceptions.raise_duplicate_macro_patch_name - exception = dbt.exceptions.DuplicateMacroPatchName + exception = dbt.exceptions.DuplicateMacroPatchNameError patch_1 = argparse.Namespace() patch_1.package_name = "" patch_1.name = "" @@ -288,7 +288,7 @@ def test_raise_duplicate_macro_patch_name(self): def 
test_raise_duplicate_macro_name(self): func = dbt.exceptions.raise_duplicate_macro_name - exception = dbt.exceptions.DuplicateMacroName + exception = dbt.exceptions.DuplicateMacroNameError node_1 = argparse.Namespace() node_1.name = "" node_1.package_name = "" @@ -308,7 +308,7 @@ def test_raise_duplicate_macro_name(self): def test_approximate_relation_match(self): func = dbt.exceptions.approximate_relation_match - exception = dbt.exceptions.ApproximateMatch + exception = dbt.exceptions.ApproximateMatchError target = "" relation = "" @@ -320,7 +320,7 @@ def test_approximate_relation_match(self): def test_get_relation_returned_multiple_results(self): func = dbt.exceptions.get_relation_returned_multiple_results - exception = dbt.exceptions.RelationReturnedMultipleResults + exception = dbt.exceptions.RelationReturnedMultipleResultsError kwargs = {} matches = [] @@ -332,7 +332,7 @@ def test_get_relation_returned_multiple_results(self): def test_system_error(self): func = dbt.exceptions.system_error - exception = dbt.exceptions.OperationException + exception = dbt.exceptions.OperationError operation_name = "" self.is_deprecated(func) @@ -343,7 +343,7 @@ def test_system_error(self): def test_invalid_materialization_argument(self): func = dbt.exceptions.invalid_materialization_argument - exception = dbt.exceptions.InvalidMaterializationArg + exception = dbt.exceptions.MaterializationArgError name = "" argument = "" @@ -386,7 +386,7 @@ def test_bad_package_spec(self): def test_raise_git_cloning_problem(self): func = dbt.exceptions.raise_git_cloning_problem - exception = dbt.exceptions.GitCloningProblem + exception = dbt.exceptions.UnknownGitCloningProblemError repo = "" self.is_deprecated(func) @@ -397,7 +397,7 @@ def test_raise_git_cloning_problem(self): def test_macro_invalid_dispatch_arg(self): func = dbt.exceptions.macro_invalid_dispatch_arg - exception = dbt.exceptions.MacroInvalidDispatchArg + exception = dbt.exceptions.MacroDispatchArgError macro_name = "" self.is_deprecated(func) @@ -408,7 +408,7 @@ def test_macro_invalid_dispatch_arg(self): def test_dependency_not_found(self): func = dbt.exceptions.dependency_not_found - exception = dbt.exceptions.GraphDependencyNotFound + exception = dbt.exceptions.GraphDependencyNotFoundError node = argparse.Namespace() node.unique_id = "" dependency = "" @@ -421,7 +421,7 @@ def test_dependency_not_found(self): def test_target_not_found(self): func = dbt.exceptions.target_not_found - exception = dbt.exceptions.TargetNotFound + exception = dbt.exceptions.TargetNotFoundError node = argparse.Namespace() node.unique_id = "" node.original_file_path = "" @@ -437,7 +437,7 @@ def test_target_not_found(self): def test_doc_target_not_found(self): func = dbt.exceptions.doc_target_not_found - exception = dbt.exceptions.DocTargetNotFound + exception = dbt.exceptions.DocTargetNotFoundError model = argparse.Namespace() model.unique_id = "" target_doc_name = "" @@ -451,7 +451,7 @@ def test_doc_target_not_found(self): def test_ref_bad_context(self): func = dbt.exceptions.ref_bad_context - exception = dbt.exceptions.RefBadContext + exception = dbt.exceptions.RefBadContextError model = argparse.Namespace() model.name = "" args = [] @@ -464,7 +464,7 @@ def test_ref_bad_context(self): def test_metric_invalid_args(self): func = dbt.exceptions.metric_invalid_args - exception = dbt.exceptions.MetricInvalidArgs + exception = dbt.exceptions.MetricArgsError model = argparse.Namespace() model.unique_id = "" args = [] @@ -477,7 +477,7 @@ def test_metric_invalid_args(self): def 
test_ref_invalid_args(self): func = dbt.exceptions.ref_invalid_args - exception = dbt.exceptions.RefInvalidArgs + exception = dbt.exceptions.RefArgsError model = argparse.Namespace() model.unique_id = "" args = [] @@ -490,7 +490,7 @@ def test_ref_invalid_args(self): def test_invalid_bool_error(self): func = dbt.exceptions.invalid_bool_error - exception = dbt.exceptions.InvalidBoolean + exception = dbt.exceptions.BooleanError return_value = "" macro_name = "" @@ -502,7 +502,7 @@ def test_invalid_bool_error(self): def test_invalid_type_error(self): func = dbt.exceptions.invalid_type_error - exception = dbt.exceptions.InvalidMacroArgType + exception = dbt.exceptions.MacroArgTypeError method_name = "" arg_name = "" got_value = "" @@ -516,7 +516,7 @@ def test_invalid_type_error(self): def test_disallow_secret_env_var(self): func = dbt.exceptions.disallow_secret_env_var - exception = dbt.exceptions.DisallowSecretEnvVar + exception = dbt.exceptions.SecretEnvVarLocationError env_var_name = "" self.is_deprecated(func) @@ -527,7 +527,7 @@ def test_disallow_secret_env_var(self): def test_raise_parsing_error(self): func = dbt.exceptions.raise_parsing_error - exception = dbt.exceptions.ParsingException + exception = dbt.exceptions.ParsingError msg = "" self.is_deprecated(func) @@ -538,7 +538,7 @@ def test_raise_parsing_error(self): def test_raise_unrecognized_credentials_type(self): func = dbt.exceptions.raise_unrecognized_credentials_type - exception = dbt.exceptions.UnrecognizedCredentialType + exception = dbt.exceptions.UnrecognizedCredentialTypeError typename = "" supported_types = [] @@ -550,7 +550,7 @@ def test_raise_unrecognized_credentials_type(self): def test_raise_patch_targets_not_found(self): func = dbt.exceptions.raise_patch_targets_not_found - exception = dbt.exceptions.PatchTargetNotFound + exception = dbt.exceptions.PatchTargetNotFoundError node = argparse.Namespace() node.name = "" node.original_file_path = "" @@ -564,7 +564,7 @@ def test_raise_patch_targets_not_found(self): def test_multiple_matching_relations(self): func = dbt.exceptions.multiple_matching_relations - exception = dbt.exceptions.RelationReturnedMultipleResults + exception = dbt.exceptions.RelationReturnedMultipleResultsError kwargs = {} matches = [] @@ -576,7 +576,7 @@ def test_multiple_matching_relations(self): def test_materialization_not_available(self): func = dbt.exceptions.materialization_not_available - exception = dbt.exceptions.MaterializationNotAvailable + exception = dbt.exceptions.MaterializationNotAvailableError model = argparse.Namespace() model.config = argparse.Namespace() model.config.materialized = "" @@ -590,7 +590,7 @@ def test_materialization_not_available(self): def test_macro_not_found(self): func = dbt.exceptions.macro_not_found - exception = dbt.exceptions.MacroNotFound + exception = dbt.exceptions.MacroNotFoundError model = argparse.Namespace() model.unique_id = "" target_macro_id = "" diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index a7056a729b2..c7d0260a93b 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -133,7 +133,7 @@ def test_event_codes(self): AdapterDeprecationWarning(old_name="", new_name=""), MetricAttributesRenamed(metric_name=""), ExposureNameDeprecation(exposure=""), - FunctionDeprecated(function_name="", reason="", suggested_action="", version=""), + InternalDeprecation(name="", reason="", suggested_action="", version=""), # E - DB Adapter ====================== AdapterEventDebug(), @@ -183,9 +183,9 @@ def test_event_codes(self): 
ParseCmdOut(msg="testing"), GenericTestFileParse(path=""), MacroFileParse(path=""), - PartialParsingExceptionProcessingFile(file=""), + PartialParsingErrorProcessingFile(file=""), PartialParsingFile(file_id=""), - PartialParsingException(exc_info={}), + PartialParsingError(exc_info={}), PartialParsingSkipParsing(), UnableToPartialParse(reason="something went wrong"), PartialParsingNotEnabled(), @@ -344,7 +344,7 @@ def test_event_codes(self): # W - Node testing ====================== CatchableExceptionOnRun(exc=""), - InternalExceptionOnRun(build_path="", exc=""), + InternalErrorOnRun(build_path="", exc=""), GenericExceptionOnRun(build_path="", unique_id="", exc=""), NodeConnectionReleaseError(node_name="", exc=""), FoundStats(stat_line=""), From dd4b47d8b1fee9d50a2b5a98f76a49428def8c47 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Tue, 10 Jan 2023 17:26:35 -0700 Subject: [PATCH 103/156] Check length of escaped strings in the adapter test (#6567) * Check length of escaped strings in the adapter test * One column per line to improve readability --- .../Under the Hood-20230110-145648.yaml | 6 ++++ .../utils/fixture_escape_single_quotes.py | 33 ++++++++++++++++--- 2 files changed, 35 insertions(+), 4 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230110-145648.yaml diff --git a/.changes/unreleased/Under the Hood-20230110-145648.yaml b/.changes/unreleased/Under the Hood-20230110-145648.yaml new file mode 100644 index 00000000000..9a21f1da645 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230110-145648.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Check length of escaped strings in the adapter test +time: 2023-01-10T14:56:48.044198-07:00 +custom: + Author: dbeatty10 + Issue: "6566" diff --git a/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py b/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py index d7e7148b886..aeaaaa44193 100644 --- a/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py +++ b/tests/adapter/dbt/tests/adapter/utils/fixture_escape_single_quotes.py @@ -1,15 +1,37 @@ # escape_single_quotes models__test_escape_single_quotes_quote_sql = """ -select '{{ escape_single_quotes("they're") }}' as actual, 'they''re' as expected union all -select '{{ escape_single_quotes("they are") }}' as actual, 'they are' as expected +select + '{{ escape_single_quotes("they're") }}' as actual, + 'they''re' as expected, + {{ length(string_literal(escape_single_quotes("they're"))) }} as actual_length, + 7 as expected_length + +union all + +select + '{{ escape_single_quotes("they are") }}' as actual, + 'they are' as expected, + {{ length(string_literal(escape_single_quotes("they are"))) }} as actual_length, + 8 as expected_length """ # The expected literal is 'they\'re'. The second backslash is to escape it from Python. 
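# (Editorial note, not part of the original fixture: Python collapses the
# doubled backslash at runtime, so the rendered SQL compares against the
# literal 'they\'re', i.e. a single backslash escaping the quote. This is the
# variant for adapters that escape single quotes with a backslash rather than
# by doubling them as in the fixture above.)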
models__test_escape_single_quotes_backslash_sql = """ -select '{{ escape_single_quotes("they're") }}' as actual, 'they\\'re' as expected union all -select '{{ escape_single_quotes("they are") }}' as actual, 'they are' as expected +select + '{{ escape_single_quotes("they're") }}' as actual, + 'they\\'re' as expected, + {{ length(string_literal(escape_single_quotes("they're"))) }} as actual_length, + 7 as expected_length + +union all + +select + '{{ escape_single_quotes("they are") }}' as actual, + 'they are' as expected, + {{ length(string_literal(escape_single_quotes("they are"))) }} as actual_length, + 8 as expected_length """ @@ -21,4 +43,7 @@ - assert_equal: actual: actual expected: expected + - assert_equal: + actual: actual_length + expected: expected_length """ From 7a6160273875853525c6709cc196b4835617530a Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Tue, 10 Jan 2023 19:43:52 -0500 Subject: [PATCH 104/156] Add --warn-error-options (#6520) * adding --warn-error-options Co-authored-by: Github Build Bot --- .../unreleased/Features-20230104-181003.yaml | 7 + core/dbt/cli/main.py | 1 + core/dbt/cli/option_types.py | 15 + core/dbt/cli/params.py | 17 +- core/dbt/config/utils.py | 12 +- core/dbt/contracts/project.py | 1 + .../docs/build/doctrees/environment.pickle | Bin 65160 -> 183655 bytes core/dbt/docs/build/html/.buildinfo | 2 +- .../_sphinx_javascript_frameworks_compat.js | 134 - core/dbt/docs/build/html/_static/basic.css | 3 + .../docs/build/html/_static/jquery-3.6.0.js | 10881 ---------------- core/dbt/docs/build/html/_static/jquery.js | 2 - .../build/html/_static/underscore-1.13.1.js | 2042 --- .../dbt/docs/build/html/_static/underscore.js | 6 - core/dbt/docs/build/html/genindex.html | 5 +- core/dbt/docs/build/html/index.html | 5 +- core/dbt/docs/build/html/search.html | 5 +- core/dbt/events/functions.py | 6 +- core/dbt/events/proto_types.py | 8 +- core/dbt/events/types.py | 4 +- core/dbt/exceptions.py | 7 +- core/dbt/flags.py | 37 +- core/dbt/helper_types.py | 66 +- core/dbt/main.py | 18 +- core/dbt/task/runnable.py | 2 +- core/dbt/utils.py | 3 +- core/dbt/version.py | 4 +- test/unit/test_flags.py | 75 +- tests/unit/test_events.py | 2 +- tests/unit/test_functions.py | 49 + tests/unit/test_helper_types.py | 46 + 31 files changed, 350 insertions(+), 13115 deletions(-) create mode 100644 .changes/unreleased/Features-20230104-181003.yaml delete mode 100644 core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js delete mode 100644 core/dbt/docs/build/html/_static/jquery-3.6.0.js delete mode 100644 core/dbt/docs/build/html/_static/jquery.js delete mode 100644 core/dbt/docs/build/html/_static/underscore-1.13.1.js delete mode 100644 core/dbt/docs/build/html/_static/underscore.js create mode 100644 tests/unit/test_functions.py create mode 100644 tests/unit/test_helper_types.py diff --git a/.changes/unreleased/Features-20230104-181003.yaml b/.changes/unreleased/Features-20230104-181003.yaml new file mode 100644 index 00000000000..856329cb4a7 --- /dev/null +++ b/.changes/unreleased/Features-20230104-181003.yaml @@ -0,0 +1,7 @@ +kind: Features +body: '--warn-error-options: Treat warnings as errors for specific events, based on + user configuration' +time: 2023-01-04T18:10:03.203142-05:00 +custom: + Author: MichelleArk + Issue: "6165" diff --git a/core/dbt/cli/main.py b/core/dbt/cli/main.py index 6f0a153c923..9942db702ca 100644 --- a/core/dbt/cli/main.py +++ b/core/dbt/cli/main.py @@ -46,6 +46,7 @@ def cli_runner(): @p.version @p.version_check @p.warn_error 
+@p.warn_error_options @p.write_json def cli(ctx, **kwargs): """An ELT tool for managing your SQL transformations and data models. diff --git a/core/dbt/cli/option_types.py b/core/dbt/cli/option_types.py index 523df651775..f0c497b5bec 100644 --- a/core/dbt/cli/option_types.py +++ b/core/dbt/cli/option_types.py @@ -1,6 +1,8 @@ from click import ParamType import yaml +from dbt.helper_types import WarnErrorOptions + class YAML(ParamType): """The Click YAML type. Converts YAML strings into objects.""" @@ -17,6 +19,19 @@ def convert(self, value, param, ctx): self.fail(f"String '{value}' is not valid YAML", param, ctx) +class WarnErrorOptionsType(YAML): + """The Click WarnErrorOptions type. Converts YAML strings into objects.""" + + name = "WarnErrorOptionsType" + + def convert(self, value, param, ctx): + include_exclude = super().convert(value, param, ctx) + + return WarnErrorOptions( + include=include_exclude.get("include", []), exclude=include_exclude.get("exclude", []) + ) + + class Truthy(ParamType): """The Click Truthy type. Converts strings into a "truthy" type""" diff --git a/core/dbt/cli/params.py b/core/dbt/cli/params.py index b739e886e2a..3ad3747e962 100644 --- a/core/dbt/cli/params.py +++ b/core/dbt/cli/params.py @@ -1,7 +1,7 @@ from pathlib import Path, PurePath import click -from dbt.cli.option_types import YAML +from dbt.cli.option_types import YAML, WarnErrorOptionsType from dbt.cli.resolvers import default_project_dir, default_profiles_dir @@ -358,9 +358,20 @@ ) warn_error = click.option( - "--warn-error/--no-warn-error", + "--warn-error", envvar="DBT_WARN_ERROR", - help="If dbt would normally warn, instead raise an exception. Examples include --models that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.", + help="If dbt would normally warn, instead raise an exception. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests.", + default=None, + flag_value=True, +) + +warn_error_options = click.option( + "--warn-error-options", + envvar="DBT_WARN_ERROR_OPTIONS", + default=None, + help="""If dbt would normally warn, instead raise an exception based on include/exclude configuration. Examples include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, + and missing sources/refs in tests. This argument should be a YAML string, with keys 'include' or 'exclude'. eg. 
'{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}'""", + type=WarnErrorOptionsType(), ) write_json = click.option( diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py index eb379b5d1f7..cb7c90eac68 100644 --- a/core/dbt/config/utils.py +++ b/core/dbt/config/utils.py @@ -8,20 +8,24 @@ from dbt.config import Profile, Project, read_user_config from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.events.functions import fire_event -from dbt.events.types import InvalidVarsYAML -from dbt.exceptions import DbtValidationError, VarsArgNotYamlDictError +from dbt.events.types import InvalidOptionYAML +from dbt.exceptions import DbtValidationError, OptionNotYamlDict def parse_cli_vars(var_string: str) -> Dict[str, Any]: + return parse_cli_yaml_string(var_string, "vars") + + +def parse_cli_yaml_string(var_string: str, cli_option_name: str) -> Dict[str, Any]: try: cli_vars = yaml_helper.load_yaml_text(var_string) var_type = type(cli_vars) if var_type is dict: return cli_vars else: - raise VarsArgNotYamlDictError(var_type) + raise OptionNotYamlDict(var_type, cli_option_name) except DbtValidationError: - fire_event(InvalidVarsYAML()) + fire_event(InvalidOptionYAML(option_name=cli_option_name)) raise diff --git a/core/dbt/contracts/project.py b/core/dbt/contracts/project.py index 2fd7434bd87..ba15b9d32b6 100644 --- a/core/dbt/contracts/project.py +++ b/core/dbt/contracts/project.py @@ -249,6 +249,7 @@ class UserConfig(ExtensibleDbtClassMixin, Replaceable, UserConfigContract): printer_width: Optional[int] = None write_json: Optional[bool] = None warn_error: Optional[bool] = None + warn_error_options: Optional[Dict[str, Union[str, List[str]]]] = None log_format: Optional[str] = None debug: Optional[bool] = None version_check: Optional[bool] = None diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle index 8aaad5e25b0b97cc741c122d6608193f2544081f..1e44093fd51bdf1e1dad0604bc71acfe68ce02d8 100644 GIT binary patch literal 183655 zcmeHw3z!{8b*3ciHBV_qlI179wlE{v8VTDtfC*q(HpVMkGDwCPEN*)4?U~zG_vP-s zk_OuWFEO^DjWNC^I4lrg6P6_`5E0DVm;k{f4$H&wfAbI@!XP$rl))HivuC!*bNLJUcwSgYIAxsJm2!2N^Ef4 z?sR9$70)ZrIt?fxJY$?)N0P=^Qay3e80WFdJ}QS_1Hq0e0m{wUcW*@yb;o3#|TL?Gk?L zD%WnSGoE z+v{08|0GUAIl7nCT+71hKJf;(-S%(Vc0-hR(+xoJO`+k^oDDKL?3b-ZqkX76+kN$` z=gYopwLB0RtGFY9y+^s@l1=?Ht~kP#Ja$ zssV@x5qACA>E}#)&K$qHc1EqluU+f3vI1Uq&N)$0N2~U%)ou7?w+(e1l6I0+tpfNK z6B`KGg;uC$dr+≶w!`X1m%25|NiUW72O|%Iz8S%re~MIwI?gY*yK;0ZZIIYOmUE zT253+r^DS%P;A?8wSBwn+OwedR>dxR9SfS5BOJ*nGr?WkixS5>^K;m8dS%a_XT@T2 znyf+7(7@&)u36AnNI*#Ygt(^N;Z+t^Ztb+%>0Fj;q86e&1C16)Yd23HJ7C+L@*&G@ zL8ZiCCcJK^13u}Aoc-a$P6P^Bor5bho9$K^u0vmkdNgN_)p!LklG3-f`|7W5WC5bJJwyLY1+*YdPPvwIWd zK*ng(YIVSgymHqCALSNSw;aC%t?_*C2)k%J{JZSTwVV~1bsZP5A7ps%k3_=cRE*R z8Tzl9Y}R_!Hr6g-)ya~h#`W@xSF^Fll3+Qe^LB|{ z!MnKHmup|fyCy|iIpG^DZJI}aVhn|6%S;K6@h@s^>vkvyXtGcZNt@a^kJrWQ1Dy);PeVZq)W_NX+Y7bIa zwQ$@O9Dnq-B$MXgDmzLF(Ybcqh4V`}icTBdF6`S`M(ym{Gx!rZXVD;nToiCBpj8ZI zegZejX~Sd~1ciPaU+O`h2@Kzmd_J@oW$<$F&d3qgvvM}AFui=psiGe(;dk-Uuyzx? 
diff --git a/core/dbt/docs/build/html/.buildinfo b/core/dbt/docs/build/html/.buildinfo
index 39803f13c3e..f5b6f776592 100644
--- a/core/dbt/docs/build/html/.buildinfo
+++ b/core/dbt/docs/build/html/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 1ee31fc16e025fb98598189ba2cb5fcb
+config: e27d6c1c419f2f0af393858cdf674109
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js b/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js
deleted file mode 100644
index 8549469dc29..00000000000
--- a/core/dbt/docs/build/html/_static/_sphinx_javascript_frameworks_compat.js
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * _sphinx_javascript_frameworks_compat.js
- * ~~~~~~~~~~
- *
- * Compatability shim for jQuery and underscores.js.
- *
- * WILL BE REMOVED IN Sphinx 6.0
- * xref RemovedInSphinx60Warning
- *
- */
-
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-
-/**
- * small helper function to urldecode strings
- *
- * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
- */
-jQuery.urldecode = function(x) {
-    if (!x) {
-        return x
-    }
-    return decodeURIComponent(x.replace(/\+/g, ' '));
-};
-
-/**
- * small helper function to urlencode strings
- */
-jQuery.urlencode = encodeURIComponent;
-
-/**
- * This function returns the parsed url parameters of the
- * current request. Multiple values per key are supported,
- * it will always return arrays of strings for the value parts.
- */
-jQuery.getQueryParameters = function(s) {
-    if (typeof s === 'undefined')
-        s = document.location.search;
-    var parts = s.substr(s.indexOf('?') + 1).split('&');
-    var result = {};
-    for (var i = 0; i < parts.length; i++) {
-        var tmp = parts[i].split('=', 2);
-        var key = jQuery.urldecode(tmp[0]);
-        var value = jQuery.urldecode(tmp[1]);
-        if (key in result)
-            result[key].push(value);
-        else
-            result[key] = [value];
-    }
-    return result;
-};
-
-/**
- * highlight a given string on a jquery object by wrapping it in
- * span elements with the given class name.
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} diff --git a/core/dbt/docs/build/html/_static/basic.css b/core/dbt/docs/build/html/_static/basic.css index 4e9a9f1faca..fb02bbef7ad 100644 --- a/core/dbt/docs/build/html/_static/basic.css +++ b/core/dbt/docs/build/html/_static/basic.css @@ -324,6 +324,7 @@ aside.sidebar { p.sidebar-title { font-weight: bold; } + nav.contents, aside.topic, div.admonition, div.topic, blockquote { @@ -331,6 +332,7 @@ div.admonition, div.topic, blockquote { } /* -- topics ---------------------------------------------------------------- */ + nav.contents, aside.topic, div.topic { @@ -606,6 +608,7 @@ ol.simple p, ul.simple p { margin-bottom: 0; } + aside.footnote > span, div.citation > span { float: left; diff --git a/core/dbt/docs/build/html/_static/jquery-3.6.0.js b/core/dbt/docs/build/html/_static/jquery-3.6.0.js deleted file mode 100644 index fc6c299b73e..00000000000 --- a/core/dbt/docs/build/html/_static/jquery-3.6.0.js +++ /dev/null @@ -1,10881 +0,0 @@ -/*! 
- * jQuery JavaScript Library v3.6.0 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright OpenJS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2021-03-02T17:08Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 - // Plus for old WebKit, typeof returns "function" for HTML collections - // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) - return typeof obj === "function" && typeof obj.nodeType !== "number" && - typeof obj.item !== "function"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. 
- // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.6.0", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. 
- globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), - function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); - } ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.6 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2021-02-16 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem && elem.namespaceURI, - docElem = elem && ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
- function( tag, context ) {
- if ( typeof context.getElementsByTagName !== "undefined" ) {
- return context.getElementsByTagName( tag );
-
- // DocumentFragment nodes don't have gEBTN
- } else if ( support.qsa ) {
- return context.querySelectorAll( tag );
- }
- } :
-
- function( tag, context ) {
- var elem,
- tmp = [],
- i = 0,
-
- // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
- results = context.getElementsByTagName( tag );
-
- // Filter out possible comments
- if ( tag === "*" ) {
- while ( ( elem = results[ i++ ] ) ) {
- if ( elem.nodeType === 1 ) {
- tmp.push( elem );
- }
- }
-
- return tmp;
- }
- return results;
- };
-
- // Class
- Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
- if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
- return context.getElementsByClassName( className );
- }
- };
-
- /* QSA/matchesSelector
- ---------------------------------------------------------------------- */
-
- // QSA and matchesSelector support
-
- // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
- rbuggyMatches = [];
-
- // qSa(:focus) reports false when true (Chrome 21)
- // We allow this because of a bug in IE8/9 that throws an error
- // whenever `document.activeElement` is accessed on an iframe
- // So, we allow :focus to pass through QSA all the time to avoid the IE error
- // See https://bugs.jquery.com/ticket/13378
- rbuggyQSA = [];
-
- if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
-
- // Build QSA regex
- // Regex strategy adopted from Diego Perini
- assert( function( el ) {
-
- var input;
-
- // Select is set to empty string on purpose
- // This is to test IE's treatment of not explicitly
- // setting a boolean content attribute,
- // since its presence should be enough
- // https://bugs.jquery.com/ticket/12359
- docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
- "<select id='" + expando + "-\r\\' msallowcapture=''>" +
- "<option selected=''></option></select>";
-
- // Support: IE8, Opera 11-12.16
- // Nothing should be selected when empty strings follow ^= or $= or *=
- // The test attribute must be unknown in Opera but "safe" for WinRT
- // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
- if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
- rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
- }
-
- // Support: IE8
- // Boolean attributes and "value" are not treated correctly
- if ( !el.querySelectorAll( "[selected]" ).length ) {
- rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
- }
-
- // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
- if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
- rbuggyQSA.push( "~=" );
- }
-
- // Support: IE 11+, Edge 15 - 18+
- // IE 11/Edge don't find elements on a `[name='']` query in some cases.
- // Adding a temporary attribute to the document before the selection works
- // around the issue.
- // Interestingly, IE 10 & older don't seem to have the issue.
- input = document.createElement( "input" );
- input.setAttribute( "name", "" );
- el.appendChild( input );
- if ( !el.querySelectorAll( "[name='']" ).length ) {
- rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
- whitespace + "*(?:''|\"\")" );
- }
-
- // Webkit/Opera - :checked should return selected option elements
- // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
- // IE8 throws error here and will not see later tests
- if ( !el.querySelectorAll( ":checked" ).length ) {
- rbuggyQSA.push( ":checked" );
- }
-
- // Support: Safari 8+, iOS 8+
- // https://bugs.webkit.org/show_bug.cgi?id=136851
- // In-page `selector#id sibling-combinator selector` fails
- if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
- rbuggyQSA.push( ".#.+[+~]" );
- }
-
- // Support: Firefox <=3.6 - 5 only
- // Old Firefox doesn't throw on a badly-escaped identifier.
- el.querySelectorAll( "\\\f" );
- rbuggyQSA.push( "[\\r\\n\\f]" );
- } );
-
- assert( function( el ) {
- el.innerHTML = "<a href='' disabled='disabled'></a>" +
- "<select disabled='disabled'><option/></select>";
-
- // Support: Windows 8 Native Apps
- // The type and name attributes are restricted during .innerHTML assignment
- var input = document.createElement( "input" );
- input.setAttribute( "type", "hidden" );
- el.appendChild( input ).setAttribute( "name", "D" );
-
- // Support: IE8
- // Enforce case-sensitivity of name attribute
- if ( el.querySelectorAll( "[name=d]" ).length ) {
- rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
- }
-
- // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
- // IE8 throws error here and will not see later tests
- if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
- rbuggyQSA.push( ":enabled", ":disabled" );
- }
-
- // Support: IE9-11+
- // IE's :disabled selector does not pick up the children of disabled fieldsets
- docElem.appendChild( el ).disabled = true;
- if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
- rbuggyQSA.push( ":enabled", ":disabled" );
- }
-
- // Support: Opera 10 - 11 only
- // Opera 10-11 does not throw on post-comma invalid pseudos
- el.querySelectorAll( "*,:x" );
- rbuggyQSA.push( ",.*:" );
- } );
- }
-
- if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
- docElem.webkitMatchesSelector ||
- docElem.mozMatchesSelector ||
- docElem.oMatchesSelector ||
- docElem.msMatchesSelector ) ) ) ) {
-
- assert( function( el ) {
-
- // Check to see if it's possible to do matchesSelector
- // on a disconnected node (IE 9)
- support.disconnectedMatch = matches.call( el, "*" );
-
- // This should fail with an exception
- // Gecko does not error, returns false instead
- matches.call( el, "[s!='']:x" );
- rbuggyMatches.push( "!=", pseudos );
- } );
- }
-
- rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
- rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
-
- /* Contains
- ---------------------------------------------------------------------- */
- hasCompare = rnative.test( docElem.compareDocumentPosition );
-
- // Element contains another
- // Purposefully self-exclusive
- // As in, an element does not contain itself
- contains = hasCompare || rnative.test( docElem.contains ) ?
- function( a, b ) {
- var adown = a.nodeType === 9 ? a.documentElement : a,
- bup = b && b.parentNode;
- return a === bup || !!( bup && bup.nodeType === 1 && (
- adown.contains ?
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); - } : - function( a, b ) { - if ( b ) { - while ( ( b = b.parentNode ) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a == document || a.ownerDocument == preferredDoc && - contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b == document || b.ownerDocument == preferredDoc && - contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - /* eslint-disable eqeqeq */ - return a == document ? -1 : - b == document ? 1 : - /* eslint-enable eqeqeq */ - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( ( cur = cur.parentNode ) ) { - ap.unshift( cur ); - } - cur = b; - while ( ( cur = cur.parentNode ) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[ i ] === bp[ i ] ) { - i++; - } - - return i ? - - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[ i ], bp[ i ] ) : - - // Otherwise nodes in our document sort first - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- /* eslint-disable eqeqeq */ - ap[ i ] == preferredDoc ? -1 : - bp[ i ] == preferredDoc ? 1 : - /* eslint-enable eqeqeq */ - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || - match[ 5 ] || "" ).replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? 
- match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { - return true; - } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + - ")" + className + "(" + whitespace + "|$)" ) ) && classCache( - className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - /* eslint-disable max-len */ - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - /* eslint-enable max-len */ - - }; - }, - - "CHILD": function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - "not": markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element (issue #299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - "has": markFunction( function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - } ), - - "contains": markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && - ( !document.hasFocus || document.hasFocus() ) && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return ( nodeName === "input" && !!elem.checked ) || - ( nodeName === "option" && !!elem.selected ); - }, - - "selected": function( elem ) { - - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos[ "empty" ]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo( function() { - return [ 0 ]; - } ), - - "last": createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - } ), - - "even": createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "odd": createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rcombinators.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || - ( outerCache[ elem.uniqueID ] = {} ); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = uniqueCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( - selector || "*", - context.nodeType ? [ context ] : context, - [] - ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
-
- // ...intermediate processing is necessary
- [] :
-
- // ...otherwise use results directly
- results :
- matcherIn;
-
- // Find primary matches
- if ( matcher ) {
- matcher( matcherIn, matcherOut, context, xml );
- }
-
- // Apply postFilter
- if ( postFilter ) {
- temp = condense( matcherOut, postMap );
- postFilter( temp, [], context, xml );
-
- // Un-match failing elements by moving them back to matcherIn
- i = temp.length;
- while ( i-- ) {
- if ( ( elem = temp[ i ] ) ) {
- matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem );
- }
- }
- }
-
- if ( seed ) {
- if ( postFinder || preFilter ) {
- if ( postFinder ) {
-
- // Get the final matcherOut by condensing this intermediate into postFinder contexts
- temp = [];
- i = matcherOut.length;
- while ( i-- ) {
- if ( ( elem = matcherOut[ i ] ) ) {
-
- // Restore matcherIn since elem is not yet a final match
- temp.push( ( matcherIn[ i ] = elem ) );
- }
- }
- postFinder( null, ( matcherOut = [] ), temp, xml );
- }
-
- // Move matched elements from seed to results to keep them synchronized
- i = matcherOut.length;
- while ( i-- ) {
- if ( ( elem = matcherOut[ i ] ) &&
- ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) {
-
- seed[ temp ] = !( results[ temp ] = elem );
- }
- }
- }
-
- // Add elements to results, through postFinder if defined
- } else {
- matcherOut = condense(
- matcherOut === results ?
- matcherOut.splice( preexisting, matcherOut.length ) :
- matcherOut
- );
- if ( postFinder ) {
- postFinder( null, results, matcherOut, xml );
- } else {
- push.apply( results, matcherOut );
- }
- }
- } );
-}
-
-function matcherFromTokens( tokens ) {
- var checkContext, matcher, j,
- len = tokens.length,
- leadingRelative = Expr.relative[ tokens[ 0 ].type ],
- implicitRelative = leadingRelative || Expr.relative[ " " ],
- i = leadingRelative ? 1 : 0,
-
- // The foundational matcher ensures that elements are reachable from top-level context(s)
- matchContext = addCombinator( function( elem ) {
- return elem === checkContext;
- }, implicitRelative, true ),
- matchAnyContext = addCombinator( function( elem ) {
- return indexOf( checkContext, elem ) > -1;
- }, implicitRelative, true ),
- matchers = [ function( elem, context, xml ) {
- var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
- ( checkContext = context ).nodeType ?
- matchContext( elem, context, xml ) :
- matchAnyContext( elem, context, xml ) );
-
- // Avoid hanging onto element (issue #299)
- checkContext = null;
- return ret;
- } ];
-
- for ( ; i < len; i++ ) {
- if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) {
- matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ];
- } else {
- matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches );
-
- // Return special upon seeing a positional matcher
- if ( matcher[ expando ] ) {
-
- // Find the next relative operator (if any) for proper handling
- j = ++i;
- for ( ; j < len; j++ ) {
- if ( Expr.relative[ tokens[ j ].type ] ) {
- break;
- }
- }
- return setMatcher(
- i > 1 && elementMatcher( matchers ),
- i > 1 && toSelector(
-
- // If the preceding token was a descendant combinator, insert an implicit any-element `*`
- tokens
- .slice( 0, i - 1 )
- .concat( { value: tokens[ i - 2 ].type === " " ? "*" : "" } )
- ).replace( rtrim, "$1" ),
- matcher,
- i < j && matcherFromTokens( tokens.slice( i, j ) ),
- j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ),
- j < len && toSelector( tokens )
- );
- }
- matchers.push( matcher );
- }
- }
-
- return elementMatcher( matchers );
-}
-
-function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
- var bySet = setMatchers.length > 0,
- byElement = elementMatchers.length > 0,
- superMatcher = function( seed, context, xml, results, outermost ) {
- var elem, j, matcher,
- matchedCount = 0,
- i = "0",
- unmatched = seed && [],
- setMatched = [],
- contextBackup = outermostContext,
-
- // We must always have either seed elements or outermost context
- elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ),
-
- // Use integer dirruns iff this is the outermost matcher
- dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ),
- len = elems.length;
-
- if ( outermost ) {
-
- // Support: IE 11+, Edge 17 - 18+
- // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
- // two documents; shallow comparisons work.
- // eslint-disable-next-line eqeqeq
- outermostContext = context == document || context || outermost;
- }
-
- // Add elements passing elementMatchers directly to results
- // Support: IE<9, Safari
- // Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id
- for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) {
- if ( byElement && elem ) {
- j = 0;
-
- // Support: IE 11+, Edge 17 - 18+
- // IE/Edge sometimes throw a "Permission denied" error when strict-comparing
- // two documents; shallow comparisons work.
- // eslint-disable-next-line eqeqeq
- if ( !context && elem.ownerDocument != document ) {
- setDocument( elem );
- xml = !documentIsHTML;
- }
- while ( ( matcher = elementMatchers[ j++ ] ) ) {
- if ( matcher( elem, context || document, xml ) ) {
- results.push( elem );
- break;
- }
- }
- if ( outermost ) {
- dirruns = dirrunsUnique;
- }
- }
-
- // Track unmatched elements for set filters
- if ( bySet ) {
-
- // They will have gone through all possible matchers
- if ( ( elem = !matcher && elem ) ) {
- matchedCount--;
- }
-
- // Lengthen the array for every element, matched or not
- if ( seed ) {
- unmatched.push( elem );
- }
- }
- }
-
- // `i` is now the count of elements visited above, and adding it to `matchedCount`
- // makes the latter nonnegative.
- matchedCount += i;
-
- // Apply set filters to unmatched elements
- // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount`
- // equals `i`), unless we didn't visit _any_ elements in the above loop because we have
- // no element matchers and no seed.
- // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that
- // case, which will result in a "00" `matchedCount` that differs from `i` but is also
- // numerically zero.
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( - selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) - ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find[ "ID" ]( token.matches[ 0 ] - .replace( runescape, funescape ), context ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length;
-		while ( i-- ) {
-			token = tokens[ i ];
-
-			// Abort if we hit a combinator
-			if ( Expr.relative[ ( type = token.type ) ] ) {
-				break;
-			}
-			if ( ( find = Expr.find[ type ] ) ) {
-
-				// Search, expanding context for leading sibling combinators
-				if ( ( seed = find(
-					token.matches[ 0 ].replace( runescape, funescape ),
-					rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
-						context
-				) ) ) {
-
-					// If seed is empty or no tokens remain, we can return early
-					tokens.splice( i, 1 );
-					selector = seed.length && toSelector( tokens );
-					if ( !selector ) {
-						push.apply( results, seed );
-						return results;
-					}
-
-					break;
-				}
-			}
-		}
-	}
-
-	// Compile and execute a filtering function if one is not provided
-	// Provide `match` to avoid retokenization if we modified the selector above
-	( compiled || compile( selector, match ) )(
-		seed,
-		context,
-		!documentIsHTML,
-		results,
-		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
-	);
-	return results;
-};
-
-// One-time assignments
-
-// Sort stability
-support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
-
-// Support: Chrome 14-35+
-// Always assume duplicates if they aren't passed to the comparison function
-support.detectDuplicates = !!hasDuplicate;
-
-// Initialize against the default document
-setDocument();
-
-// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
-// Detached nodes confoundingly follow *each other*
-support.sortDetached = assert( function( el ) {
-
-	// Should return 1, but returns 4 (following)
-	return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
-} );
-
-// Support: IE<8
-// Prevent attribute/property "interpolation"
-// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
-if ( !assert( function( el ) {
-	el.innerHTML = "<a href='#'></a>";
-	return el.firstChild.getAttribute( "href" ) === "#";
-} ) ) {
-	addHandle( "type|href|height|width", function( elem, name, isXML ) {
-		if ( !isXML ) {
-			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
-		}
-	} );
-}
-
-// Support: IE<9
-// Use defaultValue in place of getAttribute("value")
-if ( !support.attributes || !assert( function( el ) {
-	el.innerHTML = "<input/>";
-	el.firstChild.setAttribute( "value", "" );
-	return el.firstChild.getAttribute( "value" ) === "";
-} ) ) {
-	addHandle( "value", function( elem, _name, isXML ) {
-		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
-			return elem.defaultValue;
-		}
-	} );
-}
-
-// Support: IE<9
-// Use getAttributeNode to fetch booleans when getAttribute lies
-if ( !assert( function( el ) {
-	return el.getAttribute( "disabled" ) == null;
-} ) ) {
-	addHandle( booleans, function( elem, name, isXML ) {
-		var val;
-		if ( !isXML ) {
-			return elem[ name ] === true ? name.toLowerCase() :
-				( val = elem.getAttributeNode( name ) ) && val.specified ?
- val.value : - null; - } - } ); -} - -return Sizzle; - -} )( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -} -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
-				jQuery( selector ) :
-				selector || [],
-			false
-		).length;
-	}
-} );
-
-
-// Initialize a jQuery object
-
-
-// A central reference to the root jQuery(document)
-var rootjQuery,
-
-	// A simple way to check for HTML strings
-	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
-	// Strict HTML recognition (#11290: must start with <)
-	// Shortcut simple #id case for speed
-	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
-	init = jQuery.fn.init = function( selector, context, root ) {
-		var match, elem;
-
-		// HANDLE: $(""), $(null), $(undefined), $(false)
-		if ( !selector ) {
-			return this;
-		}
-
-		// Method init() accepts an alternate rootjQuery
-		// so migrate can support jQuery.sub (gh-2101)
-		root = root || rootjQuery;
-
-		// Handle HTML strings
-		if ( typeof selector === "string" ) {
-			if ( selector[ 0 ] === "<" &&
-				selector[ selector.length - 1 ] === ">" &&
-				selector.length >= 3 ) {
-
-				// Assume that strings that start and end with <> are HTML and skip the regex check
-				match = [ null, selector, null ];
-
-			} else {
-				match = rquickExpr.exec( selector );
-			}
-
-			// Match html or make sure no context is specified for #id
-			if ( match && ( match[ 1 ] || !context ) ) {
-
-				// HANDLE: $(html) -> $(array)
-				if ( match[ 1 ] ) {
-					context = context instanceof jQuery ? context[ 0 ] : context;
-
-					// Option to run scripts is true for back-compat
-					// Intentionally let the error be thrown if parseHTML is not present
-					jQuery.merge( this, jQuery.parseHTML(
-						match[ 1 ],
-						context && context.nodeType ? context.ownerDocument || context : document,
-						true
-					) );
-
-					// HANDLE: $(html, props)
-					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
-						for ( match in context ) {
-
-							// Properties of context are called as methods if possible
-							if ( isFunction( this[ match ] ) ) {
-								this[ match ]( context[ match ] );
-
-							// ...and otherwise set as attributes
-							} else {
-								this.attr( match, context[ match ] );
-							}
-						}
-					}
-
-					return this;
-
-				// HANDLE: $(#id)
-				} else {
-					elem = document.getElementById( match[ 2 ] );
-
-					if ( elem ) {
-
-						// Inject the element directly into the jQuery object
-						this[ 0 ] = elem;
-						this.length = 1;
-					}
-					return this;
-				}
-
-			// HANDLE: $(expr, $(...))
-			} else if ( !context || context.jquery ) {
-				return ( context || root ).find( selector );
-
-			// HANDLE: $(expr, context)
-			// (which is just equivalent to: $(context).find(expr)
-			} else {
-				return this.constructor( context ).find( selector );
-			}
-
-		// HANDLE: $(DOMElement)
-		} else if ( selector.nodeType ) {
-			this[ 0 ] = selector;
-			this.length = 1;
-			return this;
-
-		// HANDLE: $(function)
-		// Shortcut for document ready
-		} else if ( isFunction( selector ) ) {
-			return root.ready !== undefined ?
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null;
-	},
-	parents: function( elem ) {
-		return dir( elem, "parentNode" );
-	},
-	parentsUntil: function( elem, _i, until ) {
-		return dir( elem, "parentNode", until );
-	},
-	next: function( elem ) {
-		return sibling( elem, "nextSibling" );
-	},
-	prev: function( elem ) {
-		return sibling( elem, "previousSibling" );
-	},
-	nextAll: function( elem ) {
-		return dir( elem, "nextSibling" );
-	},
-	prevAll: function( elem ) {
-		return dir( elem, "previousSibling" );
-	},
-	nextUntil: function( elem, _i, until ) {
-		return dir( elem, "nextSibling", until );
-	},
-	prevUntil: function( elem, _i, until ) {
-		return dir( elem, "previousSibling", until );
-	},
-	siblings: function( elem ) {
-		return siblings( ( elem.parentNode || {} ).firstChild, elem );
-	},
-	children: function( elem ) {
-		return siblings( elem.firstChild );
-	},
-	contents: function( elem ) {
-		if ( elem.contentDocument != null &&
-
-			// Support: IE 11+
-			// <object> elements with no `data` attribute has an object
-			// `contentDocument` with a `null` prototype.
-			getProto( elem.contentDocument ) ) {
-
-			return elem.contentDocument;
-		}
-
-		// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
-		// Treat the template element as a regular one in browsers that
-		// don't support it.
-		if ( nodeName( elem, "template" ) ) {
-			elem = elem.content || elem;
-		}
-
-		return jQuery.merge( [], elem.childNodes );
-	}
-}, function( name, fn ) {
-	jQuery.fn[ name ] = function( until, selector ) {
-		var matched = jQuery.map( this, fn, until );
-
-		if ( name.slice( -5 ) !== "Until" ) {
-			selector = until;
-		}
-
-		if ( selector && typeof selector === "string" ) {
-			matched = jQuery.filter( selector, matched );
-		}
-
-		if ( this.length > 1 ) {
-
-			// Remove duplicates
-			if ( !guaranteedUnique[ name ] ) {
-				jQuery.uniqueSort( matched );
-			}
-
-			// Reverse order for parents* and prev-derivatives
-			if ( rparentsprev.test( name ) ) {
-				matched.reverse();
-			}
-		}
-
-		return this.pushStack( matched );
-	};
-} );
-var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
-
-
-
-// Convert String-formatted options into Object-formatted ones
-function createOptions( options ) {
-	var object = {};
-	jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
-		object[ flag ] = true;
-	} );
-	return object;
-}
-
-/*
- * Create a callback list using the following parameters:
- *
- *	options: an optional list of space-separated options that will change how
- *			the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- *	once:			will ensure the callback list can only be fired once (like a Deferred)
- *
- *	memory:			will keep track of previous values and will call any callback added
- *					after the list has been fired right away with the latest "memorized"
- *					values (like a Deferred)
- *
- *	unique:			will ensure a callback can only be added once (no duplicate in the list)
- *
- *	stopOnFalse:	interrupt callings when a callback returns false
- *
- */
-jQuery.Callbacks = function( options ) {
-
-	// Convert options from String-formatted to Object-formatted if needed
-	// (we check in cache first)
-	options = typeof options === "string" ?
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the primary Deferred - primary = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - primary.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( primary.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return primary.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); - } - - return primary.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide();
-		}
-
-		return this.each( function() {
-			if ( isHiddenWithinTree( this ) ) {
-				jQuery( this ).show();
-			} else {
-				jQuery( this ).hide();
-			}
-		} );
-	}
-} );
-var rcheckableType = ( /^(?:checkbox|radio)$/i );
-
-var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
-
-var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
-
-
-
-( function() {
-	var fragment = document.createDocumentFragment(),
-		div = fragment.appendChild( document.createElement( "div" ) ),
-		input = document.createElement( "input" );
-
-	// Support: Android 4.0 - 4.3 only
-	// Check state lost if the name is set (#11217)
-	// Support: Windows Web Apps (WWA)
-	// `name` and `type` must use .setAttribute for WWA (#14901)
-	input.setAttribute( "type", "radio" );
-	input.setAttribute( "checked", "checked" );
-	input.setAttribute( "name", "t" );
-
-	div.appendChild( input );
-
-	// Support: Android <=4.1 only
-	// Older WebKit doesn't clone checked state correctly in fragments
-	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
-
-	// Support: IE <=11 only
-	// Make sure textarea (and checkbox) defaultValue is properly cloned
-	div.innerHTML = "<textarea>x</textarea>";
-	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
-
-	// Support: IE <=9 only
-	// IE <=9 replaces <option> tags with their contents when inserted outside of
-	// the select element.
-	div.innerHTML = "<option></option>";
-	support.option = !!div.lastChild;
-} )();
-
-
-// We have to close these tags to support XHTML (#13200)
-var wrapMap = {
-
-	// XHTML parsers do not magically insert elements in the
-	// same way that tag soup parsers do. So we cannot shorten
-	// this by omitting <tbody> or other required elements.
-	thead: [ 1, "<table>", "</table>" ],
-	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
-	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
-	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
-
-	_default: [ 0, "", "" ]
-};
-
-wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
-wrapMap.th = wrapMap.td;
-
-// Support: IE <=9 only
-if ( !support.option ) {
-	wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
-}
    " ], - - _default: [ 0, "", "" ] -}; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// Support: IE <=9 only -if ( !support.option ) { - wrapMap.optgroup = wrapMap.option = [ 1, "" ]; -} - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are asynchronous, except when they are no-op. 
-// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. - saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - - // Support: Chrome 86+ - // In Chrome, if an element having a focusout handler is blurred by - // clicking outside of it, it invokes the handler synchronously. 
If - // that handler calls `.remove()` on the element, the data is cleared, - // leaving `result` undefined. We need to guard against this. - return result && result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - which: true -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - // Suppress native focus or blur as it's already being fired - // in leverageNative. - _default: function() { - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). 
-jQuery.each( {
-	mouseenter: "mouseover",
-	mouseleave: "mouseout",
-	pointerenter: "pointerover",
-	pointerleave: "pointerout"
-}, function( orig, fix ) {
-	jQuery.event.special[ orig ] = {
-		delegateType: fix,
-		bindType: fix,
-
-		handle: function( event ) {
-			var ret,
-				target = this,
-				related = event.relatedTarget,
-				handleObj = event.handleObj;
-
-			// For mouseenter/leave call the handler if related is outside the target.
-			// NB: No relatedTarget if the mouse left/entered the browser window
-			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
-				event.type = handleObj.origType;
-				ret = handleObj.handler.apply( this, arguments );
-				event.type = fix;
-			}
-			return ret;
-		}
-	};
-} );
-
-jQuery.fn.extend( {
-
-	on: function( types, selector, data, fn ) {
-		return on( this, types, selector, data, fn );
-	},
-	one: function( types, selector, data, fn ) {
-		return on( this, types, selector, data, fn, 1 );
-	},
-	off: function( types, selector, fn ) {
-		var handleObj, type;
-		if ( types && types.preventDefault && types.handleObj ) {
-
-			// ( event ) dispatched jQuery.Event
-			handleObj = types.handleObj;
-			jQuery( types.delegateTarget ).off(
-				handleObj.namespace ?
-					handleObj.origType + "." + handleObj.namespace :
-					handleObj.origType,
-				handleObj.selector,
-				handleObj.handler
-			);
-			return this;
-		}
-		if ( typeof types === "object" ) {
-
-			// ( types-object [, selector] )
-			for ( type in types ) {
-				this.off( type, selector, types[ type ] );
-			}
-			return this;
-		}
-		if ( selector === false || typeof selector === "function" ) {
-
-			// ( types [, fn] )
-			fn = selector;
-			selector = undefined;
-		}
-		if ( fn === false ) {
-			fn = returnFalse;
-		}
-		return this.each( function() {
-			jQuery.event.remove( this, types, fn, selector );
-		} );
-	}
-} );
-
-
-var
-
-	// Support: IE <=10 - 11, Edge 12 - 13 only
-	// In IE/Edge using regex groups here causes severe slowdowns.
-	// See https://connect.microsoft.com/IE/feedback/details/1736512/
-	rnoInnerhtml = /<script|<style|<link/i,
-
-	// checked="checked" or checked
-	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
-	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
-
-// Prefer a tbody over its parent table for containing new rows
-function manipulationTarget( elem, content ) {
-	if ( nodeName( elem, "table" ) &&
-		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
-
-		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
-	}
-
-	return elem;
-}
-
-// Replace/restore the type attribute of script elements for safe DOM manipulation
-function disableScript( elem ) {
-	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
-	return elem;
-}
-function restoreScript( elem ) {
-	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
-		elem.type = elem.type.slice( 5 );
-	} else {
-		elem.removeAttribute( "type" );
-	}
-
-	return elem;
-}
-
-function cloneCopyEvent( src, dest ) {
-	var i, l, type, pdataOld, udataOld, udataCur, events;
-
-	if ( dest.nodeType !== 1 ) {
-		return;
-	}
-
-	// 1. Copy private data: events, handlers, etc.
-	if ( dataPriv.hasData( src ) ) {
-		pdataOld = dataPriv.get( src );
-		events = pdataOld.events;
-
-		if ( events ) {
-			dataPriv.remove( dest, "handle events" );
-
-			for ( type in events ) {
-				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
-					jQuery.event.add( dest, type, events[ type ][ i ] );
-				}
-			}
-		}
-	}
-
-	// 2. Copy user data
-	if ( dataUser.hasData( src ) ) {
-		udataOld = dataUser.access( src );
-		udataCur = jQuery.extend( {}, udataOld );
-
-		dataUser.set( dest, udataCur );
-	}
-}
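cloneCopyEvent is the piece that lets jQuery( elem ).clone( true ) carry event handlers across: handlers live in the dataPriv store rather than on the node itself, so a bare cloneNode() loses them. A minimal usage sketch, assuming a loaded jQuery (the element and ids are illustrative):

var $src = jQuery( "<button id='source'>hi</button>" ).on( "click", function() {
	console.log( "clicked", this.id );
} );

var $shallow = $src.clone();      // markup only; no click handler copied
var $deep = $src.clone( true );   // dataAndEvents=true routes through cloneCopyEvent

$deep.attr( "id", "copy" ).trigger( "click" );  // logs "clicked copy"
$shallow.trigger( "click" );                    // logs nothing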
-
-// Fix IE bugs, see support tests
-function fixInput( src, dest ) {
-	var nodeName = dest.nodeName.toLowerCase();
-
-	// Fails to persist the checked state of a cloned checkbox or radio button.
-	if ( nodeName === "input" && rcheckableType.test( src.type ) ) {
-		dest.checked = src.checked;
-
-	// Fails to return the selected option to the default selected state when cloning options
-	} else if ( nodeName === "input" || nodeName === "textarea" ) {
-		dest.defaultValue = src.defaultValue;
-	}
-}
-
-function domManip( collection, args, callback, ignored ) {
-
-	// Flatten any nested arrays
-	args = flat( args );
-
-	var fragment, first, scripts, hasScripts, node, doc,
-		i = 0,
-		l = collection.length,
-		iNoClone = l - 1,
-		value = args[ 0 ],
-		valueIsFunction = isFunction( value );
-
-	// We can't cloneNode fragments that contain checked, in WebKit
-	if ( valueIsFunction ||
-			( l > 1 && typeof value === "string" &&
-				!support.checkClone && rchecked.test( value ) ) ) {
-		return collection.each( function( index ) {
-			var self = collection.eq( index );
-			if ( valueIsFunction ) {
-				args[ 0 ] = value.call( this, index, self.html() );
-			}
-			domManip( self, args, callback, ignored );
-		} );
-	}
-
-	if ( l ) {
-		fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored );
-		first = fragment.firstChild;
-
-		if ( fragment.childNodes.length === 1 ) {
-			fragment = first;
-		}
-
-		// Require either new content or an interest in ignored elements to invoke the callback
-		if ( first || ignored ) {
-			scripts = jQuery.map( getAll( fragment, "script" ), disableScript );
-			hasScripts = scripts.length;
-
-			// Use the original fragment for the last item
-			// instead of the first because it can end up
-			// being emptied incorrectly in certain situations (#8070).
-			for ( ; i < l; i++ ) {
-				node = fragment;
-
-				if ( i !== iNoClone ) {
-					node = jQuery.clone( node, true, true );
-
-					// Keep references to cloned scripts for later restoration
-					if ( hasScripts ) {
-
-						// Support: Android <=4.0 only, PhantomJS 1 only
-						// push.apply(_, arraylike) throws on ancient WebKit
-						jQuery.merge( scripts, getAll( node, "script" ) );
-					}
-				}
-
-				callback.call( collection[ i ], node, i );
-			}
-
-			if ( hasScripts ) {
-				doc = scripts[ scripts.length - 1 ].ownerDocument;
-
-				// Reenable scripts
-				jQuery.map( scripts, restoreScript );
-
-				// Evaluate executable scripts on first document insertion
-				for ( i = 0; i < hasScripts; i++ ) {
-					node = scripts[ i ];
-					if ( rscriptType.test( node.type || "" ) &&
-						!dataPriv.access( node, "globalEval" ) &&
-						jQuery.contains( doc, node ) ) {
-
-						if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) {
-
-							// Optional AJAX dependency, but won't run scripts if not present
-							if ( jQuery._evalUrl && !node.noModule ) {
-								jQuery._evalUrl( node.src, {
-									nonce: node.nonce || node.getAttribute( "nonce" )
-								}, doc );
-							}
-						} else {
-							DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc );
-						}
-					}
-				}
-			}
-		}
-	}
-
-	return collection;
-}
-
-function remove( elem, selector, keepData ) {
-	var node,
-		nodes = selector ?
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? 
- jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? 
this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. - function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - 
support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! - // - // Support: Firefox 70+ - // Only Firefox includes border widths - // in computed dimensions. (gh-4529) - reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; - tr.style.cssText = "border:1px solid"; - - // Support: Chrome 86+ - // Height set through cssText does not get applied. - // Computed height then comes back as 0. - tr.style.height = "1px"; - trChild.style.height = "9px"; - - // Support: Android 8 Chrome 86+ - // In our bodyBackground.html iframe, - // display for all div elements is set to "inline", - // which causes a problem only in Android 8 Chrome 86. - // Ensuring the div is display: block - // gets around this issue. - trChild.style.display = "block"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + - parseInt( trStyle.borderTopWidth, 10 ) + - parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. 
- // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
- !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. - // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. 
- ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? - boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml, parserErrorElem; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. - try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) {} - - parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; - if ( !xml || parserErrorElem ) { - jQuery.error( "Invalid XML: " + ( - parserErrorElem ? 
- jQuery.map( parserErrorElem.childNodes, function( el ) { - return el.textContent; - } ).join( "\n" ) : - data - ) ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ).filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ).map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - -originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } - } 
); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If prev 
can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script but not if jsonp - if ( !isSuccess && - jQuery.inArray( "script", s.dataTypes ) > -1 && - jQuery.inArray( "json", s.dataTypes ) < 0 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - - - @@ -90,7 +87,7 @@

[Tag-stripped residue, condensed: the bundled-jQuery deletion above breaks off mid-string where its markup was lost, and the diffs for three Sphinx-built pages under core/dbt/docs/build/html/ survive only as text fragments ("Quick search", "Related Topics", footer text). In each page, one hunk (9 lines shrinking to 6) removes three script includes whose tags are no longer recoverable, and a second hunk bumps the footer from "Powered by Sphinx 5.3.0" to "Powered by Sphinx 6.0.0" (the "& Alabaster 0.7.12" suffix is unchanged context). The two surviving file headers:

diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html
index a62245c306d..c0f74617916 100644
diff --git a/core/dbt/docs/build/html/search.html b/core/dbt/docs/build/html/search.html
index f94c6ef0835..9622cf38d3c 100644

search.html's surviving footer fragment reappears just below.]

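Right after that fragment, core/dbt/events/functions.py begins consulting the new WarnErrorOptions helper so that individual warning events can be promoted to errors. What follows is a minimal, hypothetical sketch of that include/exclude gating, using simplified stand-ins rather than dbt-core's actual classes (the real ones live in core/dbt/helper_types.py and are exercised by the hunks below):

from dataclasses import dataclass, field
from typing import List, Union


@dataclass
class WarnErrorOptions:
    # Simplified stand-in: dbt's real class also validates event names and is
    # built by parsing the --warn-error-options YAML string.
    include: Union[str, List[str]] = field(default_factory=list)
    exclude: List[str] = field(default_factory=list)

    def includes(self, event_name: str) -> bool:
        # "all" (or "*") promotes every warning; otherwise only the events
        # listed in `include`, minus anything explicitly excluded.
        include_all = self.include in ("all", "*")
        return (
            include_all or event_name in self.include
        ) and event_name not in self.exclude


def warn_or_error(event_name: str, options: WarnErrorOptions, warn_error: bool = False):
    if warn_error or options.includes(event_name):
        # dbt raises EventCompilationError here; RuntimeError keeps the sketch standalone.
        raise RuntimeError(f"{event_name} promoted from warning to error")
    print(f"[WARNING] {event_name}")


opts = WarnErrorOptions(include="all", exclude=["NoNodesForSelectionCriteria"])
print(opts.includes("NoNodesForSelectionCriteria"))  # False: explicitly excluded
print(opts.includes("MainTrackingUserState"))        # True: "all" covers it
warn_or_error("NoNodesForSelectionCriteria", opts)   # logs a warning, does not raise

In the real hunks the options object is built by parsing the --warn-error-options YAML string, the raised type is EventCompilationError, and flags.set_from_args additionally refuses to accept --warn-error and --warn-error-options together (the new _check_mutually_exclusive helper and the test__flags_are_mutually_exclusive test below cover exactly that).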
    ©2022, dbt Labs. | - Powered by Sphinx 5.3.0 + Powered by Sphinx 6.0.0 & Alabaster 0.7.12 diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index 06e5a89965c..f32287c3049 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -161,7 +161,11 @@ def msg_to_dict(msg: EventMsg) -> dict: def warn_or_error(event, node=None): - if flags.WARN_ERROR: + # TODO: resolve this circular import when flags.WARN_ERROR_OPTIONS is WarnErrorOptions type via click CLI. + from dbt.helper_types import WarnErrorOptions + + warn_error_options = WarnErrorOptions.from_yaml_string(flags.WARN_ERROR_OPTIONS) + if flags.WARN_ERROR or warn_error_options.includes(type(event).__name__): # TODO: resolve this circular import when at top from dbt.exceptions import EventCompilationError diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 536e0c6c7cc..1fe552270bf 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -159,16 +159,16 @@ class MissingProfileTargetMsg(betterproto.Message): @dataclass -class InvalidVarsYAML(betterproto.Message): +class InvalidOptionYAML(betterproto.Message): """A008""" - pass + option_name: str = betterproto.string_field(1) @dataclass -class InvalidVarsYAMLMsg(betterproto.Message): +class InvalidOptionYAMLMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "InvalidVarsYAML" = betterproto.message_field(2) + data: "InvalidOptionYAML" = betterproto.message_field(2) @dataclass diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index cfa0f1feaa4..83fdf7854c1 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -108,12 +108,12 @@ def message(self) -> str: @dataclass -class InvalidVarsYAML(ErrorLevel, pt.InvalidVarsYAML): +class InvalidOptionYAML(ErrorLevel, pt.InvalidOptionYAML): def code(self): return "A008" def message(self) -> str: - return "The YAML provided in the --vars argument is not valid." + return f"The YAML provided in the --{self.option_name} argument is not valid." @dataclass diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index f207496e9b1..b8f99eb5fdc 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -1703,15 +1703,16 @@ def get_message(self) -> str: return msg -class VarsArgNotYamlDictError(CompilationError): - def __init__(self, var_type): +class OptionNotYamlDict(CompilationError): + def __init__(self, var_type, option_name): self.var_type = var_type + self.option_name = option_name super().__init__(msg=self.get_message()) def get_message(self) -> str: type_name = self.var_type.__name__ - msg = f"The --vars argument must be a YAML dictionary, but was of type '{type_name}'" + msg = f"The --{self.option_name} argument must be a YAML dictionary, but was of type '{type_name}'" return msg diff --git a/core/dbt/flags.py b/core/dbt/flags.py index cecc024d7f4..b7f8ae95ef2 100644 --- a/core/dbt/flags.py +++ b/core/dbt/flags.py @@ -29,6 +29,7 @@ USE_EXPERIMENTAL_PARSER = None STATIC_PARSER = None WARN_ERROR = None +WARN_ERROR_OPTIONS = None WRITE_JSON = None PARTIAL_PARSE = None USE_COLORS = None @@ -54,6 +55,7 @@ "INDIRECT_SELECTION", "TARGET_PATH", "LOG_PATH", + "WARN_ERROR_OPTIONS", ] _NON_DBT_ENV_FLAGS = ["DO_NOT_TRACK"] @@ -66,6 +68,7 @@ "USE_EXPERIMENTAL_PARSER": False, "STATIC_PARSER": True, "WARN_ERROR": False, + "WARN_ERROR_OPTIONS": "{}", "WRITE_JSON": True, "PARTIAL_PARSE": True, "USE_COLORS": True, @@ -130,7 +133,7 @@ def set_from_args(args, user_config): # N.B. 
Multiple `globals` are purely for line length. # Because `global` is a parser directive (as opposed to a language construct) # black insists in putting them all on one line - global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER + global STRICT_MODE, FULL_REFRESH, WARN_ERROR, WARN_ERROR_OPTIONS, USE_EXPERIMENTAL_PARSER, STATIC_PARSER global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, QUIET, NO_PRINT, CACHE_SELECTED_ONLY @@ -146,6 +149,8 @@ def set_from_args(args, user_config): USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config) STATIC_PARSER = get_flag_value("STATIC_PARSER", args, user_config) WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config) + WARN_ERROR_OPTIONS = get_flag_value("WARN_ERROR_OPTIONS", args, user_config) + _check_mutually_exclusive(["WARN_ERROR", "WARN_ERROR_OPTIONS"], args, user_config) WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config) PARTIAL_PARSE = get_flag_value("PARTIAL_PARSE", args, user_config) USE_COLORS = get_flag_value("USE_COLORS", args, user_config) @@ -178,7 +183,7 @@ def _set_overrides_from_env(): def get_flag_value(flag, args, user_config): - flag_value = _load_flag_value(flag, args, user_config) + flag_value, _ = _load_flag_value(flag, args, user_config) if flag == "PRINTER_WIDTH": # must be ints flag_value = int(flag_value) @@ -188,20 +193,36 @@ def get_flag_value(flag, args, user_config): return flag_value +def _check_mutually_exclusive(group, args, user_config): + set_flag = None + for flag in group: + flag_set_by_user = not _flag_value_from_default(flag, args, user_config) + if flag_set_by_user and set_flag: + raise ValueError(f"{flag.lower()}: not allowed with argument {set_flag.lower()}") + elif flag_set_by_user: + set_flag = flag + + +def _flag_value_from_default(flag, args, user_config): + _, from_default = _load_flag_value(flag, args, user_config) + + return from_default + + def _load_flag_value(flag, args, user_config): lc_flag = flag.lower() flag_value = getattr(args, lc_flag, None) if flag_value is not None: - return flag_value + return flag_value, False flag_value = _get_flag_value_from_env(flag) if flag_value is not None: - return flag_value + return flag_value, False if user_config is not None and getattr(user_config, lc_flag, None) is not None: - return getattr(user_config, lc_flag) + return getattr(user_config, lc_flag), False - return flag_defaults[flag] + return flag_defaults[flag], True def _get_flag_value_from_env(flag): @@ -211,11 +232,10 @@ def _get_flag_value_from_env(flag): if env_value is None or env_value == "": return None - env_value = env_value.lower() if flag in _NON_BOOLEAN_FLAGS: flag_value = env_value else: - flag_value = env_set_bool(env_value) + flag_value = env_set_bool(env_value.lower()) return flag_value @@ -229,6 +249,7 @@ def get_flag_dict(): "use_experimental_parser": USE_EXPERIMENTAL_PARSER, "static_parser": STATIC_PARSER, "warn_error": WARN_ERROR, + "warn_error_options": WARN_ERROR_OPTIONS, "write_json": WRITE_JSON, "partial_parse": PARTIAL_PARSE, "use_colors": USE_COLORS, diff --git a/core/dbt/helper_types.py b/core/dbt/helper_types.py index a8ff90fa75f..84f253b00c6 100644 --- a/core/dbt/helper_types.py +++ b/core/dbt/helper_types.py @@ -7,15 +7,16 @@ from datetime import timedelta from pathlib import Path from typing import Tuple, AbstractSet, Union +from 
hologram import FieldEncoder, JsonDict +from mashumaro.types import SerializableType +from typing import Callable, cast, Generic, Optional, TypeVar, List from dbt.dataclass_schema import ( dbtClassMixin, ValidationError, StrEnum, ) -from hologram import FieldEncoder, JsonDict -from mashumaro.types import SerializableType -from typing import Callable, cast, Generic, Optional, TypeVar +import dbt.events.types as dbt_event_types class Port(int, SerializableType): @@ -88,6 +89,65 @@ class NoValue(dbtClassMixin): novalue: NVEnum = field(default_factory=lambda: NVEnum.novalue) +@dataclass +class IncludeExclude(dbtClassMixin): + INCLUDE_ALL = ("all", "*") + + include: Union[str, List[str]] + exclude: List[str] = field(default_factory=list) + + def __post_init__(self): + if isinstance(self.include, str) and self.include not in self.INCLUDE_ALL: + raise ValidationError( + f"include must be one of {self.INCLUDE_ALL} or a list of strings" + ) + + if self.exclude and self.include not in self.INCLUDE_ALL: + raise ValidationError( + f"exclude can only be specified if include is one of {self.INCLUDE_ALL}" + ) + + if isinstance(self.include, list): + self._validate_items(self.include) + + if isinstance(self.exclude, list): + self._validate_items(self.exclude) + + def includes(self, item_name: str): + return ( + item_name in self.include or self.include in self.INCLUDE_ALL + ) and item_name not in self.exclude + + def _validate_items(self, items: List[str]): + pass + + +class WarnErrorOptions(IncludeExclude): + # TODO: this method can be removed once the click CLI is in use + @classmethod + def from_yaml_string(cls, warn_error_options_str: Optional[str]): + + # TODO: resolve circular import + from dbt.config.utils import parse_cli_yaml_string + + warn_error_options_str = ( + str(warn_error_options_str) if warn_error_options_str is not None else "{}" + ) + warn_error_options = parse_cli_yaml_string(warn_error_options_str, "warn-error-options") + return cls( + include=warn_error_options.get("include", []), + exclude=warn_error_options.get("exclude", []), + ) + + def _validate_items(self, items: List[str]): + valid_exception_names = set( + [name for name, cls in dbt_event_types.__dict__.items() if isinstance(cls, type)] + ) + for item in items: + if item not in valid_exception_names: + raise ValidationError(f"{item} is not a valid dbt error name.") + + dbtClassMixin.register_field_encoders( { Port: PortEncoder(), diff --git a/core/dbt/main.py b/core/dbt/main.py index 1d2dad9b259..8368ab9f723 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -1006,18 +1006,32 @@ def parse_args(args, cls=DBTArgumentParser): """, ) - p.add_argument( + warn_error_flag = p.add_mutually_exclusive_group() + warn_error_flag.add_argument( "--warn-error", action="store_true", default=None, help=""" If dbt would normally warn, instead raise an exception. Examples - include --models that selects nothing, deprecations, configurations + include --select that selects nothing, deprecations, configurations with no associated models, invalid test configurations, and missing sources/refs in tests. """, ) + warn_error_flag.add_argument( + "--warn-error-options", + default=None, + help=""" + If dbt would normally warn, instead raise an exception based on + include/exclude configuration. Examples include --select that selects + nothing, deprecations, configurations with no associated models, + invalid test configurations, and missing sources/refs in tests. + This argument should be a YAML string, with keys 'include' or 'exclude'. + eg. 
'{"include": "all", "exclude": ["NoNodesForSelectionCriteria"]}' + """, + ) + p.add_argument( "--no-version-check", dest="version_check", diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 58504332ad2..0b4b47a7c5f 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -154,7 +154,7 @@ def get_node_selector(self) -> NodeSelector: @abstractmethod def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]): - raise NotImplementedException(f"defer_to_manifest not implemented for task {type(self)}") + raise NotImplementedError(f"defer_to_manifest not implemented for task {type(self)}") def get_graph_queue(self) -> GraphQueue: selector = self.get_node_selector() diff --git a/core/dbt/utils.py b/core/dbt/utils.py index e9c4677130d..3f31a806156 100644 --- a/core/dbt/utils.py +++ b/core/dbt/utils.py @@ -657,9 +657,10 @@ def args_to_dict(args): "store_failures", "use_experimental_parser", ) + default_empty_yaml_dict_keys = ("vars", "warn_error_options") if key in default_false_keys and var_args[key] is False: continue - if key == "vars" and var_args[key] == "{}": + if key in default_empty_yaml_dict_keys and var_args[key] == "{}": continue # this was required for a test case if isinstance(var_args[key], PosixPath) or isinstance(var_args[key], WindowsPath): diff --git a/core/dbt/version.py b/core/dbt/version.py index d668a902ae6..21eba9e0d6f 100644 --- a/core/dbt/version.py +++ b/core/dbt/version.py @@ -71,7 +71,7 @@ def _get_core_msg_lines(installed, latest) -> Tuple[List[List[str]], str]: latest_line = ["latest", latest_s, green("Up to date!")] if installed > latest: - latest_line[2] = green("Ahead of latest version!") + latest_line[2] = yellow("Ahead of latest version!") elif installed < latest: latest_line[2] = yellow("Update available!") update_info = ( @@ -145,7 +145,7 @@ def _get_plugin_msg_info( compatibility_msg = yellow("Update available!") needs_update = True elif plugin > latest_plugin: - compatibility_msg = green("Ahead of latest version!") + compatibility_msg = yellow("Ahead of latest version!") else: compatibility_msg = green("Up to date!") diff --git a/test/unit/test_flags.py b/test/unit/test_flags.py index 8bb248af443..6f03ec22e92 100644 --- a/test/unit/test_flags.py +++ b/test/unit/test_flags.py @@ -1,8 +1,8 @@ import os -from unittest import mock, TestCase +from unittest import TestCase from argparse import Namespace +import pytest -from .utils import normalize from dbt import flags from dbt.contracts.project import UserConfig from dbt.graph.selector_spec import IndirectSelection @@ -63,6 +63,21 @@ def test__flags(self): flags.WARN_ERROR = False self.user_config.warn_error = None + # warn_error_options + self.user_config.warn_error_options = '{"include": "all"}' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": "all"}') + os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}' + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": []}') + setattr(self.args, 'warn_error_options', '{"include": "all"}') + flags.set_from_args(self.args, self.user_config) + self.assertEqual(flags.WARN_ERROR_OPTIONS, '{"include": "all"}') + # cleanup + os.environ.pop('DBT_WARN_ERROR_OPTIONS') + delattr(self.args, 'warn_error_options') + self.user_config.warn_error_options = None + # write_json self.user_config.write_json = True flags.set_from_args(self.args, self.user_config) @@ -264,3 +279,59 @@ def test__flags(self): # cleanup 
os.environ.pop('DBT_LOG_PATH')
         delattr(self.args, 'log_path')
+
+    def test__flags_are_mutually_exclusive(self):
+        # options from user config
+        self.user_config.warn_error = False
+        self.user_config.warn_error_options = '{"include": "all"}'
+        with pytest.raises(ValueError):
+            flags.set_from_args(self.args, self.user_config)
+        # cleanup
+        self.user_config.warn_error = None
+        self.user_config.warn_error_options = None
+
+        # options from args
+        setattr(self.args, 'warn_error', False)
+        setattr(self.args, 'warn_error_options', '{"include": "all"}')
+        with pytest.raises(ValueError):
+            flags.set_from_args(self.args, self.user_config)
+        # cleanup
+        delattr(self.args, 'warn_error')
+        delattr(self.args, 'warn_error_options')
+
+        # options from environment
+        os.environ['DBT_WARN_ERROR'] = 'false'
+        os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}'
+        with pytest.raises(ValueError):
+            flags.set_from_args(self.args, self.user_config)
+        # cleanup
+        os.environ.pop('DBT_WARN_ERROR')
+        os.environ.pop('DBT_WARN_ERROR_OPTIONS')
+
+        # options from user config + args
+        self.user_config.warn_error = False
+        setattr(self.args, 'warn_error_options', '{"include": "all"}')
+        with pytest.raises(ValueError):
+            flags.set_from_args(self.args, self.user_config)
+        # cleanup
+        self.user_config.warn_error = None
+        delattr(self.args, 'warn_error_options')
+
+        # options from user config + environ
+        self.user_config.warn_error = False
+        os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}'
+        with pytest.raises(ValueError):
+            flags.set_from_args(self.args, self.user_config)
+        # cleanup
+        self.user_config.warn_error = None
+        os.environ.pop('DBT_WARN_ERROR_OPTIONS')
+
+        # options from args + environ
+        setattr(self.args, 'warn_error', False)
+        os.environ['DBT_WARN_ERROR_OPTIONS'] = '{"include": []}'
+        with pytest.raises(ValueError):
+            flags.set_from_args(self.args, self.user_config)
+        # cleanup
+        delattr(self.args, 'warn_error')
+        os.environ.pop('DBT_WARN_ERROR_OPTIONS')
+
diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py
index c7d0260a93b..07f1fed2715 100644
--- a/tests/unit/test_events.py
+++ b/tests/unit/test_events.py
@@ -111,7 +111,7 @@ def test_event_codes(self):
             MainTrackingUserState(user_state=""),
             MergedFromState(num_merged=0, sample=[]),
             MissingProfileTarget(profile_name="", target_name=""),
-            InvalidVarsYAML(),
+            InvalidOptionYAML(option_name="vars"),
             LogDbtProjectError(),
             LogDbtProfileError(),
             StarterProjectPath(dir=""),
diff --git a/tests/unit/test_functions.py b/tests/unit/test_functions.py
new file mode 100644
index 00000000000..4f7cb6845ac
--- /dev/null
+++ b/tests/unit/test_functions.py
@@ -0,0 +1,49 @@
+from argparse import Namespace
+import pytest
+
+import dbt.flags as flags
+from dbt.events.functions import warn_or_error
+from dbt.events.types import NoNodesForSelectionCriteria
+from dbt.exceptions import EventCompilationError
+
+
+@pytest.mark.parametrize(
+    "warn_error_options,expect_compilation_exception",
+    [
+        ('{"include": "all"}', True),
+        ('{"include": [NoNodesForSelectionCriteria]}', True),
+        ('{"include": []}', False),
+        ('{}', False),
+        ('{"include": [MainTrackingUserState]}', False),
+        ('{"include": "all", "exclude": [NoNodesForSelectionCriteria]}', False),
+    ],
+)
+def test_warn_or_error_warn_error_options(warn_error_options, expect_compilation_exception):
+    args = Namespace(
+        warn_error_options=warn_error_options
+    )
+    flags.set_from_args(args, {})
+    if expect_compilation_exception:
+        with pytest.raises(EventCompilationError):
+            warn_or_error(NoNodesForSelectionCriteria())
+
else: + warn_or_error(NoNodesForSelectionCriteria()) + + +@pytest.mark.parametrize( + "warn_error,expect_compilation_exception", + [ + (True, True), + (False, False), + ], +) +def test_warn_or_error_warn_error(warn_error, expect_compilation_exception): + args = Namespace( + warn_error=warn_error + ) + flags.set_from_args(args, {}) + if expect_compilation_exception: + with pytest.raises(EventCompilationError): + warn_or_error(NoNodesForSelectionCriteria()) + else: + warn_or_error(NoNodesForSelectionCriteria()) diff --git a/tests/unit/test_helper_types.py b/tests/unit/test_helper_types.py new file mode 100644 index 00000000000..0c867f47255 --- /dev/null +++ b/tests/unit/test_helper_types.py @@ -0,0 +1,46 @@ + +import pytest + +from dbt.helper_types import IncludeExclude, WarnErrorOptions +from dbt.dataclass_schema import ValidationError + + +class TestIncludeExclude: + def test_init_invalid(self): + with pytest.raises(ValidationError): + IncludeExclude(include="invalid") + + with pytest.raises(ValidationError): + IncludeExclude(include=["ItemA"], exclude=["ItemB"]) + + @pytest.mark.parametrize( + "include,exclude,expected_includes", + [ + ("all", [], True), + ("*", [], True), + ("*", ["ItemA"], False), + (["ItemA"], [], True), + (["ItemA", "ItemB"], [], True), + ] + ) + def test_includes(self, include, exclude, expected_includes): + include_exclude = IncludeExclude(include=include, exclude=exclude) + + assert include_exclude.includes("ItemA") == expected_includes + + +class TestWarnErrorOptions: + def test_init(self): + with pytest.raises(ValidationError): + WarnErrorOptions(include=["InvalidError"]) + + with pytest.raises(ValidationError): + WarnErrorOptions(include="*", exclude=["InvalidError"]) + + warn_error_options = WarnErrorOptions(include=["NoNodesForSelectionCriteria"]) + assert warn_error_options.include == ["NoNodesForSelectionCriteria"] + assert warn_error_options.exclude == [] + + warn_error_options = WarnErrorOptions(include="*", exclude=["NoNodesForSelectionCriteria"]) + assert warn_error_options.include == "*" + assert warn_error_options.exclude == ["NoNodesForSelectionCriteria"] From 86e8722cd84a3c996a1da755ae14d35acedc5645 Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Thu, 12 Jan 2023 09:34:36 +0100 Subject: [PATCH 105/156] Call `update_event_status` earlier + rename an event (#6572) * Rename HookFinished -> FinishedRunningStats * Move update_event_status earlier when node finishes * Add changelog entry * Add update_event_status for skip * Update changelog entry --- .changes/unreleased/Fixes-20230111-134058.yaml | 6 ++++++ core/dbt/events/proto_types.py | 6 +++--- core/dbt/events/types.proto | 6 +++--- core/dbt/events/types.py | 2 +- core/dbt/task/base.py | 7 +++++++ core/dbt/task/run.py | 6 ++++-- core/dbt/task/runnable.py | 4 ---- tests/unit/test_events.py | 2 +- 8 files changed, 25 insertions(+), 14 deletions(-) create mode 100644 .changes/unreleased/Fixes-20230111-134058.yaml diff --git a/.changes/unreleased/Fixes-20230111-134058.yaml b/.changes/unreleased/Fixes-20230111-134058.yaml new file mode 100644 index 00000000000..707cbfb39c6 --- /dev/null +++ b/.changes/unreleased/Fixes-20230111-134058.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Call update_event_status earlier for node results. 
Rename event 'HookFinished' -> FinishedRunningStats +time: 2023-01-11T13:40:58.577722+01:00 +custom: + Author: jtcohen6 + Issue: "6571" diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 1fe552270bf..2fc6ad1ccd7 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -945,7 +945,7 @@ class HooksRunningMsg(betterproto.Message): @dataclass -class HookFinished(betterproto.Message): +class FinishedRunningStats(betterproto.Message): """E047""" stat_line: str = betterproto.string_field(1) @@ -954,9 +954,9 @@ class HookFinished(betterproto.Message): @dataclass -class HookFinishedMsg(betterproto.Message): +class FinishedRunningStatsMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "HookFinished" = betterproto.message_field(2) + data: "FinishedRunningStats" = betterproto.message_field(2) @dataclass diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 80510687f81..21f9c9ee4bb 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -747,15 +747,15 @@ message HooksRunningMsg { } // E047 -message HookFinished { +message FinishedRunningStats { string stat_line = 1; string execution = 2; float execution_time = 3; } -message HookFinishedMsg { +message FinishedRunningStatsMsg { EventInfo info = 1; - HookFinished data = 2; + FinishedRunningStats data = 2; } diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 83fdf7854c1..4a2a0fb99ee 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -755,7 +755,7 @@ def message(self) -> str: @dataclass -class HookFinished(InfoLevel, pt.HookFinished): +class FinishedRunningStats(InfoLevel, pt.FinishedRunningStats): def code(self): return "E047" diff --git a/core/dbt/task/base.py b/core/dbt/task/base.py index e13f963cc7b..63449de10c7 100644 --- a/core/dbt/task/base.py +++ b/core/dbt/task/base.py @@ -4,6 +4,7 @@ import traceback from abc import ABCMeta, abstractmethod from typing import Type, Union, Dict, Any, Optional +from datetime import datetime from dbt import tracking from dbt import flags @@ -208,6 +209,9 @@ def run_with_hooks(self, manifest): self.before_execute() result = self.safe_run(manifest) + self.node.update_event_status( + node_status=result.status, finished_at=datetime.utcnow().isoformat() + ) if not self.node.is_ephemeral_model: self.after_execute(result) @@ -448,6 +452,9 @@ def on_skip(self): ) ) else: + # 'skipped' nodes should not have a value for 'node_finished_at' + # they do have 'node_started_at', which is set in GraphRunnableTask.call_runner + self.node.update_event_status(node_status=RunStatus.Skipped) fire_event( SkippingDetails( resource_type=self.node.resource_type, diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 145225be9d5..411c57af663 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -32,7 +32,7 @@ DatabaseErrorRunningHook, EmptyLine, HooksRunning, - HookFinished, + FinishedRunningStats, LogModelResult, LogStartLine, LogHookEndLine, @@ -421,7 +421,9 @@ def print_results_line(self, results, execution_time): with TextOnly(): fire_event(EmptyLine()) fire_event( - HookFinished(stat_line=stat_line, execution=execution, execution_time=execution_time) + FinishedRunningStats( + stat_line=stat_line, execution=execution, execution_time=execution_time + ) ) def before_run(self, adapter, selected_uids: AbstractSet[str]): diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 0b4b47a7c5f..fee5fadc891 100644 --- 
a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -226,10 +226,6 @@ def call_runner(self, runner): status: Dict[str, str] = {} try: result = runner.run_with_hooks(self.manifest) - status = runner.get_result_status(result) - runner.node.update_event_status( - node_status=result.status, finished_at=datetime.utcnow().isoformat() - ) finally: finishctx = TimestampNamed("finished_at") with finishctx, DbtModelState(status): diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 07f1fed2715..2afee427c4d 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -177,7 +177,7 @@ def test_event_codes(self): BuildingCatalog(), DatabaseErrorRunningHook(hook_type=""), HooksRunning(num_hooks=0, hook_type=""), - HookFinished(stat_line="", execution="", execution_time=0), + FinishedRunningStats(stat_line="", execution="", execution_time=0), # I - Project parsing ====================== ParseCmdOut(msg="testing"), From 1c7c23ac7325ba3b83ec804846039c2fb4495952 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Thu, 12 Jan 2023 11:07:26 -0500 Subject: [PATCH 106/156] convert 043_custom_alias_tests (#6590) --- .../macros-configs/macros.sql | 17 ----- .../macros/macros.sql | 17 ----- .../models/model1.sql | 3 - .../models/model2.sql | 3 - .../models/schema.yml | 15 ---- .../test_custom_aliases.py | 39 ----------- tests/functional/custom_aliases/fixtures.py | 68 +++++++++++++++++++ .../custom_aliases/test_custom_aliases.py | 57 ++++++++++++++++ 8 files changed, 125 insertions(+), 94 deletions(-) delete mode 100644 test/integration/043_custom_aliases_tests/macros-configs/macros.sql delete mode 100644 test/integration/043_custom_aliases_tests/macros/macros.sql delete mode 100644 test/integration/043_custom_aliases_tests/models/model1.sql delete mode 100644 test/integration/043_custom_aliases_tests/models/model2.sql delete mode 100644 test/integration/043_custom_aliases_tests/models/schema.yml delete mode 100644 test/integration/043_custom_aliases_tests/test_custom_aliases.py create mode 100644 tests/functional/custom_aliases/fixtures.py create mode 100644 tests/functional/custom_aliases/test_custom_aliases.py diff --git a/test/integration/043_custom_aliases_tests/macros-configs/macros.sql b/test/integration/043_custom_aliases_tests/macros-configs/macros.sql deleted file mode 100644 index a50044ea09f..00000000000 --- a/test/integration/043_custom_aliases_tests/macros-configs/macros.sql +++ /dev/null @@ -1,17 +0,0 @@ - -{#-- Verify that the config['alias'] key is present #} -{% macro generate_alias_name(custom_alias_name, node) -%} - {%- if custom_alias_name is none -%} - {{ node.name }} - {%- else -%} - custom_{{ node.config['alias'] if 'alias' in node.config else '' | trim }} - {%- endif -%} -{%- endmacro %} - -{% macro string_literal(s) -%} - {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }} -{%- endmacro %} - -{% macro default__string_literal(s) %} - '{{ s }}'::text -{% endmacro %} diff --git a/test/integration/043_custom_aliases_tests/macros/macros.sql b/test/integration/043_custom_aliases_tests/macros/macros.sql deleted file mode 100644 index a29f223b075..00000000000 --- a/test/integration/043_custom_aliases_tests/macros/macros.sql +++ /dev/null @@ -1,17 +0,0 @@ - -{% macro generate_alias_name(custom_alias_name, node) -%} - {%- if custom_alias_name is none -%} - {{ node.name }} - {%- else -%} - custom_{{ custom_alias_name | trim }} - {%- endif -%} -{%- endmacro %} - - -{% macro string_literal(s) -%} - {{ adapter.dispatch('string_literal', 
macro_namespace='test')(s) }} -{%- endmacro %} - -{% macro default__string_literal(s) %} - '{{ s }}'::text -{% endmacro %} diff --git a/test/integration/043_custom_aliases_tests/models/model1.sql b/test/integration/043_custom_aliases_tests/models/model1.sql deleted file mode 100644 index 000ce2ed6c5..00000000000 --- a/test/integration/043_custom_aliases_tests/models/model1.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table', alias='alias') }} - -select {{ string_literal(this.name) }} as model_name diff --git a/test/integration/043_custom_aliases_tests/models/model2.sql b/test/integration/043_custom_aliases_tests/models/model2.sql deleted file mode 100644 index a2de8f099ea..00000000000 --- a/test/integration/043_custom_aliases_tests/models/model2.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -select {{ string_literal(this.name) }} as model_name diff --git a/test/integration/043_custom_aliases_tests/models/schema.yml b/test/integration/043_custom_aliases_tests/models/schema.yml deleted file mode 100644 index 4d43836e482..00000000000 --- a/test/integration/043_custom_aliases_tests/models/schema.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: 2 - -models: - - name: model1 - columns: - - name: model_name - tests: - - accepted_values: - values: ['custom_alias'] - - name: model2 - columns: - - name: model_name - tests: - - accepted_values: - values: ['model2'] diff --git a/test/integration/043_custom_aliases_tests/test_custom_aliases.py b/test/integration/043_custom_aliases_tests/test_custom_aliases.py deleted file mode 100644 index 1acc9dd5224..00000000000 --- a/test/integration/043_custom_aliases_tests/test_custom_aliases.py +++ /dev/null @@ -1,39 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestAliases(DBTIntegrationTest): - @property - def schema(self): - return "custom_aliases_043" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - "macro-paths": ['macros'], - } - - @use_profile('postgres') - def test_postgres_customer_alias_name(self): - results = self.run_dbt(['run']) - self.assertEqual(len(results), 2) - self.run_dbt(['test']) - - -class TestAliasesWithConfig(TestAliases): - @property - def project_config(self): - return { - 'config-version': 2, - "macro-paths": ['macros-configs'], - } - - @use_profile('postgres') - def test_postgres_customer_alias_name(self): - results = self.run_dbt(['run']) - self.assertEqual(len(results), 2) - self.run_dbt(['test']) diff --git a/tests/functional/custom_aliases/fixtures.py b/tests/functional/custom_aliases/fixtures.py new file mode 100644 index 00000000000..6324e1249e4 --- /dev/null +++ b/tests/functional/custom_aliases/fixtures.py @@ -0,0 +1,68 @@ +model1_sql = """ +{{ config(materialized='table', alias='alias') }} + +select {{ string_literal(this.name) }} as model_name +""" + +model2_sql = """ +{{ config(materialized='table') }} + +select {{ string_literal(this.name) }} as model_name +""" + +macros_sql = """ +{% macro generate_alias_name(custom_alias_name, node) -%} + {%- if custom_alias_name is none -%} + {{ node.name }} + {%- else -%} + custom_{{ custom_alias_name | trim }} + {%- endif -%} +{%- endmacro %} + + +{% macro string_literal(s) -%} + {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }} +{%- endmacro %} + +{% macro default__string_literal(s) %} + '{{ s }}'::text +{% endmacro %} +""" + +macros_config_sql = """ +{#-- Verify that the config['alias'] key is present #} +{% 
macro generate_alias_name(custom_alias_name, node) -%} + {%- if custom_alias_name is none -%} + {{ node.name }} + {%- else -%} + custom_{{ node.config['alias'] if 'alias' in node.config else '' | trim }} + {%- endif -%} +{%- endmacro %} + +{% macro string_literal(s) -%} + {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }} +{%- endmacro %} + +{% macro default__string_literal(s) %} + '{{ s }}'::text +{% endmacro %} +""" + +schema_yml = """ +version: 2 + +models: + - name: model1 + columns: + - name: model_name + tests: + - accepted_values: + values: ['custom_alias'] + - name: model2 + columns: + - name: model_name + tests: + - accepted_values: + values: ['model2'] + +""" diff --git a/tests/functional/custom_aliases/test_custom_aliases.py b/tests/functional/custom_aliases/test_custom_aliases.py new file mode 100644 index 00000000000..561899f4575 --- /dev/null +++ b/tests/functional/custom_aliases/test_custom_aliases.py @@ -0,0 +1,57 @@ +import pytest + +from dbt.tests.util import run_dbt + +from tests.functional.custom_aliases.fixtures import ( + model1_sql, + model2_sql, + macros_sql, + macros_config_sql, + schema_yml +) + + +class TestAliases: + @pytest.fixture(scope="class") + def models(self): + return { + "model1.sql": model1_sql, + "model2.sql": model2_sql, + "schema.yml": schema_yml + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + } + + def test_customer_alias_name(self, project): + results = run_dbt(['run']) + assert len(results) == 2 + + results = run_dbt(['test']) + assert len(results) == 2 + + +class TestAliasesWithConfig: + @pytest.fixture(scope="class") + def models(self): + return { + "model1.sql": model1_sql, + "model2.sql": model2_sql, + "schema.yml": schema_yml + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_config_sql, + } + + def test_customer_alias_name(self, project): + results = run_dbt(['run']) + assert len(results) == 2 + + results = run_dbt(['test']) + assert len(results) == 2 From d74ae19523122d63b0281e176158a6851204a711 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 12 Jan 2023 12:59:28 -0600 Subject: [PATCH 107/156] Bumping version to 1.5.0a1 and generate changelog (#6593) * Bumping version to 1.5.0a1 and generate CHANGELOG * make relevvant changelog updates Co-authored-by: Github Build Bot Co-authored-by: Emily Rockman --- .bumpversion.cfg | 2 +- .changes/0.0.0.md | 1 + .changes/1.4.0-b1.md | 88 ------------------ .../1.4.0/Dependency-20220923-000646.yaml | 6 -- .../1.4.0/Dependency-20221007-000848.yaml | 6 -- .../1.4.0/Dependency-20221020-000753.yaml | 6 -- .../1.4.0/Dependency-20221026-000910.yaml | 6 -- .../1.4.0/Dependency-20221205-002118.yaml | 7 -- .changes/1.4.0/Docs-20220908-154157.yaml | 6 -- .changes/1.4.0/Docs-20221007-090656.yaml | 6 -- .changes/1.4.0/Docs-20221017-171411.yaml | 5 - .changes/1.4.0/Docs-20221116-155743.yaml | 6 -- .changes/1.4.0/Docs-20221202-150523.yaml | 6 -- .changes/1.4.0/Features-20220408-165459.yaml | 7 -- .changes/1.4.0/Features-20220817-154857.yaml | 6 -- .changes/1.4.0/Features-20220823-085727.yaml | 7 -- .changes/1.4.0/Features-20220912-125935.yaml | 6 -- .changes/1.4.0/Features-20220925-211651.yaml | 6 -- .changes/1.4.0/Features-20221003-110705.yaml | 6 -- .changes/1.4.0/Features-20221102-150003.yaml | 7 -- .changes/1.4.0/Features-20221107-105018.yaml | 8 -- .changes/1.4.0/Features-20221114-185207.yaml | 6 -- 
.changes/1.4.0/Features-20221130-112913.yaml | 6 -- .changes/1.4.0/Features-20221206-150704.yaml | 7 -- .changes/1.4.0/Fixes-20220916-104854.yaml | 6 -- .changes/1.4.0/Fixes-20221010-113218.yaml | 6 -- .changes/1.4.0/Fixes-20221011-160715.yaml | 6 -- .changes/1.4.0/Fixes-20221016-173742.yaml | 7 -- .changes/1.4.0/Fixes-20221107-095314.yaml | 6 -- .changes/1.4.0/Fixes-20221115-081021.yaml | 6 -- .changes/1.4.0/Fixes-20221124-163419.yaml | 7 -- .changes/1.4.0/Fixes-20221202-164859.yaml | 6 -- .changes/1.4.0/Fixes-20221213-112620.yaml | 6 -- .changes/1.4.0/Fixes-20221214-155307.yaml | 7 -- .../1.4.0/Under the Hood-20220927-194259.yaml | 6 -- .../1.4.0/Under the Hood-20220929-134406.yaml | 6 -- .../1.4.0/Under the Hood-20221005-120310.yaml | 6 -- .../1.4.0/Under the Hood-20221007-094627.yaml | 6 -- .../1.4.0/Under the Hood-20221007-140044.yaml | 6 -- .../1.4.0/Under the Hood-20221013-181912.yaml | 6 -- .../1.4.0/Under the Hood-20221017-151511.yaml | 6 -- .../1.4.0/Under the Hood-20221017-155844.yaml | 6 -- .../1.4.0/Under the Hood-20221028-104837.yaml | 6 -- .../1.4.0/Under the Hood-20221028-110344.yaml | 6 -- .../1.4.0/Under the Hood-20221108-074550.yaml | 6 -- .../1.4.0/Under the Hood-20221108-115633.yaml | 6 -- .../1.4.0/Under the Hood-20221108-133104.yaml | 6 -- .../1.4.0/Under the Hood-20221116-130037.yaml | 6 -- .../1.4.0/Under the Hood-20221118-145717.yaml | 8 -- .../1.4.0/Under the Hood-20221205-164948.yaml | 7 -- .../1.4.0/Under the Hood-20221206-094015.yaml | 7 -- .../1.4.0/Under the Hood-20221206-113053.yaml | 7 -- .../1.4.0/Under the Hood-20221211-214240.yaml | 7 -- .../1.4.0/Under the Hood-20221213-214106.yaml | 7 -- .../Breaking Changes-20221205-141937.yaml | 9 -- .../Dependencies-20230104-000306.yaml | 6 -- .changes/unreleased/Docs-20230102-170216.yaml | 6 -- .../unreleased/Features-20221207-091722.yaml | 7 -- .../unreleased/Features-20221230-104820.yaml | 7 -- .../unreleased/Features-20230102-091335.yaml | 6 -- .../unreleased/Features-20230104-181003.yaml | 7 -- .../unreleased/Fixes-20221113-104150.yaml | 7 -- .../unreleased/Fixes-20221117-220320.yaml | 7 -- .../unreleased/Fixes-20221212-115912.yaml | 7 -- .../unreleased/Fixes-20221213-092655.yaml | 6 -- .../unreleased/Fixes-20221213-113915.yaml | 6 -- .../unreleased/Fixes-20221226-010211.yaml | 6 -- .../unreleased/Fixes-20230101-223405.yaml | 6 -- .../unreleased/Fixes-20230104-141047.yaml | 7 -- .../unreleased/Fixes-20230109-161254.yaml | 7 -- .../unreleased/Fixes-20230110-124132.yaml | 7 -- .../unreleased/Fixes-20230111-134058.yaml | 6 -- .../Under the Hood-20221219-193435.yaml | 6 -- .../Under the Hood-20221221-121904.yaml | 6 -- .../Under the Hood-20230104-155257.yaml | 6 -- .../Under the Hood-20230106-112855.yaml | 6 -- .../Under the Hood-20230106-114412.yaml | 6 -- .../Under the Hood-20230109-095907.yaml | 6 -- .../Under the Hood-20230110-114233.yaml | 6 -- .../Under the Hood-20230110-145648.yaml | 6 -- CHANGELOG.md | 91 +------------------ core/dbt/version.py | 2 +- core/setup.py | 2 +- docker/Dockerfile | 12 +-- .../dbt/adapters/postgres/__version__.py | 2 +- plugins/postgres/setup.py | 2 +- .../adapter/dbt/tests/adapter/__version__.py | 2 +- tests/adapter/setup.py | 2 +- 88 files changed, 15 insertions(+), 681 deletions(-) delete mode 100644 .changes/1.4.0-b1.md delete mode 100644 .changes/1.4.0/Dependency-20220923-000646.yaml delete mode 100644 .changes/1.4.0/Dependency-20221007-000848.yaml delete mode 100644 .changes/1.4.0/Dependency-20221020-000753.yaml delete mode 100644 
.changes/1.4.0/Dependency-20221026-000910.yaml delete mode 100644 .changes/1.4.0/Dependency-20221205-002118.yaml delete mode 100644 .changes/1.4.0/Docs-20220908-154157.yaml delete mode 100644 .changes/1.4.0/Docs-20221007-090656.yaml delete mode 100644 .changes/1.4.0/Docs-20221017-171411.yaml delete mode 100644 .changes/1.4.0/Docs-20221116-155743.yaml delete mode 100644 .changes/1.4.0/Docs-20221202-150523.yaml delete mode 100644 .changes/1.4.0/Features-20220408-165459.yaml delete mode 100644 .changes/1.4.0/Features-20220817-154857.yaml delete mode 100644 .changes/1.4.0/Features-20220823-085727.yaml delete mode 100644 .changes/1.4.0/Features-20220912-125935.yaml delete mode 100644 .changes/1.4.0/Features-20220925-211651.yaml delete mode 100644 .changes/1.4.0/Features-20221003-110705.yaml delete mode 100644 .changes/1.4.0/Features-20221102-150003.yaml delete mode 100644 .changes/1.4.0/Features-20221107-105018.yaml delete mode 100644 .changes/1.4.0/Features-20221114-185207.yaml delete mode 100644 .changes/1.4.0/Features-20221130-112913.yaml delete mode 100644 .changes/1.4.0/Features-20221206-150704.yaml delete mode 100644 .changes/1.4.0/Fixes-20220916-104854.yaml delete mode 100644 .changes/1.4.0/Fixes-20221010-113218.yaml delete mode 100644 .changes/1.4.0/Fixes-20221011-160715.yaml delete mode 100644 .changes/1.4.0/Fixes-20221016-173742.yaml delete mode 100644 .changes/1.4.0/Fixes-20221107-095314.yaml delete mode 100644 .changes/1.4.0/Fixes-20221115-081021.yaml delete mode 100644 .changes/1.4.0/Fixes-20221124-163419.yaml delete mode 100644 .changes/1.4.0/Fixes-20221202-164859.yaml delete mode 100644 .changes/1.4.0/Fixes-20221213-112620.yaml delete mode 100644 .changes/1.4.0/Fixes-20221214-155307.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20220927-194259.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20220929-134406.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221005-120310.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221007-094627.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221007-140044.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221013-181912.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221017-151511.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221017-155844.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221028-104837.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221028-110344.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221108-074550.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221108-115633.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221108-133104.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221116-130037.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221118-145717.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221205-164948.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221206-094015.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221206-113053.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221211-214240.yaml delete mode 100644 .changes/1.4.0/Under the Hood-20221213-214106.yaml delete mode 100644 .changes/unreleased/Breaking Changes-20221205-141937.yaml delete mode 100644 .changes/unreleased/Dependencies-20230104-000306.yaml delete mode 100644 .changes/unreleased/Docs-20230102-170216.yaml delete mode 100644 .changes/unreleased/Features-20221207-091722.yaml delete mode 100644 .changes/unreleased/Features-20221230-104820.yaml delete mode 100644 .changes/unreleased/Features-20230102-091335.yaml 
delete mode 100644 .changes/unreleased/Features-20230104-181003.yaml delete mode 100644 .changes/unreleased/Fixes-20221113-104150.yaml delete mode 100644 .changes/unreleased/Fixes-20221117-220320.yaml delete mode 100644 .changes/unreleased/Fixes-20221212-115912.yaml delete mode 100644 .changes/unreleased/Fixes-20221213-092655.yaml delete mode 100644 .changes/unreleased/Fixes-20221213-113915.yaml delete mode 100644 .changes/unreleased/Fixes-20221226-010211.yaml delete mode 100644 .changes/unreleased/Fixes-20230101-223405.yaml delete mode 100644 .changes/unreleased/Fixes-20230104-141047.yaml delete mode 100644 .changes/unreleased/Fixes-20230109-161254.yaml delete mode 100644 .changes/unreleased/Fixes-20230110-124132.yaml delete mode 100644 .changes/unreleased/Fixes-20230111-134058.yaml delete mode 100644 .changes/unreleased/Under the Hood-20221219-193435.yaml delete mode 100644 .changes/unreleased/Under the Hood-20221221-121904.yaml delete mode 100644 .changes/unreleased/Under the Hood-20230104-155257.yaml delete mode 100644 .changes/unreleased/Under the Hood-20230106-112855.yaml delete mode 100644 .changes/unreleased/Under the Hood-20230106-114412.yaml delete mode 100644 .changes/unreleased/Under the Hood-20230109-095907.yaml delete mode 100644 .changes/unreleased/Under the Hood-20230110-114233.yaml delete mode 100644 .changes/unreleased/Under the Hood-20230110-145648.yaml diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 3cdca1ad352..e55d8b13ece 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.4.0b1 +current_version = 1.5.0a1 parse = (?P\d+) \.(?P\d+) \.(?P\d+) diff --git a/.changes/0.0.0.md b/.changes/0.0.0.md index 5359cd07bf2..f3a5e03d1a1 100644 --- a/.changes/0.0.0.md +++ b/.changes/0.0.0.md @@ -3,6 +3,7 @@ For information on prior major and minor releases, see their changelogs: +* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md) * [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md) * [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md) * [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md) diff --git a/.changes/1.4.0-b1.md b/.changes/1.4.0-b1.md deleted file mode 100644 index d9d8537e98a..00000000000 --- a/.changes/1.4.0-b1.md +++ /dev/null @@ -1,88 +0,0 @@ -## dbt-core 1.4.0-b1 - December 15, 2022 - -### Features - -- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) -- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) -- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) -- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) -- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. 
([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) -- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) -- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) -- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) -- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) - -### Fixes - -- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992)) -- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030)) -- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) -- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. ([#5436](https://github.com/dbt-labs/dbt-core/issues/5436)) -- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625)) -- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) -- After this, it will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) -- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) -- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345)) -- Repair a regression which prevented basic logging before the logging subsystem is completely configured. ([#6434](https://github.com/dbt-labs/dbt-core/issues/6434)) - -### Docs - -- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) -- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528)) -- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) -- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323)) -- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368)) - -### Under the Hood - -- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946)) -- Added flat_graph attribute to the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809)) -- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983)) -- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023)) -- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) -- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229)) -- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) -- Fixed extra whitespace in strings introduced by black.
([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) -- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171)) -- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173)) -- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) -- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770)) -- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771)) -- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) -- Functionality-neutral refactor of event logging system to improve encapsulation and modularity. ([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) -- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) -- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) -- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) -- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) -- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) - -### Dependencies - -- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) -- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) -- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) -- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) -- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) - -### Contributors -- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) -- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) -- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) -- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) -- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) -- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) -- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) -- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) -- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) -- [@luke-bassett](https://github.com/luke-bassett) 
([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) -- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) -- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) -- [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) -- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) -- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) diff --git a/.changes/1.4.0/Dependency-20220923-000646.yaml b/.changes/1.4.0/Dependency-20220923-000646.yaml deleted file mode 100644 index 0375eeb125f..00000000000 --- a/.changes/1.4.0/Dependency-20220923-000646.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core" -time: 2022-09-23T00:06:46.00000Z -custom: - Author: dependabot[bot] - PR: "5917" diff --git a/.changes/1.4.0/Dependency-20221007-000848.yaml b/.changes/1.4.0/Dependency-20221007-000848.yaml deleted file mode 100644 index 7e36733d14e..00000000000 --- a/.changes/1.4.0/Dependency-20221007-000848.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Bump black from 22.8.0 to 22.10.0" -time: 2022-10-07T00:08:48.00000Z -custom: - Author: dependabot[bot] - PR: "6019" diff --git a/.changes/1.4.0/Dependency-20221020-000753.yaml b/.changes/1.4.0/Dependency-20221020-000753.yaml deleted file mode 100644 index ce0f122826b..00000000000 --- a/.changes/1.4.0/Dependency-20221020-000753.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core" -time: 2022-10-20T00:07:53.00000Z -custom: - Author: dependabot[bot] - PR: "6108" diff --git a/.changes/1.4.0/Dependency-20221026-000910.yaml b/.changes/1.4.0/Dependency-20221026-000910.yaml deleted file mode 100644 index d68fa8a11ef..00000000000 --- a/.changes/1.4.0/Dependency-20221026-000910.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core" -time: 2022-10-26T00:09:10.00000Z -custom: - Author: dependabot[bot] - PR: "6144" diff --git a/.changes/1.4.0/Dependency-20221205-002118.yaml b/.changes/1.4.0/Dependency-20221205-002118.yaml deleted file mode 100644 index f4203a5285c..00000000000 --- a/.changes/1.4.0/Dependency-20221205-002118.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: "Dependencies" -body: "Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core" -time: 2022-12-05T00:21:18.00000Z -custom: - Author: dependabot[bot] - Issue: 4904 - PR: 6375 diff --git a/.changes/1.4.0/Docs-20220908-154157.yaml b/.changes/1.4.0/Docs-20220908-154157.yaml deleted file mode 100644 index e307f3bd5e0..00000000000 --- a/.changes/1.4.0/Docs-20220908-154157.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: minor doc correction -time: 2022-09-08T15:41:57.689162-04:00 -custom: - Author: andy-clapson - Issue: "5791" diff --git a/.changes/1.4.0/Docs-20221007-090656.yaml b/.changes/1.4.0/Docs-20221007-090656.yaml deleted file mode 100644 index 070ecd48944..00000000000 --- a/.changes/1.4.0/Docs-20221007-090656.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: 
Generate API docs for new CLI interface -time: 2022-10-07T09:06:56.446078-05:00 -custom: - Author: stu-k - Issue: "5528" diff --git a/.changes/1.4.0/Docs-20221017-171411.yaml b/.changes/1.4.0/Docs-20221017-171411.yaml deleted file mode 100644 index 487362c1d5c..00000000000 --- a/.changes/1.4.0/Docs-20221017-171411.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: Docs -time: 2022-10-17T17:14:11.715348-05:00 -custom: - Author: paulbenschmidt - Issue: "5880" diff --git a/.changes/1.4.0/Docs-20221116-155743.yaml b/.changes/1.4.0/Docs-20221116-155743.yaml deleted file mode 100644 index 84d90a67b99..00000000000 --- a/.changes/1.4.0/Docs-20221116-155743.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: Fix rendering of sample code for metrics -time: 2022-11-16T15:57:43.204201+01:00 -custom: - Author: jtcohen6 - Issue: "323" diff --git a/.changes/1.4.0/Docs-20221202-150523.yaml b/.changes/1.4.0/Docs-20221202-150523.yaml deleted file mode 100644 index b08a32cddf6..00000000000 --- a/.changes/1.4.0/Docs-20221202-150523.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: Alphabetize `core/dbt/README.md` -time: 2022-12-02T15:05:23.695333-07:00 -custom: - Author: dbeatty10 - Issue: "6368" diff --git a/.changes/1.4.0/Features-20220408-165459.yaml b/.changes/1.4.0/Features-20220408-165459.yaml deleted file mode 100644 index 18675c7244a..00000000000 --- a/.changes/1.4.0/Features-20220408-165459.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: Added favor-state flag to optionally favor state nodes even if unselected node - exists -time: 2022-04-08T16:54:59.696564+01:00 -custom: - Author: daniel-murray josephberni - Issue: "5016" diff --git a/.changes/1.4.0/Features-20220817-154857.yaml b/.changes/1.4.0/Features-20220817-154857.yaml deleted file mode 100644 index ad53df05a3f..00000000000 --- a/.changes/1.4.0/Features-20220817-154857.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. 
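The structured-logging entry just above describes converting dbt's events to protobuf messages enriched with node_info. A rough sketch of the betterproto pattern that `core/dbt/events/proto_types.py` uses follows; `ExampleEvent` and its fields are hypothetical, not dbt's real event types.

```python
# A minimal betterproto-style message, mirroring the dataclass pattern used
# in core/dbt/events/proto_types.py; the message and field names here are
# hypothetical, not actual dbt event types.
from dataclasses import dataclass

import betterproto


@dataclass
class ExampleEvent(betterproto.Message):
    msg: str = betterproto.string_field(1)
    node_name: str = betterproto.string_field(2)


event = ExampleEvent(msg="hello", node_name="my_model")
wire = bytes(event)                      # serialize to protobuf wire format
round_tripped = ExampleEvent().parse(wire)
assert round_tripped.node_name == "my_model"
```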
-time: 2022-08-17T15:48:57.225267-04:00 -custom: - Author: gshank - Issue: "5610" diff --git a/.changes/1.4.0/Features-20220823-085727.yaml b/.changes/1.4.0/Features-20220823-085727.yaml deleted file mode 100644 index 4d8daebbf5e..00000000000 --- a/.changes/1.4.0/Features-20220823-085727.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: incremental predicates -time: 2022-08-23T08:57:27.640804-05:00 -custom: - Author: dave-connors-3 - Issue: "5680" - PR: "5702" diff --git a/.changes/1.4.0/Features-20220912-125935.yaml b/.changes/1.4.0/Features-20220912-125935.yaml deleted file mode 100644 index d49f35fd0af..00000000000 --- a/.changes/1.4.0/Features-20220912-125935.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Friendlier error messages when packages.yml is malformed -time: 2022-09-12T12:59:35.121188+01:00 -custom: - Author: jared-rimmer - Issue: "5486" diff --git a/.changes/1.4.0/Features-20220925-211651.yaml b/.changes/1.4.0/Features-20220925-211651.yaml deleted file mode 100644 index d2c1911c720..00000000000 --- a/.changes/1.4.0/Features-20220925-211651.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Allow partitions in external tables to be supplied as a list -time: 2022-09-25T21:16:51.051239654+02:00 -custom: - Author: pgoslatara - Issue: "5929" diff --git a/.changes/1.4.0/Features-20221003-110705.yaml b/.changes/1.4.0/Features-20221003-110705.yaml deleted file mode 100644 index 637d8be58c6..00000000000 --- a/.changes/1.4.0/Features-20221003-110705.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: extend -f flag shorthand for seed command -time: 2022-10-03T11:07:05.381632-05:00 -custom: - Author: dave-connors-3 - Issue: "5990" diff --git a/.changes/1.4.0/Features-20221102-150003.yaml b/.changes/1.4.0/Features-20221102-150003.yaml deleted file mode 100644 index 9d8ba192687..00000000000 --- a/.changes/1.4.0/Features-20221102-150003.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: This pulls the profile name from args when constructing a RuntimeConfig in lib.py, - enabling the dbt-server to override the value that's in the dbt_project.yml -time: 2022-11-02T15:00:03.000805-05:00 -custom: - Author: racheldaniel - Issue: "6201" diff --git a/.changes/1.4.0/Features-20221107-105018.yaml b/.changes/1.4.0/Features-20221107-105018.yaml deleted file mode 100644 index db6a0ab753a..00000000000 --- a/.changes/1.4.0/Features-20221107-105018.yaml +++ /dev/null @@ -1,8 +0,0 @@ -kind: Features -body: Adding tarball install method for packages. Allowing package tarball to be specified - via url in the packages.yaml. 
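The tarball install entry above lets a package be pinned to a hosted archive, specified via a url in packages.yml, rather than a git ref or the package hub. As a hedged illustration of what fetching and unpacking such an archive involves (the URL and target directory are made up, and this is not how `dbt deps` is implemented internally):

```python
# Hand-rolled illustration of fetching a package tarball; the URL is
# hypothetical, the archive is assumed trusted, and this is not dbt's
# actual deps implementation.
import io
import tarfile
import urllib.request

url = "https://example.com/releases/my_dbt_package-1.0.0.tar.gz"  # hypothetical
with urllib.request.urlopen(url) as response:
    archive = tarfile.open(fileobj=io.BytesIO(response.read()), mode="r:gz")
with archive:
    archive.extractall("dbt_packages/my_dbt_package")
```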
-time: 2022-11-07T10:50:18.464545-05:00 -custom: - Author: timle2 - Issue: "4205" - PR: "4689" diff --git a/.changes/1.4.0/Features-20221114-185207.yaml b/.changes/1.4.0/Features-20221114-185207.yaml deleted file mode 100644 index 459bc8ce234..00000000000 --- a/.changes/1.4.0/Features-20221114-185207.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Added an md5 function to the base context -time: 2022-11-14T18:52:07.788593+02:00 -custom: - Author: haritamar - Issue: "6246" diff --git a/.changes/1.4.0/Features-20221130-112913.yaml b/.changes/1.4.0/Features-20221130-112913.yaml deleted file mode 100644 index 64832de2f68..00000000000 --- a/.changes/1.4.0/Features-20221130-112913.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Exposures support metrics in lineage -time: 2022-11-30T11:29:13.256034-05:00 -custom: - Author: michelleark - Issue: "6057" diff --git a/.changes/1.4.0/Features-20221206-150704.yaml b/.changes/1.4.0/Features-20221206-150704.yaml deleted file mode 100644 index 47939ea5a79..00000000000 --- a/.changes/1.4.0/Features-20221206-150704.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: Add support for Python 3.11 -time: 2022-12-06T15:07:04.753127+01:00 -custom: - Author: joshuataylor MichelleArk jtcohen6 - Issue: "6147" - PR: "6326" diff --git a/.changes/1.4.0/Fixes-20220916-104854.yaml b/.changes/1.4.0/Fixes-20220916-104854.yaml deleted file mode 100644 index bd9af0469a7..00000000000 --- a/.changes/1.4.0/Fixes-20220916-104854.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Account for disabled flags on models in schema files more completely -time: 2022-09-16T10:48:54.162273-05:00 -custom: - Author: emmyoop - Issue: "3992" diff --git a/.changes/1.4.0/Fixes-20221010-113218.yaml b/.changes/1.4.0/Fixes-20221010-113218.yaml deleted file mode 100644 index 5b73b8d9ccd..00000000000 --- a/.changes/1.4.0/Fixes-20221010-113218.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Add validation of enabled config for metrics, exposures and sources -time: 2022-10-10T11:32:18.752322-05:00 -custom: - Author: emmyoop - Issue: "6030" diff --git a/.changes/1.4.0/Fixes-20221011-160715.yaml b/.changes/1.4.0/Fixes-20221011-160715.yaml deleted file mode 100644 index 936546a5232..00000000000 --- a/.changes/1.4.0/Fixes-20221011-160715.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: check length of args of python model function before accessing it -time: 2022-10-11T16:07:15.464093-04:00 -custom: - Author: chamini2 - Issue: "6041" diff --git a/.changes/1.4.0/Fixes-20221016-173742.yaml b/.changes/1.4.0/Fixes-20221016-173742.yaml deleted file mode 100644 index c7b00dddba8..00000000000 --- a/.changes/1.4.0/Fixes-20221016-173742.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Add functors to ensure event types with str-type attributes are initialized - to spec, even when provided non-str type params. 
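The functor fix above is about keeping event construction total: if a caller passes an int where the event schema expects a string, coerce it rather than crash. A generic sketch of that idea, not the actual dbt functor code:

```python
# Generic illustration of coercing event attributes to str on construction;
# dbt's real implementation differs, this only shows the idea.
from dataclasses import dataclass, fields


@dataclass
class StrCoercingEvent:
    name: str
    status: str

    def __post_init__(self) -> None:
        # Force every declared str field to actually hold a str.
        for f in fields(self):
            if f.type in ("str", str):
                setattr(self, f.name, str(getattr(self, f.name)))


event = StrCoercingEvent(name="my_model", status=200)  # an int sneaks in
assert event.status == "200"
```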
-time: 2022-10-16T17:37:42.846683-07:00 -custom: - Author: versusfacit - Issue: "5436" diff --git a/.changes/1.4.0/Fixes-20221107-095314.yaml b/.changes/1.4.0/Fixes-20221107-095314.yaml deleted file mode 100644 index 99da9c44522..00000000000 --- a/.changes/1.4.0/Fixes-20221107-095314.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Allow hooks to fail without halting execution flow -time: 2022-11-07T09:53:14.340257-06:00 -custom: - Author: ChenyuLInx - Issue: "5625" diff --git a/.changes/1.4.0/Fixes-20221115-081021.yaml b/.changes/1.4.0/Fixes-20221115-081021.yaml deleted file mode 100644 index 40c81fabacb..00000000000 --- a/.changes/1.4.0/Fixes-20221115-081021.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Clarify Error Message for how many models are allowed in a Python file -time: 2022-11-15T08:10:21.527884-05:00 -custom: - Author: justbldwn - Issue: "6245" diff --git a/.changes/1.4.0/Fixes-20221124-163419.yaml b/.changes/1.4.0/Fixes-20221124-163419.yaml deleted file mode 100644 index 010a073269a..00000000000 --- a/.changes/1.4.0/Fixes-20221124-163419.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: After this, it will be possible to use default values for dbt.config.get -time: 2022-11-24T16:34:19.039512764-03:00 -custom: - Author: devmessias - Issue: "6309" - PR: "6317" diff --git a/.changes/1.4.0/Fixes-20221202-164859.yaml b/.changes/1.4.0/Fixes-20221202-164859.yaml deleted file mode 100644 index 6aad4ced192..00000000000 --- a/.changes/1.4.0/Fixes-20221202-164859.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Use full path for writing manifest -time: 2022-12-02T16:48:59.029519-05:00 -custom: - Author: gshank - Issue: "6055" diff --git a/.changes/1.4.0/Fixes-20221213-112620.yaml b/.changes/1.4.0/Fixes-20221213-112620.yaml deleted file mode 100644 index fabe5d1af2d..00000000000 --- a/.changes/1.4.0/Fixes-20221213-112620.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: '[CT-1284] Change Python model default materialization to table' -time: 2022-12-13T11:26:20.550017-08:00 -custom: - Author: aranke - Issue: "5989" diff --git a/.changes/1.4.0/Fixes-20221214-155307.yaml b/.changes/1.4.0/Fixes-20221214-155307.yaml deleted file mode 100644 index cb37e0a809c..00000000000 --- a/.changes/1.4.0/Fixes-20221214-155307.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Repair a regression which prevented basic logging before the logging subsystem - is completely configured.
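The regression recorded just above concerned log lines emitted before the logging subsystem finishes configuring itself. The usual defensive pattern, sketched generically here rather than as dbt's actual fix, is a minimal bootstrap handler that is swapped out once real configuration is known:

```python
# Generic bootstrap-logger pattern; this is not dbt's implementation.
import logging
import sys

# Minimal handler so early messages are never dropped.
bootstrap = logging.StreamHandler(sys.stderr)
logging.getLogger().addHandler(bootstrap)
logging.getLogger().setLevel(logging.INFO)

logging.getLogger(__name__).info("visible before full configuration")

# Later, once the real configuration is loaded, retire the bootstrap handler.
logging.getLogger().removeHandler(bootstrap)
```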
-time: 2022-12-14T15:53:07.396512-05:00 -custom: - Author: peterallenwebb - Issue: "6434" diff --git a/.changes/1.4.0/Under the Hood-20220927-194259.yaml b/.changes/1.4.0/Under the Hood-20220927-194259.yaml deleted file mode 100644 index b6cb64b0155..00000000000 --- a/.changes/1.4.0/Under the Hood-20220927-194259.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Put black config in explicit config -time: 2022-09-27T19:42:59.241433-07:00 -custom: - Author: max-sixty - Issue: "5946" diff --git a/.changes/1.4.0/Under the Hood-20220929-134406.yaml b/.changes/1.4.0/Under the Hood-20220929-134406.yaml deleted file mode 100644 index b0175190747..00000000000 --- a/.changes/1.4.0/Under the Hood-20220929-134406.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Added flat_graph attribute to the Manifest class's deepcopy() coverage -time: 2022-09-29T13:44:06.275941-04:00 -custom: - Author: peterallenwebb - Issue: "5809" diff --git a/.changes/1.4.0/Under the Hood-20221005-120310.yaml b/.changes/1.4.0/Under the Hood-20221005-120310.yaml deleted file mode 100644 index 797be31c319..00000000000 --- a/.changes/1.4.0/Under the Hood-20221005-120310.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Add mypy configs so `mypy` passes from CLI -time: 2022-10-05T12:03:10.061263-07:00 -custom: - Author: max-sixty - Issue: "5983" diff --git a/.changes/1.4.0/Under the Hood-20221007-094627.yaml b/.changes/1.4.0/Under the Hood-20221007-094627.yaml deleted file mode 100644 index d3a5da61566..00000000000 --- a/.changes/1.4.0/Under the Hood-20221007-094627.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Exception message cleanup. -time: 2022-10-07T09:46:27.682872-05:00 -custom: - Author: emmyoop - Issue: "6023" diff --git a/.changes/1.4.0/Under the Hood-20221007-140044.yaml b/.changes/1.4.0/Under the Hood-20221007-140044.yaml deleted file mode 100644 index 971d5a40ce8..00000000000 --- a/.changes/1.4.0/Under the Hood-20221007-140044.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Add dmypy cache to gitignore -time: 2022-10-07T14:00:44.227644-07:00 -custom: - Author: max-sixty - Issue: "6028" diff --git a/.changes/1.4.0/Under the Hood-20221013-181912.yaml b/.changes/1.4.0/Under the Hood-20221013-181912.yaml deleted file mode 100644 index 4f5218891b4..00000000000 --- a/.changes/1.4.0/Under the Hood-20221013-181912.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Provide useful errors when the value of 'materialized' is invalid -time: 2022-10-13T18:19:12.167548-04:00 -custom: - Author: peterallenwebb - Issue: "5229" diff --git a/.changes/1.4.0/Under the Hood-20221017-151511.yaml b/.changes/1.4.0/Under the Hood-20221017-151511.yaml deleted file mode 100644 index 94f4d27d6de..00000000000 --- a/.changes/1.4.0/Under the Hood-20221017-151511.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Fixed extra whitespace in strings introduced by black.
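The whitespace entry just above, together with the "Clean up string formatting" entry ([#6068]), targets a pattern black leaves behind when it splits long strings: adjacent f-string literals whose implicit concatenation can hide a stray space at the seam. An illustrative before/after, not taken from dbt's code:

```python
# The adjacent-literal pattern being cleaned up (illustrative example).
value = 42

# Before: two adjacent f-strings, implicitly concatenated by Python; a
# stray trailing space in the first fragment is easy to miss.
before = f"expected a string, got " f"{type(value)}"

# After: a single f-string with the same result and no hidden seam.
after = f"expected a string, got {type(value)}"
assert before == after
```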
-time: 2022-10-17T15:15:11.499246-05:00 -custom: - Author: luke-bassett - Issue: "1350" diff --git a/.changes/1.4.0/Under the Hood-20221017-155844.yaml b/.changes/1.4.0/Under the Hood-20221017-155844.yaml deleted file mode 100644 index c46ef040410..00000000000 --- a/.changes/1.4.0/Under the Hood-20221017-155844.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Clean up string formatting -time: 2022-10-17T15:58:44.676549-04:00 -custom: - Author: eve-johns - Issue: "6068" diff --git a/.changes/1.4.0/Under the Hood-20221028-104837.yaml b/.changes/1.4.0/Under the Hood-20221028-104837.yaml deleted file mode 100644 index 446d4898920..00000000000 --- a/.changes/1.4.0/Under the Hood-20221028-104837.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Remove the 'root_path' field from most nodes -time: 2022-10-28T10:48:37.687886-04:00 -custom: - Author: gshank - Issue: "6171" diff --git a/.changes/1.4.0/Under the Hood-20221028-110344.yaml b/.changes/1.4.0/Under the Hood-20221028-110344.yaml deleted file mode 100644 index cbe8dacb3d5..00000000000 --- a/.changes/1.4.0/Under the Hood-20221028-110344.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Combine certain logging events with different levels -time: 2022-10-28T11:03:44.887836-04:00 -custom: - Author: gshank - Issue: "6173" diff --git a/.changes/1.4.0/Under the Hood-20221108-074550.yaml b/.changes/1.4.0/Under the Hood-20221108-074550.yaml deleted file mode 100644 index a8fbc7e208b..00000000000 --- a/.changes/1.4.0/Under the Hood-20221108-074550.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Convert threading tests to pytest -time: 2022-11-08T07:45:50.589147-06:00 -custom: - Author: stu-k - Issue: "5942" diff --git a/.changes/1.4.0/Under the Hood-20221108-115633.yaml b/.changes/1.4.0/Under the Hood-20221108-115633.yaml deleted file mode 100644 index ea073719cda..00000000000 --- a/.changes/1.4.0/Under the Hood-20221108-115633.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Convert postgres index tests to pytest -time: 2022-11-08T11:56:33.743042-06:00 -custom: - Author: stu-k - Issue: "5770" diff --git a/.changes/1.4.0/Under the Hood-20221108-133104.yaml b/.changes/1.4.0/Under the Hood-20221108-133104.yaml deleted file mode 100644 index 6829dc097eb..00000000000 --- a/.changes/1.4.0/Under the Hood-20221108-133104.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Convert use color tests to pytest -time: 2022-11-08T13:31:04.788547-06:00 -custom: - Author: stu-k - Issue: "5771" diff --git a/.changes/1.4.0/Under the Hood-20221116-130037.yaml b/.changes/1.4.0/Under the Hood-20221116-130037.yaml deleted file mode 100644 index ecdedd6bd2d..00000000000 --- a/.changes/1.4.0/Under the Hood-20221116-130037.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Add github actions workflow to generate high level CLI API docs -time: 2022-11-16T13:00:37.916202-06:00 -custom: - Author: stu-k - Issue: "5942" diff --git a/.changes/1.4.0/Under the Hood-20221118-145717.yaml b/.changes/1.4.0/Under the Hood-20221118-145717.yaml deleted file mode 100644 index 934cd9dd5cb..00000000000 --- a/.changes/1.4.0/Under the Hood-20221118-145717.yaml +++ /dev/null @@ -1,8 +0,0 @@ -kind: Under the Hood -body: Functionality-neutral refactor of event logging system to improve encapsulation - and modularity. 
-time: 2022-11-18T14:57:17.792622-05:00 -custom: - Author: peterallenwebb - Issue: "6139" - PR: "6291" diff --git a/.changes/1.4.0/Under the Hood-20221205-164948.yaml b/.changes/1.4.0/Under the Hood-20221205-164948.yaml deleted file mode 100644 index 579f973955b..00000000000 --- a/.changes/1.4.0/Under the Hood-20221205-164948.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Consolidate ParsedNode and CompiledNode classes -time: 2022-12-05T16:49:48.563583-05:00 -custom: - Author: gshank - Issue: "6383" - PR: "6384" diff --git a/.changes/1.4.0/Under the Hood-20221206-094015.yaml b/.changes/1.4.0/Under the Hood-20221206-094015.yaml deleted file mode 100644 index ebcb9999430..00000000000 --- a/.changes/1.4.0/Under the Hood-20221206-094015.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Prevent doc gen workflow from running on forks -time: 2022-12-06T09:40:15.301984-06:00 -custom: - Author: stu-k - Issue: "6386" - PR: "6390" diff --git a/.changes/1.4.0/Under the Hood-20221206-113053.yaml b/.changes/1.4.0/Under the Hood-20221206-113053.yaml deleted file mode 100644 index a1f94f68f43..00000000000 --- a/.changes/1.4.0/Under the Hood-20221206-113053.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Fix intermittent database connection failure in Windows CI test -time: 2022-12-06T11:30:53.166009-07:00 -custom: - Author: MichelleArk dbeatty10 - Issue: "6394" - PR: "6395" diff --git a/.changes/1.4.0/Under the Hood-20221211-214240.yaml b/.changes/1.4.0/Under the Hood-20221211-214240.yaml deleted file mode 100644 index adeaefba257..00000000000 --- a/.changes/1.4.0/Under the Hood-20221211-214240.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Refactor and clean up manifest nodes -time: 2022-12-11T21:42:40.560074-05:00 -custom: - Author: gshank - Issue: "6426" - PR: "6427" diff --git a/.changes/1.4.0/Under the Hood-20221213-214106.yaml b/.changes/1.4.0/Under the Hood-20221213-214106.yaml deleted file mode 100644 index 708c84661d6..00000000000 --- a/.changes/1.4.0/Under the Hood-20221213-214106.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Under the Hood -body: Restore important legacy logging behaviors, following refactor which removed - them -time: 2022-12-13T21:41:06.815133-05:00 -custom: - Author: peterallenwebb - Issue: "6437" diff --git a/.changes/unreleased/Breaking Changes-20221205-141937.yaml b/.changes/unreleased/Breaking Changes-20221205-141937.yaml deleted file mode 100644 index 39506f9ab2b..00000000000 --- a/.changes/unreleased/Breaking Changes-20221205-141937.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: Breaking Changes -body: Cleaned up exceptions to directly raise in code. Also updated the existing - exception to meet PEP guidelines. Removed use of all exception - functions in the code base and marked them all as deprecated to be removed next - minor release.
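The breaking change recorded just above swaps helper functions that raised for exception classes raised directly, with the old helpers kept as deprecated shims for one more minor release. The shim pattern looks roughly like the sketch below; `SomethingWrongError` and `raise_something_wrong` are hypothetical names, not dbt's real API.

```python
# Deprecation-shim pattern for moving from raise-helpers to exception
# classes raised directly; all names here are hypothetical.
import warnings


class SomethingWrongError(RuntimeError):
    def __init__(self, detail: str):
        self.detail = detail
        super().__init__(self.get_message())

    def get_message(self) -> str:
        return f"something went wrong: {self.detail}"


def raise_something_wrong(detail: str):
    # Old entry point, kept for one minor release before removal.
    warnings.warn(
        "raise_something_wrong() is deprecated; raise SomethingWrongError instead",
        DeprecationWarning,
        stacklevel=2,
    )
    raise SomethingWrongError(detail)
```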
-time: 2022-12-05T14:19:37.863032-06:00 -custom: - Author: emmyoop - Issue: 6339 6393 6460 diff --git a/.changes/unreleased/Dependencies-20230104-000306.yaml b/.changes/unreleased/Dependencies-20230104-000306.yaml deleted file mode 100644 index 9da884ff595..00000000000 --- a/.changes/unreleased/Dependencies-20230104-000306.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: "Dependencies" -body: "Update agate requirement from <1.6.4,>=1.6 to >=1.6,<1.7.1 in /core" -time: 2023-01-04T00:03:06.00000Z -custom: - Author: dependabot[bot] - PR: 6506 diff --git a/.changes/unreleased/Docs-20230102-170216.yaml b/.changes/unreleased/Docs-20230102-170216.yaml deleted file mode 100644 index 602d7a80b24..00000000000 --- a/.changes/unreleased/Docs-20230102-170216.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Docs -body: Updated minor typos encountered when skipping profile setup -time: 2023-01-02T17:02:16.66596191-05:00 -custom: - Author: nshuman1 - Issue: 6529 diff --git a/.changes/unreleased/Features-20221207-091722.yaml b/.changes/unreleased/Features-20221207-091722.yaml deleted file mode 100644 index 16845f3663e..00000000000 --- a/.changes/unreleased/Features-20221207-091722.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: Making timestamp optional for metrics -time: 2022-12-07T09:17:22.571877-06:00 -custom: - Author: callum-mcdata - Issue: "6398" - PR: "9400" diff --git a/.changes/unreleased/Features-20221230-104820.yaml b/.changes/unreleased/Features-20221230-104820.yaml deleted file mode 100644 index 51dc7ca85f0..00000000000 --- a/.changes/unreleased/Features-20221230-104820.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: The meta configuration field is now included in the node_info property of structured - logs. -time: 2022-12-30T10:48:20.486416-06:00 -custom: - Author: tmastny - Issue: "6216" diff --git a/.changes/unreleased/Features-20230102-091335.yaml b/.changes/unreleased/Features-20230102-091335.yaml deleted file mode 100644 index 78154c12e53..00000000000 --- a/.changes/unreleased/Features-20230102-091335.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Features -body: Adds buildable selection mode -time: 2023-01-02T09:13:35.663627-05:00 -custom: - Author: agpapa - Issue: "6365" diff --git a/.changes/unreleased/Features-20230104-181003.yaml b/.changes/unreleased/Features-20230104-181003.yaml deleted file mode 100644 index 856329cb4a7..00000000000 --- a/.changes/unreleased/Features-20230104-181003.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Features -body: '--warn-error-options: Treat warnings as errors for specific events, based on - user configuration' -time: 2023-01-04T18:10:03.203142-05:00 -custom: - Author: MichelleArk - Issue: "6165" diff --git a/.changes/unreleased/Fixes-20221113-104150.yaml b/.changes/unreleased/Fixes-20221113-104150.yaml deleted file mode 100644 index 75c34bda436..00000000000 --- a/.changes/unreleased/Fixes-20221113-104150.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: fix missing f-strings, convert old .format() messages to f-strings for consistency -time: 2022-11-13T10:41:50.009727-05:00 -custom: - Author: justbldwn - Issue: "6241" - PR: "6243" diff --git a/.changes/unreleased/Fixes-20221117-220320.yaml b/.changes/unreleased/Fixes-20221117-220320.yaml deleted file mode 100644 index 2f71fe213fc..00000000000 --- a/.changes/unreleased/Fixes-20221117-220320.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Fix typo in util.py -time: 2022-11-17T22:03:20.4836855+09:00 -custom: - Author: eltociear - Issue: "4904" - PR: "6037" diff --git 
a/.changes/unreleased/Fixes-20221212-115912.yaml b/.changes/unreleased/Fixes-20221212-115912.yaml deleted file mode 100644 index 1dc428830eb..00000000000 --- a/.changes/unreleased/Fixes-20221212-115912.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: add pre-commit install to make dev script in Makefile -time: 2022-12-12T11:59:12.175136-05:00 -custom: - Author: justbldwn - Issue: "6269" - PR: "6417" diff --git a/.changes/unreleased/Fixes-20221213-092655.yaml b/.changes/unreleased/Fixes-20221213-092655.yaml deleted file mode 100644 index b187daf9ad8..00000000000 --- a/.changes/unreleased/Fixes-20221213-092655.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Late-rendering for `pre_` and `post_hook`s in `dbt_project.yml` -time: 2022-12-13T09:26:55.11397-07:00 -custom: - Author: dbeatty10 - Issue: "6411" diff --git a/.changes/unreleased/Fixes-20221213-113915.yaml b/.changes/unreleased/Fixes-20221213-113915.yaml deleted file mode 100644 index b92a2d6cbc9..00000000000 --- a/.changes/unreleased/Fixes-20221213-113915.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: '[CT-1591] Don''t parse empty Python files' -time: 2022-12-13T11:39:15.818464-08:00 -custom: - Author: aranke - Issue: "6345" diff --git a/.changes/unreleased/Fixes-20221226-010211.yaml b/.changes/unreleased/Fixes-20221226-010211.yaml deleted file mode 100644 index 4674b27df4d..00000000000 --- a/.changes/unreleased/Fixes-20221226-010211.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: fix docs generate --defer by adding defer_to_manifest to before_run -time: 2022-12-26T01:02:11.630614+01:00 -custom: - Author: mivanicova - Issue: "6488" diff --git a/.changes/unreleased/Fixes-20230101-223405.yaml b/.changes/unreleased/Fixes-20230101-223405.yaml deleted file mode 100644 index d90e24aaa56..00000000000 --- a/.changes/unreleased/Fixes-20230101-223405.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Bug when partial parsing with an empty schema file -time: 2023-01-01T22:34:05.97322-05:00 -custom: - Author: gshank - Issue: "4850" diff --git a/.changes/unreleased/Fixes-20230104-141047.yaml b/.changes/unreleased/Fixes-20230104-141047.yaml deleted file mode 100644 index 9d5466fbe68..00000000000 --- a/.changes/unreleased/Fixes-20230104-141047.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Fix DBT_FAVOR_STATE env var -time: 2023-01-04T14:10:47.637495-08:00 -custom: - Author: NiallRees - Issue: "5859" - PR: "6392" diff --git a/.changes/unreleased/Fixes-20230109-161254.yaml b/.changes/unreleased/Fixes-20230109-161254.yaml deleted file mode 100644 index 2ccd417b107..00000000000 --- a/.changes/unreleased/Fixes-20230109-161254.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Restore historical behavior of certain disabled test messages, so that they - are at the less obtrusive debug level, rather than the warning level. -time: 2023-01-09T16:12:54.064875-05:00 -custom: - Author: peterallenwebb - Issue: "6501" diff --git a/.changes/unreleased/Fixes-20230110-124132.yaml b/.changes/unreleased/Fixes-20230110-124132.yaml deleted file mode 100644 index cc484367fe0..00000000000 --- a/.changes/unreleased/Fixes-20230110-124132.yaml +++ /dev/null @@ -1,7 +0,0 @@ -kind: Fixes -body: Bump mashumaro version to get regression fix and add unit test to verify that - fix.
-time: 2023-01-10T12:41:32.339631-05:00 -custom: - Author: peterallenwebb - Issue: "6428" diff --git a/.changes/unreleased/Fixes-20230111-134058.yaml b/.changes/unreleased/Fixes-20230111-134058.yaml deleted file mode 100644 index 707cbfb39c6..00000000000 --- a/.changes/unreleased/Fixes-20230111-134058.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Fixes -body: Call update_event_status earlier for node results. Rename event 'HookFinished' -> FinishedRunningStats -time: 2023-01-11T13:40:58.577722+01:00 -custom: - Author: jtcohen6 - Issue: "6571" diff --git a/.changes/unreleased/Under the Hood-20221219-193435.yaml b/.changes/unreleased/Under the Hood-20221219-193435.yaml deleted file mode 100644 index 82388dbb759..00000000000 --- a/.changes/unreleased/Under the Hood-20221219-193435.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Treat dense text blobs as binary for `git grep` -time: 2022-12-19T19:34:35.890275-07:00 -custom: - Author: dbeatty10 - Issue: "6294" diff --git a/.changes/unreleased/Under the Hood-20221221-121904.yaml b/.changes/unreleased/Under the Hood-20221221-121904.yaml deleted file mode 100644 index d1f2f03bef7..00000000000 --- a/.changes/unreleased/Under the Hood-20221221-121904.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Prune partial parsing logging events -time: 2022-12-21T12:19:04.7402-05:00 -custom: - Author: gshank - Issue: "6313" diff --git a/.changes/unreleased/Under the Hood-20230104-155257.yaml b/.changes/unreleased/Under the Hood-20230104-155257.yaml deleted file mode 100644 index 2d10f09d857..00000000000 --- a/.changes/unreleased/Under the Hood-20230104-155257.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Updating the deprecation warning in the metric attributes renamed event -time: 2023-01-04T15:52:57.916398-06:00 -custom: - Author: callum-mcdata - Issue: "6507" diff --git a/.changes/unreleased/Under the Hood-20230106-112855.yaml b/.changes/unreleased/Under the Hood-20230106-112855.yaml deleted file mode 100644 index 1344b3397c0..00000000000 --- a/.changes/unreleased/Under the Hood-20230106-112855.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: '[CT-1693] Port severity test to Pytest' -time: 2023-01-06T11:28:55.800547-08:00 -custom: - Author: aranke - Issue: "6466" diff --git a/.changes/unreleased/Under the Hood-20230106-114412.yaml b/.changes/unreleased/Under the Hood-20230106-114412.yaml deleted file mode 100644 index e6f7e46f930..00000000000 --- a/.changes/unreleased/Under the Hood-20230106-114412.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: '[CT-1694] Deprecate event tracking tests' -time: 2023-01-06T11:44:12.210821-08:00 -custom: - Author: aranke - Issue: "6467" diff --git a/.changes/unreleased/Under the Hood-20230109-095907.yaml b/.changes/unreleased/Under the Hood-20230109-095907.yaml deleted file mode 100644 index 2133f06ddad..00000000000 --- a/.changes/unreleased/Under the Hood-20230109-095907.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Reorganize structured logging events to have two top keys -time: 2023-01-09T09:59:07.842187-05:00 -custom: - Author: gshank - Issue: "6311" diff --git a/.changes/unreleased/Under the Hood-20230110-114233.yaml b/.changes/unreleased/Under the Hood-20230110-114233.yaml deleted file mode 100644 index c18a26d4a03..00000000000 --- a/.changes/unreleased/Under the Hood-20230110-114233.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Combine some logging events -time: 2023-01-10T11:42:33.580756-05:00 -custom: - 
Author: gshank - Issue: 1716 1717 1719 diff --git a/.changes/unreleased/Under the Hood-20230110-145648.yaml b/.changes/unreleased/Under the Hood-20230110-145648.yaml deleted file mode 100644 index 9a21f1da645..00000000000 --- a/.changes/unreleased/Under the Hood-20230110-145648.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Under the Hood -body: Check length of escaped strings in the adapter test -time: 2023-01-10T14:56:48.044198-07:00 -custom: - Author: dbeatty10 - Issue: "6566" diff --git a/CHANGELOG.md b/CHANGELOG.md index 5925cb9492d..45347e50b1f 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,101 +5,12 @@ - "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version. - Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-changelog-entry) -## dbt-core 1.4.0-b1 - December 15, 2022 - -### Features - -- Added favor-state flag to optionally favor state nodes even if unselected node exists ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- Update structured logging. Convert to using protobuf messages. Ensure events are enriched with node_info. ([#5610](https://github.com/dbt-labs/dbt-core/issues/5610)) -- Friendlier error messages when packages.yml is malformed ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- Allow partitions in external tables to be supplied as a list ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) -- extend -f flag shorthand for seed command ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) -- This pulls the profile name from args when constructing a RuntimeConfig in lib.py, enabling the dbt-server to override the value that's in the dbt_project.yml ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) -- Adding tarball install method for packages. Allowing package tarball to be specified via url in the packages.yaml. ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) -- Added an md5 function to the base context ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) -- Exposures support metrics in lineage ([#6057](https://github.com/dbt-labs/dbt-core/issues/6057)) -- Add support for Python 3.11 ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) -- incremental predicates ([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) - -### Fixes - -- Account for disabled flags on models in schema files more completely ([#3992](https://github.com/dbt-labs/dbt-core/issues/3992)) -- Add validation of enabled config for metrics, exposures and sources ([#6030](https://github.com/dbt-labs/dbt-core/issues/6030)) -- check length of args of python model function before accessing it ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) -- Add functors to ensure event types with str-type attributes are initialized to spec, even when provided non-str type params. 
([#5436](https://github.com/dbt-labs/dbt-core/issues/5436)) -- Allow hooks to fail without halting execution flow ([#5625](https://github.com/dbt-labs/dbt-core/issues/5625)) -- Clarify Error Message for how many models are allowed in a Python file ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) -- After this, it will be possible to use default values for dbt.config.get ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) -- Use full path for writing manifest ([#6055](https://github.com/dbt-labs/dbt-core/issues/6055)) -- [CT-1284] Change Python model default materialization to table ([#6345](https://github.com/dbt-labs/dbt-core/issues/6345)) -- Repair a regression which prevented basic logging before the logging subsystem is completely configured. ([#6434](https://github.com/dbt-labs/dbt-core/issues/6434)) - -### Docs - -- minor doc correction ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) -- Generate API docs for new CLI interface ([dbt-docs/#5528](https://github.com/dbt-labs/dbt-docs/issues/5528)) -- ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) -- Fix rendering of sample code for metrics ([dbt-docs/#323](https://github.com/dbt-labs/dbt-docs/issues/323)) -- Alphabetize `core/dbt/README.md` ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368)) - -### Under the Hood - -- Put black config in explicit config ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946)) -- Added flat_graph attribute to the Manifest class's deepcopy() coverage ([#5809](https://github.com/dbt-labs/dbt-core/issues/5809)) -- Add mypy configs so `mypy` passes from CLI ([#5983](https://github.com/dbt-labs/dbt-core/issues/5983)) -- Exception message cleanup. ([#6023](https://github.com/dbt-labs/dbt-core/issues/6023)) -- Add dmypy cache to gitignore ([#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) -- Provide useful errors when the value of 'materialized' is invalid ([#5229](https://github.com/dbt-labs/dbt-core/issues/5229)) -- Clean up string formatting ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) -- Fixed extra whitespace in strings introduced by black. ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) -- Remove the 'root_path' field from most nodes ([#6171](https://github.com/dbt-labs/dbt-core/issues/6171)) -- Combine certain logging events with different levels ([#6173](https://github.com/dbt-labs/dbt-core/issues/6173)) -- Convert threading tests to pytest ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) -- Convert postgres index tests to pytest ([#5770](https://github.com/dbt-labs/dbt-core/issues/5770)) -- Convert use color tests to pytest ([#5771](https://github.com/dbt-labs/dbt-core/issues/5771)) -- Add github actions workflow to generate high level CLI API docs ([#5942](https://github.com/dbt-labs/dbt-core/issues/5942)) -- Functionality-neutral refactor of event logging system to improve encapsulation and modularity.
([#6139](https://github.com/dbt-labs/dbt-core/issues/6139)) -- Consolidate ParsedNode and CompiledNode classes ([#6383](https://github.com/dbt-labs/dbt-core/issues/6383)) -- Prevent doc gen workflow from running on forks ([#6386](https://github.com/dbt-labs/dbt-core/issues/6386)) -- Fix intermittent database connection failure in Windows CI test ([#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) -- Refactor and clean up manifest nodes ([#6426](https://github.com/dbt-labs/dbt-core/issues/6426)) -- Restore important legacy logging behaviors, following refactor which removed them ([#6437](https://github.com/dbt-labs/dbt-core/issues/6437)) - -### Dependencies - -- Update pathspec requirement from ~=0.9.0 to >=0.9,<0.11 in /core ([#5917](https://github.com/dbt-labs/dbt-core/pull/5917)) -- Bump black from 22.8.0 to 22.10.0 ([#6019](https://github.com/dbt-labs/dbt-core/pull/6019)) -- Bump mashumaro[msgpack] from 3.0.4 to 3.1.1 in /core ([#6108](https://github.com/dbt-labs/dbt-core/pull/6108)) -- Update colorama requirement from <0.4.6,>=0.3.9 to >=0.3.9,<0.4.7 in /core ([#6144](https://github.com/dbt-labs/dbt-core/pull/6144)) -- Bump mashumaro[msgpack] from 3.1.1 to 3.2 in /core ([#4904](https://github.com/dbt-labs/dbt-core/issues/4904)) - -### Contributors -- [@andy-clapson](https://github.com/andy-clapson) ([dbt-docs/#5791](https://github.com/dbt-labs/dbt-docs/issues/5791)) -- [@chamini2](https://github.com/chamini2) ([#6041](https://github.com/dbt-labs/dbt-core/issues/6041)) -- [@daniel-murray](https://github.com/daniel-murray) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- [@dave-connors-3](https://github.com/dave-connors-3) ([#5990](https://github.com/dbt-labs/dbt-core/issues/5990)) -- [@dbeatty10](https://github.com/dbeatty10) ([dbt-docs/#6368](https://github.com/dbt-labs/dbt-docs/issues/6368), [#6394](https://github.com/dbt-labs/dbt-core/issues/6394)) -- [@devmessias](https://github.com/devmessias) ([#6309](https://github.com/dbt-labs/dbt-core/issues/6309)) -- [@eve-johns](https://github.com/eve-johns) ([#6068](https://github.com/dbt-labs/dbt-core/issues/6068)) -- [@haritamar](https://github.com/haritamar) ([#6246](https://github.com/dbt-labs/dbt-core/issues/6246)) -- [@jared-rimmer](https://github.com/jared-rimmer) ([#5486](https://github.com/dbt-labs/dbt-core/issues/5486)) -- [@josephberni](https://github.com/josephberni) ([#2968](https://github.com/dbt-labs/dbt-core/issues/2968)) -- [@joshuataylor](https://github.com/joshuataylor) ([#6147](https://github.com/dbt-labs/dbt-core/issues/6147)) -- [@justbldwn](https://github.com/justbldwn) ([#6245](https://github.com/dbt-labs/dbt-core/issues/6245)) -- [@luke-bassett](https://github.com/luke-bassett) ([#1350](https://github.com/dbt-labs/dbt-core/issues/1350)) -- [@max-sixty](https://github.com/max-sixty) ([#5946](https://github.com/dbt-labs/dbt-core/issues/5946), [#5983](https://github.com/dbt-labs/dbt-core/issues/5983), [#6028](https://github.com/dbt-labs/dbt-core/issues/6028)) -- [@paulbenschmidt](https://github.com/paulbenschmidt) ([dbt-docs/#5880](https://github.com/dbt-labs/dbt-docs/issues/5880)) -- [@pgoslatara](https://github.com/pgoslatara) ([#5929](https://github.com/dbt-labs/dbt-core/issues/5929)) -- [@racheldaniel](https://github.com/racheldaniel) ([#6201](https://github.com/dbt-labs/dbt-core/issues/6201)) -- [@timle2](https://github.com/timle2) ([#4205](https://github.com/dbt-labs/dbt-core/issues/4205)) -- [@dave-connors-3](https://github.com/dave-connors-3) 
([#5680](https://github.com/dbt-labs/dbt-core/issues/5680)) - - ## Previous Releases For information on prior major and minor releases, see their changelogs: +* [1.4](https://github.com/dbt-labs/dbt-core/blob/1.4.latest/CHANGELOG.md) * [1.3](https://github.com/dbt-labs/dbt-core/blob/1.3.latest/CHANGELOG.md) * [1.2](https://github.com/dbt-labs/dbt-core/blob/1.2.latest/CHANGELOG.md) * [1.1](https://github.com/dbt-labs/dbt-core/blob/1.1.latest/CHANGELOG.md) diff --git a/core/dbt/version.py b/core/dbt/version.py index 21eba9e0d6f..d836e2b4a43 100644 --- a/core/dbt/version.py +++ b/core/dbt/version.py @@ -235,5 +235,5 @@ def _get_adapter_plugin_names() -> Iterator[str]: yield plugin_name -__version__ = "1.4.0b1" +__version__ = "1.5.0a1" installed = get_installed_version() diff --git a/core/setup.py b/core/setup.py index 5378f0e6065..b5c43cc184a 100644 --- a/core/setup.py +++ b/core/setup.py @@ -25,7 +25,7 @@ package_name = "dbt-core" -package_version = "1.4.0b1" +package_version = "1.5.0a1" description = """With dbt, data analysts and engineers can build analytics \ the way engineers build applications.""" diff --git a/docker/Dockerfile b/docker/Dockerfile index 72332c35de9..4061e1e9746 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -14,12 +14,12 @@ FROM --platform=$build_for python:3.10.7-slim-bullseye as base # N.B. The refs updated automagically every release via bumpversion # N.B. dbt-postgres is currently found in the core codebase so a value of dbt-core@ is correct -ARG dbt_core_ref=dbt-core@v1.4.0b1 -ARG dbt_postgres_ref=dbt-core@v1.4.0b1 -ARG dbt_redshift_ref=dbt-redshift@v1.4.0b1 -ARG dbt_bigquery_ref=dbt-bigquery@v1.4.0b1 -ARG dbt_snowflake_ref=dbt-snowflake@v1.4.0b1 -ARG dbt_spark_ref=dbt-spark@v1.4.0b1 +ARG dbt_core_ref=dbt-core@v1.5.0a1 +ARG dbt_postgres_ref=dbt-core@v1.5.0a1 +ARG dbt_redshift_ref=dbt-redshift@v1.5.0a1 +ARG dbt_bigquery_ref=dbt-bigquery@v1.5.0a1 +ARG dbt_snowflake_ref=dbt-snowflake@v1.5.0a1 +ARG dbt_spark_ref=dbt-spark@v1.5.0a1 # special case args ARG dbt_spark_version=all ARG dbt_third_party diff --git a/plugins/postgres/dbt/adapters/postgres/__version__.py b/plugins/postgres/dbt/adapters/postgres/__version__.py index 27cfeecd9e8..219c289b1bf 100644 --- a/plugins/postgres/dbt/adapters/postgres/__version__.py +++ b/plugins/postgres/dbt/adapters/postgres/__version__.py @@ -1 +1 @@ -version = "1.4.0b1" +version = "1.5.0a1" diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py index 00a91759aec..ade5f95121b 100644 --- a/plugins/postgres/setup.py +++ b/plugins/postgres/setup.py @@ -41,7 +41,7 @@ def _dbt_psycopg2_name(): package_name = "dbt-postgres" -package_version = "1.4.0b1" +package_version = "1.5.0a1" description = """The postgres adapter plugin for dbt (data build tool)""" this_directory = os.path.abspath(os.path.dirname(__file__)) diff --git a/tests/adapter/dbt/tests/adapter/__version__.py b/tests/adapter/dbt/tests/adapter/__version__.py index 27cfeecd9e8..219c289b1bf 100644 --- a/tests/adapter/dbt/tests/adapter/__version__.py +++ b/tests/adapter/dbt/tests/adapter/__version__.py @@ -1 +1 @@ -version = "1.4.0b1" +version = "1.5.0a1" diff --git a/tests/adapter/setup.py b/tests/adapter/setup.py index f9ac627e445..c4c1e393483 100644 --- a/tests/adapter/setup.py +++ b/tests/adapter/setup.py @@ -20,7 +20,7 @@ package_name = "dbt-tests-adapter" -package_version = "1.4.0b1" +package_version = "1.5.0a1" description = """The dbt adapter tests for adapter plugins""" this_directory = os.path.abspath(os.path.dirname(__file__)) From 
2bfc6917e262beec77b942fbce8d3b6afbe0e45f Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Thu, 12 Jan 2023 14:33:56 -0600 Subject: [PATCH 108/156] finish message rename in types.proto (#6594) * finish message rename in types.proto * add new parameter --- core/dbt/config/utils.py | 4 ++-- core/dbt/events/proto_types.py | 4 +++- core/dbt/events/types.proto | 7 ++++--- core/dbt/exceptions.py | 2 +- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/core/dbt/config/utils.py b/core/dbt/config/utils.py index cb7c90eac68..c69f0d5c79c 100644 --- a/core/dbt/config/utils.py +++ b/core/dbt/config/utils.py @@ -9,7 +9,7 @@ from dbt.config.renderer import DbtProjectYamlRenderer, ProfileRenderer from dbt.events.functions import fire_event from dbt.events.types import InvalidOptionYAML -from dbt.exceptions import DbtValidationError, OptionNotYamlDict +from dbt.exceptions import DbtValidationError, OptionNotYamlDictError def parse_cli_vars(var_string: str) -> Dict[str, Any]: @@ -23,7 +23,7 @@ def parse_cli_yaml_string(var_string: str, cli_option_name: str) -> Dict[str, An if var_type is dict: return cli_vars else: - raise OptionNotYamlDict(var_type, cli_option_name) + raise OptionNotYamlDictError(var_type, cli_option_name) except DbtValidationError: fire_event(InvalidOptionYAML(option_name=cli_option_name)) raise diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 2fc6ad1ccd7..da8721d55b9 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -47,7 +47,9 @@ class NodeInfo(betterproto.Message): node_status: str = betterproto.string_field(6) node_started_at: str = betterproto.string_field(7) node_finished_at: str = betterproto.string_field(8) - meta: str = betterproto.string_field(9) + meta: Dict[str, str] = betterproto.map_field( + 9, betterproto.TYPE_STRING, betterproto.TYPE_STRING + ) @dataclass diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 21f9c9ee4bb..71e7fc3176c 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -124,12 +124,13 @@ message MissingProfileTargetMsg { // Skipped A006, A007 // A008 -message InvalidVarsYAML { +message InvalidOptionYAML { + string option_name = 1; } -message InvalidVarsYAMLMsg { +message InvalidOptionYAMLMsg { EventInfo info = 1; - InvalidVarsYAML data = 2; + InvalidOptionYAML data = 2; } // A009 diff --git a/core/dbt/exceptions.py b/core/dbt/exceptions.py index b8f99eb5fdc..4e7b6c9fe6a 100644 --- a/core/dbt/exceptions.py +++ b/core/dbt/exceptions.py @@ -1703,7 +1703,7 @@ def get_message(self) -> str: return msg -class OptionNotYamlDict(CompilationError): +class OptionNotYamlDictError(CompilationError): def __init__(self, var_type, option_name): self.var_type = var_type self.option_name = option_name From f789b2535a14cbe6210c288bf799562535a0fd98 Mon Sep 17 00:00:00 2001 From: Chenyu Li Date: Thu, 12 Jan 2023 16:05:51 -0800 Subject: [PATCH 109/156] convert adapter table materialization test (#6595) --- .../models/materialized.sql | 9 -- .../018_adapter_ddl_tests/seed.sql | 110 ------------------ .../018_adapter_ddl_tests/test_adapter_ddl.py | 23 ---- .../dbt/tests/adapter/basic/test_table_mat.py | 97 +++++++++++++++ 4 files changed, 97 insertions(+), 142 deletions(-) delete mode 100644 test/integration/018_adapter_ddl_tests/models/materialized.sql delete mode 100644 test/integration/018_adapter_ddl_tests/seed.sql delete mode 100644 test/integration/018_adapter_ddl_tests/test_adapter_ddl.py create mode 100644 
tests/adapter/dbt/tests/adapter/basic/test_table_mat.py diff --git a/test/integration/018_adapter_ddl_tests/models/materialized.sql b/test/integration/018_adapter_ddl_tests/models/materialized.sql deleted file mode 100644 index edd9c8e04bf..00000000000 --- a/test/integration/018_adapter_ddl_tests/models/materialized.sql +++ /dev/null @@ -1,9 +0,0 @@ -{{ - config( - materialized = "table", - sort = 'first_name', - dist = 'first_name' - ) -}} - -select * from {{ this.schema }}.seed diff --git a/test/integration/018_adapter_ddl_tests/seed.sql b/test/integration/018_adapter_ddl_tests/seed.sql deleted file mode 100644 index 695cfbeffdf..00000000000 --- a/test/integration/018_adapter_ddl_tests/seed.sql +++ /dev/null @@ -1,110 +0,0 @@ -create table {schema}.seed ( - id BIGSERIAL PRIMARY KEY, - first_name VARCHAR(50), - last_name VARCHAR(50), - email VARCHAR(50), - gender VARCHAR(50), - ip_address VARCHAR(20) -); - - -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jack', 'Hunter', 'jhunter0@pbs.org', 'Male', '59.80.20.168'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kathryn', 'Walker', 'kwalker1@ezinearticles.com', 'Female', '194.121.179.35'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Ryan', 'gryan2@com.com', 'Male', '11.3.212.243'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Bonnie', 'Spencer', 'bspencer3@ameblo.jp', 'Female', '216.32.196.175'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Harold', 'Taylor', 'htaylor4@people.com.cn', 'Male', '253.10.246.136'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jacqueline', 'Griffin', 'jgriffin5@t.co', 'Female', '16.13.192.220'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Wanda', 'Arnold', 'warnold6@google.nl', 'Female', '232.116.150.64'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Craig', 'Ortiz', 'cortiz7@sciencedaily.com', 'Male', '199.126.106.13'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gary', 'Day', 'gday8@nih.gov', 'Male', '35.81.68.186'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Rose', 'Wright', 'rwright9@yahoo.co.jp', 'Female', '236.82.178.100'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Raymond', 'Kelley', 'rkelleya@fc2.com', 'Male', '213.65.166.67'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Robinson', 'grobinsonb@disqus.com', 'Male', '72.232.194.193'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Mildred', 'Martinez', 'mmartinezc@samsung.com', 'Female', '198.29.112.5'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Dennis', 'Arnold', 'darnoldd@google.com', 'Male', '86.96.3.250'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Judy', 'Gray', 'jgraye@opensource.org', 'Female', '79.218.162.245'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Theresa', 'Garza', 'tgarzaf@epa.gov', 'Female', '21.59.100.54'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Robertson', 'grobertsong@csmonitor.com', 
'Male', '131.134.82.96'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Philip', 'Hernandez', 'phernandezh@adobe.com', 'Male', '254.196.137.72'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Julia', 'Gonzalez', 'jgonzalezi@cam.ac.uk', 'Female', '84.240.227.174'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrew', 'Davis', 'adavisj@patch.com', 'Male', '9.255.67.25'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kimberly', 'Harper', 'kharperk@foxnews.com', 'Female', '198.208.120.253'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Mark', 'Martin', 'mmartinl@marketwatch.com', 'Male', '233.138.182.153'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Cynthia', 'Ruiz', 'cruizm@google.fr', 'Female', '18.178.187.201'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Samuel', 'Carroll', 'scarrolln@youtu.be', 'Male', '128.113.96.122'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jennifer', 'Larson', 'jlarsono@vinaora.com', 'Female', '98.234.85.95'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ashley', 'Perry', 'aperryp@rakuten.co.jp', 'Female', '247.173.114.52'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Howard', 'Rodriguez', 'hrodriguezq@shutterfly.com', 'Male', '231.188.95.26'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Amy', 'Brooks', 'abrooksr@theatlantic.com', 'Female', '141.199.174.118'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Louise', 'Warren', 'lwarrens@adobe.com', 'Female', '96.105.158.28'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Tina', 'Watson', 'twatsont@myspace.com', 'Female', '251.142.118.177'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Janice', 'Kelley', 'jkelleyu@creativecommons.org', 'Female', '239.167.34.233'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Terry', 'Mccoy', 'tmccoyv@bravesites.com', 'Male', '117.201.183.203'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jeffrey', 'Morgan', 'jmorganw@surveymonkey.com', 'Male', '78.101.78.149'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Louis', 'Harvey', 'lharveyx@sina.com.cn', 'Male', '51.50.0.167'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Philip', 'Miller', 'pmillery@samsung.com', 'Male', '103.255.222.110'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Willie', 'Marshall', 'wmarshallz@ow.ly', 'Male', '149.219.91.68'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Patrick', 'Lopez', 'plopez10@redcross.org', 'Male', '250.136.229.89'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Adam', 'Jenkins', 'ajenkins11@harvard.edu', 'Male', '7.36.112.81'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Benjamin', 'Cruz', 'bcruz12@linkedin.com', 'Male', '32.38.98.15'); -insert into {schema}.seed (first_name, 
last_name, email, gender, ip_address) values ('Ruby', 'Hawkins', 'rhawkins13@gmpg.org', 'Female', '135.171.129.255'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carlos', 'Barnes', 'cbarnes14@a8.net', 'Male', '240.197.85.140'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ruby', 'Griffin', 'rgriffin15@bravesites.com', 'Female', '19.29.135.24'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Sean', 'Mason', 'smason16@icq.com', 'Male', '159.219.155.249'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Anthony', 'Payne', 'apayne17@utexas.edu', 'Male', '235.168.199.218'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Steve', 'Cruz', 'scruz18@pcworld.com', 'Male', '238.201.81.198'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Anthony', 'Garcia', 'agarcia19@flavors.me', 'Male', '25.85.10.18'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Doris', 'Lopez', 'dlopez1a@sphinn.com', 'Female', '245.218.51.238'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Susan', 'Nichols', 'snichols1b@freewebs.com', 'Female', '199.99.9.61'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Wanda', 'Ferguson', 'wferguson1c@yahoo.co.jp', 'Female', '236.241.135.21'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrea', 'Pierce', 'apierce1d@google.co.uk', 'Female', '132.40.10.209'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Lawrence', 'Phillips', 'lphillips1e@jugem.jp', 'Male', '72.226.82.87'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Judy', 'Gilbert', 'jgilbert1f@multiply.com', 'Female', '196.250.15.142'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Eric', 'Williams', 'ewilliams1g@joomla.org', 'Male', '222.202.73.126'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ralph', 'Romero', 'rromero1h@sogou.com', 'Male', '123.184.125.212'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jean', 'Wilson', 'jwilson1i@ocn.ne.jp', 'Female', '176.106.32.194'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Lori', 'Reynolds', 'lreynolds1j@illinois.edu', 'Female', '114.181.203.22'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Moreno', 'dmoreno1k@bbc.co.uk', 'Male', '233.249.97.60'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Steven', 'Berry', 'sberry1l@eepurl.com', 'Male', '186.193.50.50'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Theresa', 'Shaw', 'tshaw1m@people.com.cn', 'Female', '120.37.71.222'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('John', 'Stephens', 'jstephens1n@nationalgeographic.com', 'Male', '191.87.127.115'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Richard', 'Jacobs', 'rjacobs1o@state.tx.us', 'Male', '66.210.83.155'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Andrew', 'Lawson', 
'alawson1p@over-blog.com', 'Male', '54.98.36.94'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Peter', 'Morgan', 'pmorgan1q@rambler.ru', 'Male', '14.77.29.106'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Nicole', 'Garrett', 'ngarrett1r@zimbio.com', 'Female', '21.127.74.68'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Joshua', 'Kim', 'jkim1s@edublogs.org', 'Male', '57.255.207.41'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ralph', 'Roberts', 'rroberts1t@people.com.cn', 'Male', '222.143.131.109'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('George', 'Montgomery', 'gmontgomery1u@smugmug.com', 'Male', '76.75.111.77'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Gerald', 'Alvarez', 'galvarez1v@flavors.me', 'Male', '58.157.186.194'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Olson', 'dolson1w@whitehouse.gov', 'Male', '69.65.74.135'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carlos', 'Morgan', 'cmorgan1x@pbs.org', 'Male', '96.20.140.87'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Aaron', 'Stanley', 'astanley1y@webnode.com', 'Male', '163.119.217.44'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Virginia', 'Long', 'vlong1z@spiegel.de', 'Female', '204.150.194.182'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Robert', 'Berry', 'rberry20@tripadvisor.com', 'Male', '104.19.48.241'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Antonio', 'Brooks', 'abrooks21@unesco.org', 'Male', '210.31.7.24'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Ruby', 'Garcia', 'rgarcia22@ovh.net', 'Female', '233.218.162.214'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jack', 'Hanson', 'jhanson23@blogtalkradio.com', 'Male', '31.55.46.199'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kathryn', 'Nelson', 'knelson24@walmart.com', 'Female', '14.189.146.41'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jason', 'Reed', 'jreed25@printfriendly.com', 'Male', '141.189.89.255'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('George', 'Coleman', 'gcoleman26@people.com.cn', 'Male', '81.189.221.144'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Rose', 'King', 'rking27@ucoz.com', 'Female', '212.123.168.231'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Johnny', 'Holmes', 'jholmes28@boston.com', 'Male', '177.3.93.188'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Katherine', 'Gilbert', 'kgilbert29@altervista.org', 'Female', '199.215.169.61'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Joshua', 'Thomas', 'jthomas2a@ustream.tv', 'Male', '0.8.205.30'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Julie', 'Perry', 'jperry2b@opensource.org', 'Female', '60.116.114.192'); -insert into {schema}.seed 
(first_name, last_name, email, gender, ip_address) values ('Richard', 'Perry', 'rperry2c@oracle.com', 'Male', '181.125.70.232'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Kenneth', 'Ruiz', 'kruiz2d@wikimedia.org', 'Male', '189.105.137.109'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jose', 'Morgan', 'jmorgan2e@webnode.com', 'Male', '101.134.215.156'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Donald', 'Campbell', 'dcampbell2f@goo.ne.jp', 'Male', '102.120.215.84'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Debra', 'Collins', 'dcollins2g@uol.com.br', 'Female', '90.13.153.235'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Jesse', 'Johnson', 'jjohnson2h@stumbleupon.com', 'Male', '225.178.125.53'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Elizabeth', 'Stone', 'estone2i@histats.com', 'Female', '123.184.126.221'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Angela', 'Rogers', 'arogers2j@goodreads.com', 'Female', '98.104.132.187'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Emily', 'Dixon', 'edixon2k@mlb.com', 'Female', '39.190.75.57'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Albert', 'Scott', 'ascott2l@tinypic.com', 'Male', '40.209.13.189'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Barbara', 'Peterson', 'bpeterson2m@ow.ly', 'Female', '75.249.136.180'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Adam', 'Greene', 'agreene2n@fastcompany.com', 'Male', '184.173.109.144'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Earl', 'Sanders', 'esanders2o@hc360.com', 'Male', '247.34.90.117'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Angela', 'Brooks', 'abrooks2p@mtv.com', 'Female', '10.63.249.126'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Harold', 'Foster', 'hfoster2q@privacy.gov.au', 'Male', '139.214.40.244'); -insert into {schema}.seed (first_name, last_name, email, gender, ip_address) values ('Carl', 'Meyer', 'cmeyer2r@disqus.com', 'Male', '204.117.7.88'); diff --git a/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py b/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py deleted file mode 100644 index 99162efde67..00000000000 --- a/test/integration/018_adapter_ddl_tests/test_adapter_ddl.py +++ /dev/null @@ -1,23 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - -class TestAdapterDDL(DBTIntegrationTest): - - def setUp(self): - DBTIntegrationTest.setUp(self) - - self.run_sql_file("seed.sql") - - @property - def schema(self): - return "adaper_ddl_018" - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_sort_and_dist_keys_are_nops_on_postgres(self): - results = self.run_dbt(['run']) - self.assertEqual(len(results), 1) - - self.assertTablesEqual("seed","materialized") diff --git a/tests/adapter/dbt/tests/adapter/basic/test_table_mat.py b/tests/adapter/dbt/tests/adapter/basic/test_table_mat.py new file mode 100644 index 00000000000..7191195e7cc --- /dev/null +++ 
b/tests/adapter/dbt/tests/adapter/basic/test_table_mat.py @@ -0,0 +1,97 @@ +import pytest + +from dbt.tests.util import run_dbt, check_relations_equal + + +seeds__seed_csv = """id,first_name,last_name,email,gender,ip_address +1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168 +2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35 +3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243 +4,Bonnie,Spencer,bspencer3@ameblo.jp,Female,216.32.196.175 +5,Harold,Taylor,htaylor4@people.com.cn,Male,253.10.246.136 +6,Jacqueline,Griffin,jgriffin5@t.co,Female,16.13.192.220 +7,Wanda,Arnold,warnold6@google.nl,Female,232.116.150.64 +8,Craig,Ortiz,cortiz7@sciencedaily.com,Male,199.126.106.13 +9,Gary,Day,gday8@nih.gov,Male,35.81.68.186 +10,Rose,Wright,rwright9@yahoo.co.jp,Female,236.82.178.100 +11,Raymond,Kelley,rkelleya@fc2.com,Male,213.65.166.67 +12,Gerald,Robinson,grobinsonb@disqus.com,Male,72.232.194.193 +13,Mildred,Martinez,mmartinezc@samsung.com,Female,198.29.112.5 +14,Dennis,Arnold,darnoldd@google.com,Male,86.96.3.250 +15,Judy,Gray,jgraye@opensource.org,Female,79.218.162.245 +16,Theresa,Garza,tgarzaf@epa.gov,Female,21.59.100.54 +17,Gerald,Robertson,grobertsong@csmonitor.com,Male,131.134.82.96 +18,Philip,Hernandez,phernandezh@adobe.com,Male,254.196.137.72 +19,Julia,Gonzalez,jgonzalezi@cam.ac.uk,Female,84.240.227.174 +20,Andrew,Davis,adavisj@patch.com,Male,9.255.67.25 +21,Kimberly,Harper,kharperk@foxnews.com,Female,198.208.120.253 +22,Mark,Martin,mmartinl@marketwatch.com,Male,233.138.182.153 +23,Cynthia,Ruiz,cruizm@google.fr,Female,18.178.187.201 +24,Samuel,Carroll,scarrolln@youtu.be,Male,128.113.96.122 +25,Jennifer,Larson,jlarsono@vinaora.com,Female,98.234.85.95 +26,Ashley,Perry,aperryp@rakuten.co.jp,Female,247.173.114.52 +27,Howard,Rodriguez,hrodriguezq@shutterfly.com,Male,231.188.95.26 +28,Amy,Brooks,abrooksr@theatlantic.com,Female,141.199.174.118 +29,Louise,Warren,lwarrens@adobe.com,Female,96.105.158.28 +30,Tina,Watson,twatsont@myspace.com,Female,251.142.118.177 +31,Janice,Kelley,jkelleyu@creativecommons.org,Female,239.167.34.233 +32,Terry,Mccoy,tmccoyv@bravesites.com,Male,117.201.183.203 +33,Jeffrey,Morgan,jmorganw@surveymonkey.com,Male,78.101.78.149 +34,Louis,Harvey,lharveyx@sina.com.cn,Male,51.50.0.167 +35,Philip,Miller,pmillery@samsung.com,Male,103.255.222.110 +36,Willie,Marshall,wmarshallz@ow.ly,Male,149.219.91.68 +37,Patrick,Lopez,plopez10@redcross.org,Male,250.136.229.89 +38,Adam,Jenkins,ajenkins11@harvard.edu,Male,7.36.112.81 +39,Benjamin,Cruz,bcruz12@linkedin.com,Male,32.38.98.15 +40,Ruby,Hawkins,rhawkins13@gmpg.org,Female,135.171.129.255 +41,Carlos,Barnes,cbarnes14@a8.net,Male,240.197.85.140 +42,Ruby,Griffin,rgriffin15@bravesites.com,Female,19.29.135.24 +43,Sean,Mason,smason16@icq.com,Male,159.219.155.249 +44,Anthony,Payne,apayne17@utexas.edu,Male,235.168.199.218 +45,Steve,Cruz,scruz18@pcworld.com,Male,238.201.81.198 +46,Anthony,Garcia,agarcia19@flavors.me,Male,25.85.10.18 +47,Doris,Lopez,dlopez1a@sphinn.com,Female,245.218.51.238 +48,Susan,Nichols,snichols1b@freewebs.com,Female,199.99.9.61 +49,Wanda,Ferguson,wferguson1c@yahoo.co.jp,Female,236.241.135.21 +50,Andrea,Pierce,apierce1d@google.co.uk,Female,132.40.10.209 +""" + +model_sql = """ +{{ + config( + materialized = "table", + sort = 'first_name', + dist = 'first_name' + ) +}} + +select * from {{ this.schema }}.seed +""" + + +class BaseTableMaterialization: + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": seeds__seed_csv} + + @pytest.fixture(scope="class") + def models(self): + return {'materialized.sql': 
model_sql} + + def test_table_materialization_sort_dist_no_op(self, project): + # basic table mat test, sort and dist is not supported by postgres so the result table would still be same as input + + # check seed + results = run_dbt(["seed"]) + assert len(results) == 1 + + # check run + results = run_dbt(["run"]) + assert len(results) == 1 + + check_relations_equal(project.adapter, ["seed", "materialized"]) + + +class TestTableMat(BaseTableMaterialization): + pass From 7077c475511b4f251245b6e246b9a23bed18eed0 Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 13 Jan 2023 10:52:21 -0500 Subject: [PATCH 110/156] converting 022_test_timezones (#6588) --- .../022_timezones_tests/models/timezones.sql | 10 --- .../022_timezones_tests/test_timezones.py | 52 --------------- tests/functional/timezones/test_timezones.py | 65 +++++++++++++++++++ 3 files changed, 65 insertions(+), 62 deletions(-) delete mode 100644 test/integration/022_timezones_tests/models/timezones.sql delete mode 100644 test/integration/022_timezones_tests/test_timezones.py create mode 100644 tests/functional/timezones/test_timezones.py diff --git a/test/integration/022_timezones_tests/models/timezones.sql b/test/integration/022_timezones_tests/models/timezones.sql deleted file mode 100644 index 87d565487e1..00000000000 --- a/test/integration/022_timezones_tests/models/timezones.sql +++ /dev/null @@ -1,10 +0,0 @@ - -{{ - config( - materialized='table' - ) -}} - -select - '{{ run_started_at.astimezone(modules.pytz.timezone("America/New_York")) }}' as run_started_at_est, - '{{ run_started_at }}' as run_started_at_utc diff --git a/test/integration/022_timezones_tests/test_timezones.py b/test/integration/022_timezones_tests/test_timezones.py deleted file mode 100644 index 993f9dcb83f..00000000000 --- a/test/integration/022_timezones_tests/test_timezones.py +++ /dev/null @@ -1,52 +0,0 @@ -from freezegun import freeze_time -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestTimezones(DBTIntegrationTest): - - @property - def schema(self): - return "timezones_022" - - @property - def models(self): - return "models" - - @property - def profile_config(self): - return { - 'test': { - 'outputs': { - 'dev': { - 'type': 'postgres', - 'threads': 1, - 'host': self.database_host, - 'port': 5432, - 'user': "root", - 'pass': "password", - 'dbname': 'dbt', - 'schema': self.unique_schema() - }, - }, - 'target': 'dev' - } - } - - @property - def query(self): - return """ - select - run_started_at_est, - run_started_at_utc - from {schema}.timezones - """.format(schema=self.unique_schema()) - - @freeze_time("2017-01-01 03:00:00", tz_offset=0) - @use_profile('postgres') - def test_postgres_run_started_at(self): - results = self.run_dbt(['run']) - self.assertEqual(len(results), 1) - result = self.run_sql(self.query, fetch='all')[0] - est, utc = result - self.assertEqual(utc, '2017-01-01 03:00:00+00:00') - self.assertEqual(est, '2016-12-31 22:00:00-05:00') diff --git a/tests/functional/timezones/test_timezones.py b/tests/functional/timezones/test_timezones.py new file mode 100644 index 00000000000..f12d85ca553 --- /dev/null +++ b/tests/functional/timezones/test_timezones.py @@ -0,0 +1,65 @@ +import os +import pytest +from freezegun import freeze_time + +from dbt.tests.util import run_dbt + + +model_sql = """ +{{ + config( + materialized='table' + ) +}} + +select + '{{ run_started_at.astimezone(modules.pytz.timezone("America/New_York")) }}' as run_started_at_est, + '{{ run_started_at }}' as run_started_at_utc +""" + + +class 
TestTimezones: + @pytest.fixture(scope="class") + def models(self): + return {"timezones.sql": model_sql} + + @pytest.fixture(scope="class") + def dbt_profile_data(self, unique_schema): + return { + 'test': { + 'outputs': { + 'dev': { + 'type': 'postgres', + 'threads': 1, + 'host': 'localhost', + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": os.getenv("POSTGRES_TEST_USER", "root"), + "pass": os.getenv("POSTGRES_TEST_PASS", "password"), + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + 'schema': unique_schema + }, + }, + 'target': 'dev' + } + } + + @pytest.fixture(scope="class") + def query(self, project): + return """ + select + run_started_at_est, + run_started_at_utc + from {schema}.timezones + """.format(schema=project.test_schema) + + @freeze_time("2022-01-01 03:00:00", tz_offset=0) + def test_run_started_at(self, project, query): + results = run_dbt(['run']) + + assert len(results) == 1 + + result = project.run_sql(query, fetch='all')[0] + est, utc = result + + assert utc == '2022-01-01 03:00:00+00:00' + assert est == '2021-12-31 22:00:00-05:00' From 585e7c59e86f06daf0a942b28e66c128f15eb25a Mon Sep 17 00:00:00 2001 From: Chenyu Li Date: Fri, 13 Jan 2023 09:15:04 -0800 Subject: [PATCH 111/156] migrate ref override (#6600) --- .../macros/ref_override_macro.sql | 4 - .../models/ref_override.sql | 3 - .../055_ref_override_tests/seeds/seed_1.csv | 4 - .../055_ref_override_tests/seeds/seed_2.csv | 4 - .../test_ref_override.py | 30 ------- ...e_mat.py => test_table_materialization.py} | 2 +- .../ref_override/test_ref_override.py | 79 +++++++++++++++++++ 7 files changed, 80 insertions(+), 46 deletions(-) delete mode 100644 test/integration/055_ref_override_tests/macros/ref_override_macro.sql delete mode 100644 test/integration/055_ref_override_tests/models/ref_override.sql delete mode 100644 test/integration/055_ref_override_tests/seeds/seed_1.csv delete mode 100644 test/integration/055_ref_override_tests/seeds/seed_2.csv delete mode 100644 test/integration/055_ref_override_tests/test_ref_override.py rename tests/adapter/dbt/tests/adapter/basic/{test_table_mat.py => test_table_materialization.py} (96%) create mode 100644 tests/functional/ref_override/test_ref_override.py diff --git a/test/integration/055_ref_override_tests/macros/ref_override_macro.sql b/test/integration/055_ref_override_tests/macros/ref_override_macro.sql deleted file mode 100644 index a4a85b50324..00000000000 --- a/test/integration/055_ref_override_tests/macros/ref_override_macro.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Macro to override ref and always return the same result -{% macro ref(modelname) %} -{% do return(builtins.ref(modelname).replace_path(identifier='seed_2')) %} -{% endmacro %} \ No newline at end of file diff --git a/test/integration/055_ref_override_tests/models/ref_override.sql b/test/integration/055_ref_override_tests/models/ref_override.sql deleted file mode 100644 index 3bbf936ae2e..00000000000 --- a/test/integration/055_ref_override_tests/models/ref_override.sql +++ /dev/null @@ -1,3 +0,0 @@ -select - * -from {{ ref('seed_1') }} \ No newline at end of file diff --git a/test/integration/055_ref_override_tests/seeds/seed_1.csv b/test/integration/055_ref_override_tests/seeds/seed_1.csv deleted file mode 100644 index 4de2771bdac..00000000000 --- a/test/integration/055_ref_override_tests/seeds/seed_1.csv +++ /dev/null @@ -1,4 +0,0 @@ -a,b -1,2 -2,4 -3,6 \ No newline at end of file diff --git a/test/integration/055_ref_override_tests/seeds/seed_2.csv 
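The converted timezones test above leans on two pieces: freezegun pins the clock, and pytz converts the frozen UTC instant into the comparison values. A standalone sketch of just that mechanic, with no dbt involved:

from datetime import datetime, timezone

import pytz
from freezegun import freeze_time

with freeze_time("2022-01-01 03:00:00", tz_offset=0):
    utc_now = datetime.now(timezone.utc)
    est_now = utc_now.astimezone(pytz.timezone("America/New_York"))

# mirrors the run_started_at_utc / run_started_at_est assertions in the test
assert str(utc_now) == "2022-01-01 03:00:00+00:00"
assert str(est_now) == "2021-12-31 22:00:00-05:00"
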
b/test/integration/055_ref_override_tests/seeds/seed_2.csv deleted file mode 100644 index eeadef9495c..00000000000 --- a/test/integration/055_ref_override_tests/seeds/seed_2.csv +++ /dev/null @@ -1,4 +0,0 @@ -a,b -6,2 -12,4 -18,6 \ No newline at end of file diff --git a/test/integration/055_ref_override_tests/test_ref_override.py b/test/integration/055_ref_override_tests/test_ref_override.py deleted file mode 100644 index 748379b447c..00000000000 --- a/test/integration/055_ref_override_tests/test_ref_override.py +++ /dev/null @@ -1,30 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestRefOverride(DBTIntegrationTest): - @property - def schema(self): - return "dbt_ref_override_055" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds'], - "macro-paths": ["macros"], - 'seeds': { - 'quote_columns': False, - }, - } - - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_ref_override(self): - self.run_dbt(['seed']) - self.run_dbt(['run']) - # We want it to equal seed_2 and not seed_1. If it's - # still pointing at seed_1 then the override hasn't worked. - self.assertTablesEqual('ref_override', 'seed_2') diff --git a/tests/adapter/dbt/tests/adapter/basic/test_table_mat.py b/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py similarity index 96% rename from tests/adapter/dbt/tests/adapter/basic/test_table_mat.py rename to tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py index 7191195e7cc..875e0f1b66b 100644 --- a/tests/adapter/dbt/tests/adapter/basic/test_table_mat.py +++ b/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py @@ -80,7 +80,7 @@ def models(self): return {'materialized.sql': model_sql} def test_table_materialization_sort_dist_no_op(self, project): - # basic table mat test, sort and dist is not supported by postgres so the result table would still be same as input + # basic table materialization test, sort and dist is not supported by postgres so the result table would still be same as input # check seed results = run_dbt(["seed"]) diff --git a/tests/functional/ref_override/test_ref_override.py b/tests/functional/ref_override/test_ref_override.py new file mode 100644 index 00000000000..9a6b1def435 --- /dev/null +++ b/tests/functional/ref_override/test_ref_override.py @@ -0,0 +1,79 @@ +import pytest + +from dbt.tests.util import run_dbt, check_relations_equal +from dbt.tests.fixtures.project import write_project_files + + +models__ref_override_sql = """ +select + * +from {{ ref('seed_1') }} +""" + +macros__ref_override_macro_sql = """ +-- Macro to override ref and always return the same result +{% macro ref(modelname) %} +{% do return(builtins.ref(modelname).replace_path(identifier='seed_2')) %} +{% endmacro %} +""" + +seeds__seed_2_csv = """a,b +6,2 +12,4 +18,6""" + +seeds__seed_1_csv = """a,b +1,2 +2,4 +3,6""" + + +@pytest.fixture(scope="class") +def models(): + return {"ref_override.sql": models__ref_override_sql} + + +@pytest.fixture(scope="class") +def macros(): + return {"ref_override_macro.sql": macros__ref_override_macro_sql} + + +@pytest.fixture(scope="class") +def seeds(): + return {"seed_2.csv": seeds__seed_2_csv, "seed_1.csv": seeds__seed_1_csv} + + +@pytest.fixture(scope="class") +def project_files( + project_root, + models, + macros, + seeds, +): + write_project_files(project_root, "models", models) + write_project_files(project_root, "macros", macros) + write_project_files(project_root, 
"seeds", seeds) + + +class TestRefOverride: + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "seed-paths": ["seeds"], + "macro-paths": ["macros"], + "seeds": { + "quote_columns": False, + }, + } + + def test_ref_override( + self, + project, + ): + run_dbt(["seed"]) + run_dbt(["run"]) + + # We want it to equal seed_2 and not seed_1. If it's + # still pointing at seed_1 then the override hasn't worked. + check_relations_equal(project.adapter, ["ref_override", "seed_2"]) From c40b488cb4e8aacd52d6df053b0be50f9c0a87cd Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 13 Jan 2023 14:15:03 -0500 Subject: [PATCH 112/156] convert 037_external_references (#6589) --- .../models/my_model.sql | 7 -- .../standalone_models/my_model.sql | 2 - .../test_external_reference.py | 78 ------------------- .../test_external_reference.py | 57 ++++++++++++++ 4 files changed, 57 insertions(+), 87 deletions(-) delete mode 100644 test/integration/037_external_reference_tests/models/my_model.sql delete mode 100644 test/integration/037_external_reference_tests/standalone_models/my_model.sql delete mode 100644 test/integration/037_external_reference_tests/test_external_reference.py create mode 100644 tests/functional/external_reference/test_external_reference.py diff --git a/test/integration/037_external_reference_tests/models/my_model.sql b/test/integration/037_external_reference_tests/models/my_model.sql deleted file mode 100644 index 5d10e607ed7..00000000000 --- a/test/integration/037_external_reference_tests/models/my_model.sql +++ /dev/null @@ -1,7 +0,0 @@ -{{ - config( - materialized = "view" - ) -}} - -select * from "{{ this.schema + 'z' }}"."external" diff --git a/test/integration/037_external_reference_tests/standalone_models/my_model.sql b/test/integration/037_external_reference_tests/standalone_models/my_model.sql deleted file mode 100644 index 2cd691ea7b4..00000000000 --- a/test/integration/037_external_reference_tests/standalone_models/my_model.sql +++ /dev/null @@ -1,2 +0,0 @@ - -select 1 as id diff --git a/test/integration/037_external_reference_tests/test_external_reference.py b/test/integration/037_external_reference_tests/test_external_reference.py deleted file mode 100644 index d5a7e129e3a..00000000000 --- a/test/integration/037_external_reference_tests/test_external_reference.py +++ /dev/null @@ -1,78 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - -class TestExternalReference(DBTIntegrationTest): - @property - def schema(self): - return "external_reference_037" - - @property - def models(self): - return "models" - - def setUp(self): - super().setUp() - self.use_default_project() - self.external_schema = self.unique_schema()+'z' - self.run_sql( - 'create schema "{}"'.format(self.external_schema) - ) - self.run_sql( - 'create table "{}"."external" (id integer)' - .format(self.external_schema) - ) - self.run_sql( - 'insert into "{}"."external" values (1), (2)' - .format(self.external_schema) - ) - - def tearDown(self): - # This has to happen before we drop the external schema, because - # otherwise postgres hangs forever. 
- self._drop_schemas() - with self.get_connection(): - self._drop_schema_named(self.default_database, self.external_schema) - super().tearDown() - - @use_profile('postgres') - def test__postgres__external_reference(self): - self.assertEqual(len(self.run_dbt()), 1) - # running it again should succeed - self.assertEqual(len(self.run_dbt()), 1) - - -# The opposite of the test above -- check that external relations that -# depend on a dbt model do not create issues with caching -class TestExternalDependency(DBTIntegrationTest): - @property - def schema(self): - return "external_dependency_037" - - @property - def models(self): - return "standalone_models" - - def tearDown(self): - # This has to happen before we drop the external schema, because - # otherwise postgres hangs forever. - self._drop_schemas() - with self.get_connection(): - self._drop_schema_named(self.default_database, self.external_schema) - super().tearDown() - - @use_profile('postgres') - def test__postgres__external_reference(self): - self.assertEqual(len(self.run_dbt()), 1) - - # create a view outside of the dbt schema that depends on this model - self.external_schema = self.unique_schema()+'zz' - self.run_sql( - 'create schema "{}"'.format(self.external_schema) - ) - self.run_sql( - 'create view "{}"."external" as (select * from {}.my_model)' - .format(self.external_schema, self.unique_schema()) - ) - - # running it again should succeed - self.assertEqual(len(self.run_dbt()), 1) - diff --git a/tests/functional/external_reference/test_external_reference.py b/tests/functional/external_reference/test_external_reference.py new file mode 100644 index 00000000000..50cf7835494 --- /dev/null +++ b/tests/functional/external_reference/test_external_reference.py @@ -0,0 +1,57 @@ +import pytest + +from dbt.tests.util import run_dbt + + +external_model_sql = """ +{{ + config( + materialized = "view" + ) +}} + +select * from "{{ this.schema + 'z' }}"."external" +""" + +model_sql = """ +select 1 as id +""" + + +class TestExternalReference: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": external_model_sql} + + def test_external_reference(self, project, unique_schema): + external_schema = unique_schema + 'z' + project.run_sql(f'create schema "{external_schema}"') + project.run_sql(f'create table "{external_schema}"."external" (id integer)') + project.run_sql(f'insert into "{external_schema}"."external" values (1), (2)') + + results = run_dbt(['run']) + assert len(results) == 1 + + # running it again should succeed + results = run_dbt(['run']) + assert len(results) == 1 + + +# The opposite of the test above -- check that external relations that +# depend on a dbt model do not create issues with caching +class TestExternalDependency: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": model_sql} + + def test_external_reference(self, project, unique_schema): + results = run_dbt(['run']) + assert len(results) == 1 + + external_schema = unique_schema + 'z' + project.run_sql(f'create schema "{external_schema}"') + project.run_sql(f'create view "{external_schema}"."external" as (select * from {unique_schema}.model)') + + # running it again should succeed + results = run_dbt(['run']) + assert len(results) == 1 From 20c95a49933cd48dfbba9bd2ad5bc372ad440c6b Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Fri, 13 Jan 2023 15:02:17 -0500 Subject: [PATCH 113/156] convert 057_run_query_tests (#6607) --- .../057_run_query_tests/test_pg_types.py | 25 ------------------- 
.../functional/run_query/test_types.py | 17 +++++++++++++ 2 files changed, 17 insertions(+), 25 deletions(-) delete mode 100644 test/integration/057_run_query_tests/test_pg_types.py rename test/integration/057_run_query_tests/macros/test_pg_array_queries.sql => tests/functional/run_query/test_types.py (52%) diff --git a/test/integration/057_run_query_tests/test_pg_types.py b/test/integration/057_run_query_tests/test_pg_types.py deleted file mode 100644 index d6553bb9e8e..00000000000 --- a/test/integration/057_run_query_tests/test_pg_types.py +++ /dev/null @@ -1,25 +0,0 @@ - -from test.integration.base import DBTIntegrationTest, use_profile -import json - -class TestPostgresTypes(DBTIntegrationTest): - - @property - def schema(self): - return "pg_query_types_057" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'macro-paths': ['macros'], - } - - @use_profile('postgres') - def test__postgres_nested_types(self): - result = self.run_dbt(['run-operation', 'test_array_results']) - self.assertTrue(result.success) diff --git a/test/integration/057_run_query_tests/macros/test_pg_array_queries.sql b/tests/functional/run_query/test_types.py similarity index 52% rename from test/integration/057_run_query_tests/macros/test_pg_array_queries.sql rename to tests/functional/run_query/test_types.py index f672d777f6f..c33ca3478a8 100644 --- a/test/integration/057_run_query_tests/macros/test_pg_array_queries.sql +++ b/tests/functional/run_query/test_types.py @@ -1,4 +1,8 @@ +import pytest +from dbt.tests.util import run_dbt + +macros_sql = """ {% macro test_array_results() %} {% set sql %} @@ -14,3 +18,16 @@ {% endif %} {% endmacro %} +""" + + +class TestTypes: + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + } + + def test_nested_types(self, project): + result = run_dbt(['run-operation', 'test_array_results']) + assert result.success From 065ab2ebc2104635770f49ca48bc434532d3ef41 Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Mon, 16 Jan 2023 16:39:54 +0100 Subject: [PATCH 114/156] Reformat `tests/` (#6622) * Run black + flake8 on tests dir * Run pre-commit --- .flake8 | 2 +- core/dbt/parser/generic_test_builders.py | 2 +- core/dbt/task/test.py | 8 +- core/dbt/tests/fixtures/project.py | 4 +- core/dbt/tests/util.py | 7 +- pyproject.toml | 2 +- .../dbt/tests/adapter/aliases/test_aliases.py | 24 +---- .../basic/test_table_materialization.py | 3 +- .../tests/adapter/dbt_debug/test_dbt_debug.py | 12 +-- .../test_incremental_predicates.py | 26 +++-- .../relations/test_changing_relation_type.py | 18 ++-- .../functional/artifacts/expected_manifest.py | 8 +- tests/functional/colors/test_colors.py | 2 +- tests/functional/configs/test_configs.py | 13 ++- .../context_methods/test_builtin_functions.py | 12 ++- .../context_methods/test_custom_env_vars.py | 6 +- .../custom_aliases/test_custom_aliases.py | 22 ++--- tests/functional/cycles/test_cycles.py | 5 +- tests/functional/exit_codes/fixtures.py | 2 +- .../functional/exit_codes/test_exit_codes.py | 52 +++++----- tests/functional/exposures/fixtures.py | 1 - .../exposures/test_exposure_configs.py | 2 +- tests/functional/exposures/test_exposures.py | 8 +- .../test_external_reference.py | 16 +-- .../incremental_schema_tests/fixtures.py | 1 - .../test_incremental_schema.py | 76 ++++++-------- tests/functional/logging/test_logging.py | 6 +- tests/functional/logging/test_meta_logging.py | 13 +-- .../functional/metrics/test_metric_configs.py | 2 +- 
.../metrics/test_metric_helper_functions.py | 5 +- tests/functional/metrics/test_metrics.py | 2 +- .../persist_docs_tests/test_persist_docs.py | 82 +++++++--------- .../postgres/test_postgres_indexes.py | 10 +- .../relation_names/test_relation_name.py | 26 ++--- .../run_operations/test_run_operations.py | 58 +++++------ tests/functional/run_query/test_types.py | 2 +- .../schema_tests/test_schema_v2_tests.py | 8 +- tests/functional/severity/test_severity.py | 33 ++++--- .../functional/statements/test_statements.py | 10 +- .../test_store_test_failures.py | 81 ++++++++------- .../test_selection_expansion.py | 8 +- tests/functional/timezones/test_timezones.py | 28 +++--- tests/unit/test_deprecations.py | 98 +++++++++---------- tests/unit/test_events.py | 25 ++--- tests/unit/test_functions.py | 10 +- tests/unit/test_helper_types.py | 3 +- tests/unit/test_proto_events.py | 29 ++++-- 47 files changed, 420 insertions(+), 453 deletions(-) diff --git a/.flake8 b/.flake8 index 38b207c6e9b..e39b2fa4646 100644 --- a/.flake8 +++ b/.flake8 @@ -9,4 +9,4 @@ ignore = E203 # makes Flake8 work like black E741 E501 # long line checking is done in black -exclude = test +exclude = test/ diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index 206e9c51438..678f7de9df3 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -272,7 +272,7 @@ def __init__( column_name=column_name, name=self.name, key=key, - err_msg=e.msg + err_msg=e.msg, ) if value is not None: diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index 48422b5e726..e7f449873aa 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -91,9 +91,7 @@ def print_start_line(self): def before_execute(self): self.print_start_line() - def execute_test( - self, test: TestNode, manifest: Manifest - ) -> TestResultData: + def execute_test(self, test: TestNode, manifest: Manifest) -> TestResultData: context = generate_runtime_model_context(test, self.config, manifest) materialization_macro = manifest.find_materialization_macro_by_name( @@ -101,7 +99,9 @@ def execute_test( ) if materialization_macro is None: - raise MissingMaterializationError(materialization=test.get_materialization(), adapter_type=self.adapter.type()) + raise MissingMaterializationError( + materialization=test.get_materialization(), adapter_type=self.adapter.type() + ) if "config" not in context: raise DbtInternalError( diff --git a/core/dbt/tests/fixtures/project.py b/core/dbt/tests/fixtures/project.py index 9fb34ff59a4..5fcdca408c9 100644 --- a/core/dbt/tests/fixtures/project.py +++ b/core/dbt/tests/fixtures/project.py @@ -249,7 +249,9 @@ def clean_up_logging(): # otherwise this will fail. So to test errors in those areas, you need to copy the files # into the project in the tests instead of putting them in the fixtures. 
@pytest.fixture(scope="class") -def adapter(unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging): +def adapter( + unique_schema, project_root, profiles_root, profiles_yml, dbt_project_yml, clean_up_logging +): # The profiles.yml and dbt_project.yml should already be written out args = Namespace( profiles_dir=str(profiles_root), project_dir=str(project_root), target=None, profile=None diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py index 3904a90a37d..245648ceb48 100644 --- a/core/dbt/tests/util.py +++ b/core/dbt/tests/util.py @@ -12,7 +12,12 @@ from dbt.main import handle_and_check from dbt.logger import log_manager from dbt.contracts.graph.manifest import Manifest -from dbt.events.functions import fire_event, capture_stdout_logs, stop_capture_stdout_logs, reset_metadata_vars +from dbt.events.functions import ( + fire_event, + capture_stdout_logs, + stop_capture_stdout_logs, + reset_metadata_vars, +) from dbt.events.test_types import IntegrationTestDebug # ============================================================================= diff --git a/pyproject.toml b/pyproject.toml index 4d9d26d4ff5..bcf52f2414c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,6 @@ namespace_packages = true [tool.black] # TODO: remove global exclusion of tests when testing overhaul is complete -force-exclude = 'test' +force-exclude = 'test/' line-length = 99 target-version = ['py38'] diff --git a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py index a9f846e2ca4..d9ff6b5b28f 100644 --- a/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py +++ b/tests/adapter/dbt/tests/adapter/aliases/test_aliases.py @@ -50,10 +50,7 @@ def models(self): @pytest.fixture(scope="class") def macros(self): - return { - "cast.sql": MACROS__CAST_SQL, - "expect_value.sql": MACROS__EXPECT_VALUE_SQL - } + return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} def test_alias_model_name(self, project): results = run_dbt(["run"]) @@ -71,10 +68,7 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return { - "cast.sql": MACROS__CAST_SQL, - "expect_value.sql": MACROS__EXPECT_VALUE_SQL - } + return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} @pytest.fixture(scope="class") def models(self): @@ -100,10 +94,7 @@ def project_config_update(self): @pytest.fixture(scope="class") def macros(self): - return { - "cast.sql": MACROS__CAST_SQL, - "expect_value.sql": MACROS__EXPECT_VALUE_SQL - } + return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} @pytest.fixture(scope="class") def models(self): @@ -130,19 +121,14 @@ def project_config_update(self, unique_schema): "models": { "test": { "alias": "duped_alias", - "model_b": { - "schema": unique_schema + "_alt" - }, + "model_b": {"schema": unique_schema + "_alt"}, }, }, } @pytest.fixture(scope="class") def macros(self): - return { - "cast.sql": MACROS__CAST_SQL, - "expect_value.sql": MACROS__EXPECT_VALUE_SQL - } + return {"cast.sql": MACROS__CAST_SQL, "expect_value.sql": MACROS__EXPECT_VALUE_SQL} @pytest.fixture(scope="class") def models(self): diff --git a/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py b/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py index 875e0f1b66b..279152d6985 100644 --- a/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py +++ 
b/tests/adapter/dbt/tests/adapter/basic/test_table_materialization.py @@ -70,14 +70,13 @@ class BaseTableMaterialization: - @pytest.fixture(scope="class") def seeds(self): return {"seed.csv": seeds__seed_csv} @pytest.fixture(scope="class") def models(self): - return {'materialized.sql': model_sql} + return {"materialized.sql": model_sql} def test_table_materialization_sort_dist_no_op(self, project): # basic table materialization test, sort and dist is not supported by postgres so the result table would still be same as input diff --git a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py index b7b0ff9ac17..8d3fd7751f2 100644 --- a/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py +++ b/tests/adapter/dbt/tests/adapter/dbt_debug/test_dbt_debug.py @@ -21,7 +21,7 @@ def capsys(self, capsys): def assertGotValue(self, linepat, result): found = False output = self.capsys.readouterr().out - for line in output.split('\n'): + for line in output.split("\n"): if linepat.match(line): found = True assert result in line @@ -41,10 +41,7 @@ def check_project(self, splitout, msg="ERROR invalid"): class BaseDebugProfileVariable(BaseDebug): @pytest.fixture(scope="class") def project_config_update(self): - return { - "config-version": 2, - "profile": '{{ "te" ~ "st" }}' - } + return {"config-version": 2, "profile": '{{ "te" ~ "st" }}'} class TestDebugPostgres(BaseDebug): @@ -70,7 +67,6 @@ class TestDebugProfileVariablePostgres(BaseDebugProfileVariable): class TestDebugInvalidProjectPostgres(BaseDebug): - def test_empty_project(self, project): with open("dbt_project.yml", "w") as f: # noqa: F841 pass @@ -96,9 +92,7 @@ def test_not_found_project(self, project): def test_invalid_project_outside_current_dir(self, project): # create a dbt_project.yml - project_config = { - "invalid-key": "not a valid key in this project" - } + project_config = {"invalid-key": "not a valid key in this project"} os.makedirs("custom", exist_ok=True) with open("custom/dbt_project.yml", "w") as f: yaml.safe_dump(project_config, f, default_flow_style=True) diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py index 11a4b6c0384..2060e9eb6d4 100644 --- a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_predicates.py @@ -64,10 +64,8 @@ def seeds(self): def project_config_update(self): return { "models": { - "+incremental_predicates": [ - "id != 2" - ], - "+incremental_strategy": "delete+insert" + "+incremental_predicates": ["id != 2"], + "+incremental_strategy": "delete+insert", } } @@ -123,16 +121,21 @@ def get_expected_fields(self, relation, seed_rows, opt_model_count=None): inc_test_model_count=1, seed_rows=seed_rows, opt_model_count=opt_model_count, - relation=relation + relation=relation, ) # no unique_key test def test__incremental_predicates(self, project): """seed should match model after two incremental runs""" - expected_fields = self.get_expected_fields(relation="expected_delete_insert_incremental_predicates", seed_rows=4) + expected_fields = self.get_expected_fields( + relation="expected_delete_insert_incremental_predicates", seed_rows=4 + ) test_case_fields = self.get_test_fields( - project, seed="expected_delete_insert_incremental_predicates", incremental_model="delete_insert_incremental_predicates", update_sql_file=None + project, + 
seed="expected_delete_insert_incremental_predicates", + incremental_model="delete_insert_incremental_predicates", + update_sql_file=None, ) self.check_scenario_correctness(expected_fields, test_case_fields, project) @@ -144,11 +147,4 @@ class TestIncrementalPredicatesDeleteInsert(BaseIncrementalPredicates): class TestPredicatesDeleteInsert(BaseIncrementalPredicates): @pytest.fixture(scope="class") def project_config_update(self): - return { - "models": { - "+predicates": [ - "id != 2" - ], - "+incremental_strategy": "delete+insert" - } - } + return {"models": {"+predicates": ["id != 2"], "+incremental_strategy": "delete+insert"}} diff --git a/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py b/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py index 38515bc0206..2eeb5aea64d 100644 --- a/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py +++ b/tests/adapter/dbt/tests/adapter/relations/test_changing_relation_type.py @@ -1,5 +1,3 @@ - - from typing import List, Optional import pytest @@ -20,12 +18,10 @@ class BaseChangeRelationTypeValidator: @pytest.fixture(scope="class") def models(self): - return { - "model_mc_modelface.sql": _DEFAULT_CHANGE_RELATION_TYPE_MODEL - } + return {"model_mc_modelface.sql": _DEFAULT_CHANGE_RELATION_TYPE_MODEL} def _run_and_check_materialization(self, materialization, extra_args: Optional[List] = None): - run_args = ["run", '--vars', f'materialized: {materialization}'] + run_args = ["run", "--vars", f"materialized: {materialization}"] if extra_args: run_args.extend(extra_args) results = run_dbt(run_args) @@ -33,11 +29,11 @@ def _run_and_check_materialization(self, materialization, extra_args: Optional[L assert len(results) == 1 def test_changing_materialization_changes_relation_type(self, project): - self._run_and_check_materialization('view') - self._run_and_check_materialization('table') - self._run_and_check_materialization('view') - self._run_and_check_materialization('incremental') - self._run_and_check_materialization('table', extra_args=['--full-refresh']) + self._run_and_check_materialization("view") + self._run_and_check_materialization("table") + self._run_and_check_materialization("view") + self._run_and_check_materialization("incremental") + self._run_and_check_materialization("table", extra_args=["--full-refresh"]) class TestChangeRelationTypes(BaseChangeRelationTypeValidator): diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py index 51a6b633e40..6e1e8e89af5 100644 --- a/tests/functional/artifacts/expected_manifest.py +++ b/tests/functional/artifacts/expected_manifest.py @@ -1061,9 +1061,7 @@ def expected_references_manifest(project): "unique_id": "seed.test.seed", "checksum": checksum_file(seed_path), "unrendered_config": get_unrendered_seed_config(), - "relation_name": '"{0}"."{1}".seed'.format( - project.database, my_schema_name - ), + "relation_name": '"{0}"."{1}".seed'.format(project.database, my_schema_name), }, "snapshot.test.snapshot_seed": { "alias": "snapshot_seed", @@ -1244,9 +1242,7 @@ def expected_references_manifest(project): "unique_id": "doc.test.table_info", }, "doc.test.view_summary": { - "block_contents": ( - "A view of the summary of the ephemeral copy of the seed data" - ), + "block_contents": ("A view of the summary of the ephemeral copy of the seed data"), "resource_type": "doc", "name": "view_summary", "original_file_path": docs_path, diff --git a/tests/functional/colors/test_colors.py 
b/tests/functional/colors/test_colors.py index 7e92e039506..f42591c2b6a 100644 --- a/tests/functional/colors/test_colors.py +++ b/tests/functional/colors/test_colors.py @@ -16,7 +16,7 @@ def models(): @pytest.fixture(scope="class") def project_config_update(): - return {'config-version': 2} + return {"config-version": 2} class TestColors: diff --git a/tests/functional/configs/test_configs.py b/tests/functional/configs/test_configs.py index 97e29362d4b..086ef455f18 100644 --- a/tests/functional/configs/test_configs.py +++ b/tests/functional/configs/test_configs.py @@ -1,4 +1,3 @@ - from hologram import ValidationError import pytest import os @@ -94,7 +93,11 @@ def test_seeds_materialization_proj_config(self, project): class TestInvalidSeedsMaterializationSchema(object): def test_seeds_materialization_schema_config(self, project): seeds_dir = os.path.join(project.project_root, "seeds") - write_file("version: 2\nseeds:\n - name: myseed\n config:\n materialized: table", seeds_dir, "schema.yml") + write_file( + "version: 2\nseeds:\n - name: myseed\n config:\n materialized: table", + seeds_dir, + "schema.yml", + ) write_file("id1, id2\n1, 2", seeds_dir, "myseed.csv") with pytest.raises(ValidationError): @@ -116,7 +119,11 @@ def test_snapshots_materialization_proj_config(self, project): class TestInvalidSnapshotsMaterializationSchema(object): def test_snapshots_materialization_schema_config(self, project): snapshots_dir = os.path.join(project.project_root, "snapshots") - write_file("version: 2\nsnapshots:\n - name: mysnapshot\n config:\n materialized: table", snapshots_dir, "schema.yml") + write_file( + "version: 2\nsnapshots:\n - name: mysnapshot\n config:\n materialized: table", + snapshots_dir, + "schema.yml", + ) write_file(simple_snapshot, snapshots_dir, "mysnapshot.sql") with pytest.raises(ValidationError): diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py index 1e741a2b283..562118f946f 100644 --- a/tests/functional/context_methods/test_builtin_functions.py +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -112,7 +112,17 @@ def test_builtin_invocation_args_dict_function(self, project): expected = "invocation_result: {'debug': True, 'log_format': 'json', 'write_json': True, 'use_colors': True, 'printer_width': 80, 'version_check': True, 'partial_parse': True, 'static_parser': True, 'profiles_dir': " assert expected in str(result) - expected = ("'send_anonymous_usage_stats': False", "'quiet': False", "'no_print': False", "'cache_selected_only': False", "'macro': 'validate_invocation'", "'args': '{my_variable: test_variable}'", "'which': 'run-operation'", "'rpc_method': 'run-operation'", "'indirect_selection': 'eager'") + expected = ( + "'send_anonymous_usage_stats': False", + "'quiet': False", + "'no_print': False", + "'cache_selected_only': False", + "'macro': 'validate_invocation'", + "'args': '{my_variable: test_variable}'", + "'which': 'run-operation'", + "'rpc_method': 'run-operation'", + "'indirect_selection': 'eager'", + ) for element in expected: assert element in str(result) diff --git a/tests/functional/context_methods/test_custom_env_vars.py b/tests/functional/context_methods/test_custom_env_vars.py index 413789c7676..e74a5dcee09 100644 --- a/tests/functional/context_methods/test_custom_env_vars.py +++ b/tests/functional/context_methods/test_custom_env_vars.py @@ -27,7 +27,9 @@ def setup(self): del os.environ["DBT_ENV_CUSTOM_ENV_SOME_VAR"] def test_extra_filled(self, project): - _, 
log_output = run_dbt_and_capture(['--log-format=json', 'deps'],) + _, log_output = run_dbt_and_capture( + ["--log-format=json", "deps"], + ) logs = parse_json_logs(log_output) for log in logs: - assert log['info'].get('extra') == {"SOME_VAR": "value"} + assert log["info"].get("extra") == {"SOME_VAR": "value"} diff --git a/tests/functional/custom_aliases/test_custom_aliases.py b/tests/functional/custom_aliases/test_custom_aliases.py index 561899f4575..86b44c3b3f0 100644 --- a/tests/functional/custom_aliases/test_custom_aliases.py +++ b/tests/functional/custom_aliases/test_custom_aliases.py @@ -7,18 +7,14 @@ model2_sql, macros_sql, macros_config_sql, - schema_yml + schema_yml, ) class TestAliases: @pytest.fixture(scope="class") def models(self): - return { - "model1.sql": model1_sql, - "model2.sql": model2_sql, - "schema.yml": schema_yml - } + return {"model1.sql": model1_sql, "model2.sql": model2_sql, "schema.yml": schema_yml} @pytest.fixture(scope="class") def macros(self): @@ -27,21 +23,17 @@ def macros(self): } def test_customer_alias_name(self, project): - results = run_dbt(['run']) + results = run_dbt(["run"]) assert len(results) == 2 - results = run_dbt(['test']) + results = run_dbt(["test"]) assert len(results) == 2 class TestAliasesWithConfig: @pytest.fixture(scope="class") def models(self): - return { - "model1.sql": model1_sql, - "model2.sql": model2_sql, - "schema.yml": schema_yml - } + return {"model1.sql": model1_sql, "model2.sql": model2_sql, "schema.yml": schema_yml} @pytest.fixture(scope="class") def macros(self): @@ -50,8 +42,8 @@ def macros(self): } def test_customer_alias_name(self, project): - results = run_dbt(['run']) + results = run_dbt(["run"]) assert len(results) == 2 - results = run_dbt(['test']) + results = run_dbt(["test"]) assert len(results) == 2 diff --git a/tests/functional/cycles/test_cycles.py b/tests/functional/cycles/test_cycles.py index 0e2cdcaf911..6d2eb3fd0cc 100644 --- a/tests/functional/cycles/test_cycles.py +++ b/tests/functional/cycles/test_cycles.py @@ -36,10 +36,7 @@ class TestSimpleCycle: @pytest.fixture(scope="class") def models(self): - return { - "model_a.sql": model_a_sql, - "model_b.sql": model_b_sql - } + return {"model_a.sql": model_a_sql, "model_b.sql": model_b_sql} def test_simple_cycle(self, project): with pytest.raises(RuntimeError) as exc: diff --git a/tests/functional/exit_codes/fixtures.py b/tests/functional/exit_codes/fixtures.py index 23a0bef3897..296e1a3f6c0 100644 --- a/tests/functional/exit_codes/fixtures.py +++ b/tests/functional/exit_codes/fixtures.py @@ -74,5 +74,5 @@ def models(self): "bad.sql": bad_sql, "dupe.sql": dupe_sql, "good.sql": good_sql, - "schema.yml": schema_yml + "schema.yml": schema_yml, } diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py index 54b5cb6865e..44672beecae 100644 --- a/tests/functional/exit_codes/test_exit_codes.py +++ b/tests/functional/exit_codes/test_exit_codes.py @@ -1,17 +1,13 @@ import pytest import dbt.exceptions -from dbt.tests.util import ( - check_table_does_exist, - check_table_does_not_exist, - run_dbt -) +from dbt.tests.util import check_table_does_exist, check_table_does_not_exist, run_dbt from tests.functional.exit_codes.fixtures import ( BaseConfigProject, snapshots_bad_sql, snapshots_good_sql, data_seed_bad_csv, - data_seed_good_csv + data_seed_good_csv, ) @@ -21,38 +17,38 @@ def snapshots(self): return {"g.sql": snapshots_good_sql} def test_exit_code_run_succeed(self, project): - results = run_dbt(['run', '--model', 
'good']) + results = run_dbt(["run", "--model", "good"]) assert len(results) == 1 - check_table_does_exist(project.adapter, 'good') + check_table_does_exist(project.adapter, "good") def test_exit_code_run_fail(self, project): - results = run_dbt(['run', '--model', 'bad'], expect_pass=False) + results = run_dbt(["run", "--model", "bad"], expect_pass=False) assert len(results) == 1 - check_table_does_not_exist(project.adapter, 'bad') + check_table_does_not_exist(project.adapter, "bad") def test_schema_test_pass(self, project): - results = run_dbt(['run', '--model', 'good']) + results = run_dbt(["run", "--model", "good"]) assert len(results) == 1 - results = run_dbt(['test', '--model', 'good']) + results = run_dbt(["test", "--model", "good"]) assert len(results) == 1 def test_schema_test_fail(self, project): - results = run_dbt(['run', '--model', 'dupe']) + results = run_dbt(["run", "--model", "dupe"]) assert len(results) == 1 - results = run_dbt(['test', '--model', 'dupe'], expect_pass=False) + results = run_dbt(["test", "--model", "dupe"], expect_pass=False) assert len(results) == 1 def test_compile(self, project): - results = run_dbt(['compile']) + results = run_dbt(["compile"]) assert len(results) == 7 def test_snapshot_pass(self, project): run_dbt(["run", "--model", "good"]) - results = run_dbt(['snapshot']) + results = run_dbt(["snapshot"]) assert len(results) == 1 - check_table_does_exist(project.adapter, 'good_snapshot') + check_table_does_exist(project.adapter, "good_snapshot") class TestExitCodesSnapshotFail(BaseConfigProject): @@ -61,12 +57,12 @@ def snapshots(self): return {"b.sql": snapshots_bad_sql} def test_snapshot_fail(self, project): - results = run_dbt(['run', '--model', 'good']) + results = run_dbt(["run", "--model", "good"]) assert len(results) == 1 - results = run_dbt(['snapshot'], expect_pass=False) + results = run_dbt(["snapshot"], expect_pass=False) assert len(results) == 1 - check_table_does_not_exist(project.adapter, 'good_snapshot') + check_table_does_not_exist(project.adapter, "good_snapshot") class TestExitCodesDeps: @@ -75,14 +71,14 @@ def packages(self): return { "packages": [ { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'dbt/1.0.0', + "git": "https://github.com/dbt-labs/dbt-integration-project", + "revision": "dbt/1.0.0", } ] } def test_deps(self, project): - results = run_dbt(['deps']) + results = run_dbt(["deps"]) assert results is None @@ -92,15 +88,15 @@ def packages(self): return { "packages": [ { - 'git': 'https://github.com/dbt-labs/dbt-integration-project', - 'revision': 'bad-branch', + "git": "https://github.com/dbt-labs/dbt-integration-project", + "revision": "bad-branch", }, ] } def test_deps_fail(self, project): with pytest.raises(dbt.exceptions.GitCheckoutError) as exc: - run_dbt(['deps']) + run_dbt(["deps"]) expected_msg = "Error checking out spec='bad-branch'" assert expected_msg in str(exc.value) @@ -111,7 +107,7 @@ def seeds(self): return {"good.csv": data_seed_good_csv} def test_seed(self, project): - results = run_dbt(['seed']) + results = run_dbt(["seed"]) assert len(results) == 1 @@ -121,4 +117,4 @@ def seeds(self): return {"bad.csv": data_seed_bad_csv} def test_seed(self, project): - run_dbt(['seed'], expect_pass=False) + run_dbt(["seed"], expect_pass=False) diff --git a/tests/functional/exposures/fixtures.py b/tests/functional/exposures/fixtures.py index 1d573b1a7b6..f02c5723f72 100644 --- a/tests/functional/exposures/fixtures.py +++ b/tests/functional/exposures/fixtures.py @@ -1,4 +1,3 @@ - models_sql 
= """ select 1 as id """ diff --git a/tests/functional/exposures/test_exposure_configs.py b/tests/functional/exposures/test_exposure_configs.py index a7018204952..199a6368a4a 100644 --- a/tests/functional/exposures/test_exposure_configs.py +++ b/tests/functional/exposures/test_exposure_configs.py @@ -12,7 +12,7 @@ enabled_yaml_level_exposure_yml, invalid_config_exposure_yml, source_schema_yml, - metrics_schema_yml + metrics_schema_yml, ) diff --git a/tests/functional/exposures/test_exposures.py b/tests/functional/exposures/test_exposures.py index 777a8e161c4..97849fa0835 100644 --- a/tests/functional/exposures/test_exposures.py +++ b/tests/functional/exposures/test_exposures.py @@ -6,7 +6,7 @@ second_model_sql, simple_exposure_yml, source_schema_yml, - metrics_schema_yml + metrics_schema_yml, ) @@ -37,8 +37,8 @@ def test_depends_on(self, project): manifest = get_manifest(project.project_root) exposure_depends_on = manifest.exposures["exposure.test.simple_exposure"].depends_on.nodes expected_exposure_depends_on = [ - 'source.test.test_source.test_table', - 'model.test.model', - 'metric.test.metric' + "source.test.test_source.test_table", + "model.test.model", + "metric.test.metric", ] assert sorted(exposure_depends_on) == sorted(expected_exposure_depends_on) diff --git a/tests/functional/external_reference/test_external_reference.py b/tests/functional/external_reference/test_external_reference.py index 50cf7835494..8b5294155d8 100644 --- a/tests/functional/external_reference/test_external_reference.py +++ b/tests/functional/external_reference/test_external_reference.py @@ -24,16 +24,16 @@ def models(self): return {"model.sql": external_model_sql} def test_external_reference(self, project, unique_schema): - external_schema = unique_schema + 'z' + external_schema = unique_schema + "z" project.run_sql(f'create schema "{external_schema}"') project.run_sql(f'create table "{external_schema}"."external" (id integer)') project.run_sql(f'insert into "{external_schema}"."external" values (1), (2)') - results = run_dbt(['run']) + results = run_dbt(["run"]) assert len(results) == 1 # running it again should succeed - results = run_dbt(['run']) + results = run_dbt(["run"]) assert len(results) == 1 @@ -45,13 +45,15 @@ def models(self): return {"model.sql": model_sql} def test_external_reference(self, project, unique_schema): - results = run_dbt(['run']) + results = run_dbt(["run"]) assert len(results) == 1 - external_schema = unique_schema + 'z' + external_schema = unique_schema + "z" project.run_sql(f'create schema "{external_schema}"') - project.run_sql(f'create view "{external_schema}"."external" as (select * from {unique_schema}.model)') + project.run_sql( + f'create view "{external_schema}"."external" as (select * from {unique_schema}.model)' + ) # running it again should succeed - results = run_dbt(['run']) + results = run_dbt(["run"]) assert len(results) == 1 diff --git a/tests/functional/incremental_schema_tests/fixtures.py b/tests/functional/incremental_schema_tests/fixtures.py index c6eebc5e183..b80bea45e80 100644 --- a/tests/functional/incremental_schema_tests/fixtures.py +++ b/tests/functional/incremental_schema_tests/fixtures.py @@ -1,4 +1,3 @@ - # # Properties # diff --git a/tests/functional/incremental_schema_tests/test_incremental_schema.py b/tests/functional/incremental_schema_tests/test_incremental_schema.py index 3ee9e6477e4..8203f497331 100644 --- a/tests/functional/incremental_schema_tests/test_incremental_schema.py +++ 
b/tests/functional/incremental_schema_tests/test_incremental_schema.py @@ -41,21 +41,16 @@ def models(self): return { "incremental_sync_remove_only.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, "incremental_ignore.sql": _MODELS__INCREMENTAL_IGNORE, - "incremental_sync_remove_only_target.sql": - _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, + "incremental_sync_remove_only_target.sql": _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, "incremental_ignore_target.sql": _MODELS__INCREMENTAL_IGNORE_TARGET, "incremental_fail.sql": _MODELS__INCREMENTAL_FAIL, "incremental_sync_all_columns.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS, - "incremental_append_new_columns_remove_one.sql": - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, + "incremental_append_new_columns_remove_one.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE, "model_a.sql": _MODELS__A, - "incremental_append_new_columns_target.sql": - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "incremental_append_new_columns_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, "incremental_append_new_columns.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, - "incremental_sync_all_columns_target.sql": - _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, - "incremental_append_new_columns_remove_one_target.sql": - _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, + "incremental_sync_all_columns_target.sql": _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "incremental_append_new_columns_remove_one_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, } @pytest.fixture(scope="class") @@ -63,26 +58,19 @@ def tests(self): return { "select_from_incremental.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, "select_from_a.sql": _TESTS__SELECT_FROM_A, - "select_from_incremental_append_new_columns_target.sql": - _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, - "select_from_incremental_sync_all_columns.sql": - _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, - "select_from_incremental_sync_all_columns_target.sql": - _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, - "select_from_incremental_ignore_target.sql": - _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, - "select_from_incremental_append_new_columns.sql": - _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, + "select_from_incremental_append_new_columns_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, + "select_from_incremental_sync_all_columns.sql": _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, + "select_from_incremental_sync_all_columns_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, + "select_from_incremental_ignore_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, + "select_from_incremental_append_new_columns.sql": _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, } - def run_twice_and_assert( - self, include, compare_source, compare_target, project - ): + def run_twice_and_assert(self, include, compare_source, compare_target, project): # dbt run (twice) - run_args = ['run'] + run_args = ["run"] if include: - run_args.extend(('--select', include)) + run_args.extend(("--select", include)) results_one = run_dbt(run_args) assert len(results_one) == 3 @@ -92,33 +80,33 @@ def run_twice_and_assert( check_relations_equal(project.adapter, [compare_source, compare_target]) def run_incremental_append_new_columns(self, project): - select = 'model_a incremental_append_new_columns incremental_append_new_columns_target' - compare_source = 'incremental_append_new_columns' - compare_target = 
'incremental_append_new_columns_target' + select = "model_a incremental_append_new_columns incremental_append_new_columns_target" + compare_source = "incremental_append_new_columns" + compare_target = "incremental_append_new_columns_target" self.run_twice_and_assert(select, compare_source, compare_target, project) def run_incremental_append_new_columns_remove_one(self, project): - select = 'model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target' - compare_source = 'incremental_append_new_columns_remove_one' - compare_target = 'incremental_append_new_columns_remove_one_target' + select = "model_a incremental_append_new_columns_remove_one incremental_append_new_columns_remove_one_target" + compare_source = "incremental_append_new_columns_remove_one" + compare_target = "incremental_append_new_columns_remove_one_target" self.run_twice_and_assert(select, compare_source, compare_target, project) def run_incremental_sync_all_columns(self, project): - select = 'model_a incremental_sync_all_columns incremental_sync_all_columns_target' - compare_source = 'incremental_sync_all_columns' - compare_target = 'incremental_sync_all_columns_target' + select = "model_a incremental_sync_all_columns incremental_sync_all_columns_target" + compare_source = "incremental_sync_all_columns" + compare_target = "incremental_sync_all_columns_target" self.run_twice_and_assert(select, compare_source, compare_target, project) def run_incremental_sync_remove_only(self, project): - select = 'model_a incremental_sync_remove_only incremental_sync_remove_only_target' - compare_source = 'incremental_sync_remove_only' - compare_target = 'incremental_sync_remove_only_target' + select = "model_a incremental_sync_remove_only incremental_sync_remove_only_target" + compare_source = "incremental_sync_remove_only" + compare_target = "incremental_sync_remove_only_target" self.run_twice_and_assert(select, compare_source, compare_target, project) def test_run_incremental_ignore(self, project): - select = 'model_a incremental_ignore incremental_ignore_target' - compare_source = 'incremental_ignore' - compare_target = 'incremental_ignore_target' + select = "model_a incremental_ignore incremental_ignore_target" + compare_source = "incremental_ignore" + compare_target = "incremental_ignore_target" self.run_twice_and_assert(select, compare_source, compare_target, project) def test_run_incremental_append_new_columns(self, project): @@ -130,7 +118,7 @@ def test_run_incremental_sync_all_columns(self, project): self.run_incremental_sync_remove_only(project) def test_run_incremental_fail_on_schema_change(self, project): - select = 'model_a incremental_fail' - run_dbt(['run', '--models', select, '--full-refresh']) - results_two = run_dbt(['run', '--models', select], expect_pass=False) - assert 'Compilation Error' in results_two[1].message + select = "model_a incremental_fail" + run_dbt(["run", "--models", select, "--full-refresh"]) + results_two = run_dbt(["run", "--models", select], expect_pass=False) + assert "Compilation Error" in results_two[1].message diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py index 863c40f1e7d..fe98d68e676 100644 --- a/tests/functional/logging/test_logging.py +++ b/tests/functional/logging/test_logging.py @@ -26,7 +26,7 @@ def test_basic(project, logs_dir): assert log_file node_start = False node_finished = False - for log_line in log_file.split('\n'): + for log_line in log_file.split("\n"): # skip empty lines if len(log_line) == 
0: continue @@ -35,13 +35,13 @@ def test_basic(project, logs_dir): continue log_dct = json.loads(log_line) log_data = log_dct["data"] - log_event = log_dct['info']['name'] + log_event = log_dct["info"]["name"] if log_event == "NodeStart": node_start = True if log_event == "NodeFinished": node_finished = True if node_start and not node_finished: - if log_event == 'NodeExecuting': + if log_event == "NodeExecuting": assert "node_info" in log_data if log_event == "JinjaLogDebug": assert "node_info" in log_data diff --git a/tests/functional/logging/test_meta_logging.py b/tests/functional/logging/test_meta_logging.py index 76c261fe901..189562bba49 100644 --- a/tests/functional/logging/test_meta_logging.py +++ b/tests/functional/logging/test_meta_logging.py @@ -2,10 +2,11 @@ from dbt.tests.util import run_dbt, read_file import json -model1 = 'select 1 as fun' +model1 = "select 1 as fun" model2 = '{{ config(meta={"owners": ["team1", "team2"]})}} select 1 as fun' model3 = '{{ config(meta={"key": 1})}} select 1 as fun' + @pytest.fixture(scope="class") # noqa def models(): return {"model1.sql": model1, "model2.sql": model2, "model3.sql": model3} @@ -20,7 +21,7 @@ def test_meta(project, logs_dir): log_file = read_file(logs_dir, "dbt.log") assert log_file - for log_line in log_file.split('\n'): + for log_line in log_file.split("\n"): # skip empty lines if len(log_line) == 0: continue @@ -34,10 +35,10 @@ def test_meta(project, logs_dir): print(f"--- log_dct: {log_dct}") node_info = log_dct["data"]["node_info"] - node_path = node_info['node_path'] + node_path = node_info["node_path"] if node_path == "model1.sql": - assert node_info['meta'] == {} + assert node_info["meta"] == {} elif node_path == "model2.sql": - assert node_info['meta'] == {"owners": "['team1', 'team2']"} + assert node_info["meta"] == {"owners": "['team1', 'team2']"} elif node_path == "model3.sql": - assert node_info['meta'] == {"key": "1"} + assert node_info["meta"] == {"key": "1"} diff --git a/tests/functional/metrics/test_metric_configs.py b/tests/functional/metrics/test_metric_configs.py index d81c97f79a6..6ad960ec11f 100644 --- a/tests/functional/metrics/test_metric_configs.py +++ b/tests/functional/metrics/test_metric_configs.py @@ -11,7 +11,7 @@ disabled_metric_level_schema_yml, enabled_metric_level_schema_yml, models_people_metrics_sql, - invalid_config_metric_yml + invalid_config_metric_yml, ) diff --git a/tests/functional/metrics/test_metric_helper_functions.py b/tests/functional/metrics/test_metric_helper_functions.py index c1b7a3487b6..da9a0046ba4 100644 --- a/tests/functional/metrics/test_metric_helper_functions.py +++ b/tests/functional/metrics/test_metric_helper_functions.py @@ -3,10 +3,7 @@ from dbt.tests.util import run_dbt, get_manifest from dbt.contracts.graph.metrics import ResolvedMetricReference -from tests.functional.metrics.fixtures import ( - models_people_sql, - basic_metrics_yml -) +from tests.functional.metrics.fixtures import models_people_sql, basic_metrics_yml class TestMetricHelperFunctions: diff --git a/tests/functional/metrics/test_metrics.py b/tests/functional/metrics/test_metrics.py index 10e34770cf1..adc55c3b996 100644 --- a/tests/functional/metrics/test_metrics.py +++ b/tests/functional/metrics/test_metrics.py @@ -21,7 +21,7 @@ derived_metric_old_attr_names_yml, metric_without_timestamp_or_timegrains_yml, invalid_metric_without_timestamp_with_time_grains_yml, - invalid_metric_without_timestamp_with_window_yml + invalid_metric_without_timestamp_with_window_yml, ) diff --git 
a/tests/functional/persist_docs_tests/test_persist_docs.py b/tests/functional/persist_docs_tests/test_persist_docs.py index 7d337edd7cc..8c3822b497a 100644 --- a/tests/functional/persist_docs_tests/test_persist_docs.py +++ b/tests/functional/persist_docs_tests/test_persist_docs.py @@ -47,50 +47,44 @@ def _assert_common_comments(self, *comments): for comment in comments: assert '"with double quotes"' in comment assert """'''abc123'''""" in comment - assert '\n' in comment - assert 'Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting' in comment - assert '/* comment */' in comment - if os.name == 'nt': - assert '--\r\n' in comment or '--\n' in comment + assert "\n" in comment + assert "Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting" in comment + assert "/* comment */" in comment + if os.name == "nt": + assert "--\r\n" in comment or "--\n" in comment else: - assert '--\n' in comment + assert "--\n" in comment def _assert_has_table_comments(self, table_node): - table_comment = table_node['metadata']['comment'] - assert table_comment.startswith('Table model description') + table_comment = table_node["metadata"]["comment"] + assert table_comment.startswith("Table model description") - table_id_comment = table_node['columns']['id']['comment'] - assert table_id_comment.startswith('id Column description') + table_id_comment = table_node["columns"]["id"]["comment"] + assert table_id_comment.startswith("id Column description") - table_name_comment = table_node['columns']['name']['comment'] - assert table_name_comment.startswith( - 'Some stuff here and then a call to') + table_name_comment = table_node["columns"]["name"]["comment"] + assert table_name_comment.startswith("Some stuff here and then a call to") - self._assert_common_comments( - table_comment, table_id_comment, table_name_comment - ) + self._assert_common_comments(table_comment, table_id_comment, table_name_comment) def _assert_has_view_comments( - self, - view_node, - has_node_comments=True, - has_column_comments=True + self, view_node, has_node_comments=True, has_column_comments=True ): - view_comment = view_node['metadata']['comment'] + view_comment = view_node["metadata"]["comment"] if has_node_comments: - assert view_comment.startswith('View model description') + assert view_comment.startswith("View model description") self._assert_common_comments(view_comment) else: assert view_comment is None - view_id_comment = view_node['columns']['id']['comment'] + view_id_comment = view_node["columns"]["id"]["comment"] if has_column_comments: - assert view_id_comment.startswith('id Column description') + assert view_id_comment.startswith("id Column description") self._assert_common_comments(view_id_comment) else: assert view_id_comment is None - view_name_comment = view_node['columns']['name']['comment'] + view_name_comment = view_node["columns"]["name"]["comment"] assert view_name_comment is None @@ -98,9 +92,9 @@ class TestPersistDocs(BasePersistDocsTest): @pytest.fixture(scope="class") def project_config_update(self): return { - 'models': { - 'test': { - '+persist_docs': { + "models": { + "test": { + "+persist_docs": { "relation": True, "columns": True, }, @@ -109,18 +103,18 @@ def project_config_update(self): } def test_has_comments_pglike(self, project): - run_dbt(['docs', 'generate']) - with open('target/catalog.json') as fp: + run_dbt(["docs", "generate"]) + with open("target/catalog.json") as fp: catalog_data = json.load(fp) - assert 'nodes' in catalog_data - assert len(catalog_data['nodes']) == 4 - table_node = 
catalog_data['nodes']['model.test.table_model'] + assert "nodes" in catalog_data + assert len(catalog_data["nodes"]) == 4 + table_node = catalog_data["nodes"]["model.test.table_model"] view_node = self._assert_has_table_comments(table_node) - view_node = catalog_data['nodes']['model.test.view_model'] + view_node = catalog_data["nodes"]["model.test.view_model"] self._assert_has_view_comments(view_node) - no_docs_node = catalog_data['nodes']['model.test.no_docs_model'] + no_docs_node = catalog_data["nodes"]["model.test.no_docs_model"] self._assert_has_view_comments(no_docs_node, False, False) @@ -128,9 +122,9 @@ class TestPersistDocsColumnMissing(BasePersistDocsTest): @pytest.fixture(scope="class") def project_config_update(self): return { - 'models': { - 'test': { - '+persist_docs': { + "models": { + "test": { + "+persist_docs": { "columns": True, }, } @@ -146,11 +140,11 @@ def properties(self): return {"schema.yml": _PROPERITES__SCHEMA_MISSING_COL} def test_postgres_missing_column(self, project): - run_dbt(['docs', 'generate']) - with open('target/catalog.json') as fp: + run_dbt(["docs", "generate"]) + with open("target/catalog.json") as fp: catalog_data = json.load(fp) - assert 'nodes' in catalog_data + assert "nodes" in catalog_data - table_node = catalog_data['nodes']['model.test.missing_column'] - table_id_comment = table_node['columns']['id']['comment'] - assert table_id_comment.startswith('test id column description') + table_node = catalog_data["nodes"]["model.test.missing_column"] + table_id_comment = table_node["columns"]["id"]["comment"] + assert table_id_comment.startswith("test id column description") diff --git a/tests/functional/postgres/test_postgres_indexes.py b/tests/functional/postgres/test_postgres_indexes.py index 64d61d2df87..143a0888755 100644 --- a/tests/functional/postgres/test_postgres_indexes.py +++ b/tests/functional/postgres/test_postgres_indexes.py @@ -70,7 +70,7 @@ def test_incremental(self, project, unique_schema): results = run_dbt(["run", "--models", "incremental"] + additional_argument) assert len(results) == 1 - indexes = self.get_indexes('incremental', project, unique_schema) + indexes = self.get_indexes("incremental", project, unique_schema) expected = [ {"columns": "column_a", "unique": False, "type": "hash"}, {"columns": "column_a, column_b", "unique": True, "type": "btree"}, @@ -78,11 +78,11 @@ def test_incremental(self, project, unique_schema): assert len(indexes) == len(expected) def test_seed(self, project, unique_schema): - for additional_argument in [[], [], ['--full-refresh']]: + for additional_argument in [[], [], ["--full-refresh"]]: results = run_dbt(["seed"] + additional_argument) assert len(results) == 1 - indexes = self.get_indexes('seed', project, unique_schema) + indexes = self.get_indexes("seed", project, unique_schema) expected = [ {"columns": "country_code", "unique": False, "type": "hash"}, {"columns": "country_code, country_name", "unique": True, "type": "btree"}, @@ -94,7 +94,7 @@ def test_snapshot(self, project, unique_schema): results = run_dbt(["snapshot", "--vars", f"version: {version}"]) assert len(results) == 1 - indexes = self.get_indexes('colors', project, unique_schema) + indexes = self.get_indexes("colors", project, unique_schema) expected = [ {"columns": "id", "unique": False, "type": "hash"}, {"columns": "id, color", "unique": True, "type": "btree"}, @@ -130,7 +130,7 @@ def assertCountEqual(self, a, b): assert len(a) == len(b) -class TestPostgresInvalidIndex(): +class TestPostgresInvalidIndex: 
@pytest.fixture(scope="class") def models(self): return { diff --git a/tests/functional/relation_names/test_relation_name.py b/tests/functional/relation_names/test_relation_name.py index 5d941d96da5..f0c241c9302 100644 --- a/tests/functional/relation_names/test_relation_name.py +++ b/tests/functional/relation_names/test_relation_name.py @@ -40,9 +40,13 @@ class TestGeneratedDDLNameRules: def setup_class(self): self.incremental_filename = "my_name_is_51_characters_incremental_abcdefghijklmn" # length is 63 - self.max_length_filename = "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789" + self.max_length_filename = ( + "my_name_is_max_length_chars_abcdefghijklmnopqrstuvwxyz123456789" + ) # length is 64 - self.over_max_length_filename = "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1" + self.over_max_length_filename = ( + "my_name_is_one_over_max_length_chats_abcdefghijklmnopqrstuvwxyz1" + ) self.filename_for_backup_file = "my_name_is_52_characters_abcdefghijklmnopqrstuvwxyz0" @@ -57,14 +61,10 @@ def seeds(self): @pytest.fixture(scope="class") def models(self): return { - f"{self.incremental_filename}.sql": - models__basic_incremental, - f"{self.filename_for_backup_file}.sql": - models__basic_table, - f"{self.max_length_filename}.sql": - models__basic_table, - f"{self.over_max_length_filename}.sql": - models__basic_table, + f"{self.incremental_filename}.sql": models__basic_incremental, + f"{self.filename_for_backup_file}.sql": models__basic_table, + f"{self.max_length_filename}.sql": models__basic_table, + f"{self.over_max_length_filename}.sql": models__basic_table, } @pytest.fixture(scope="class") @@ -110,15 +110,17 @@ def test_long_name_passes_when_temp_tables_are_generated(self): # 63 characters is the character limit for a table name in a postgres database # (assuming compiled without changes from source) def test_name_longer_than_63_does_not_build(self): - err_msg = "Relation name 'my_name_is_one_over_max"\ + err_msg = ( + "Relation name 'my_name_is_one_over_max" "_length_chats_abcdefghijklmnopqrstuvwxyz1' is longer than 63 characters" + ) res = run_dbt( [ "run", "-s", self.over_max_length_filename, ], - expect_pass=False + expect_pass=False, ) assert res[0].status == RunStatus.Error assert err_msg in res[0].message diff --git a/tests/functional/run_operations/test_run_operations.py b/tests/functional/run_operations/test_run_operations.py index f91ef2d8359..68e9fb8c6e0 100644 --- a/tests/functional/run_operations/test_run_operations.py +++ b/tests/functional/run_operations/test_run_operations.py @@ -2,15 +2,8 @@ import pytest import yaml -from dbt.tests.util import ( - check_table_does_exist, - run_dbt -) -from tests.functional.run_operations.fixtures import ( - happy_macros_sql, - sad_macros_sql, - model_sql -) +from dbt.tests.util import check_table_does_exist, run_dbt +from tests.functional.run_operations.fixtures import happy_macros_sql, sad_macros_sql, model_sql class TestOperations: @@ -20,10 +13,7 @@ def models(self): @pytest.fixture(scope="class") def macros(self): - return { - "happy_macros.sql": happy_macros_sql, - "sad_macros.sql": sad_macros_sql - } + return {"happy_macros.sql": happy_macros_sql, "sad_macros.sql": sad_macros_sql} @pytest.fixture(scope="class") def dbt_profile_data(self, unique_schema): @@ -46,59 +36,57 @@ def dbt_profile_data(self, unique_schema): "threads": 4, "host": "localhost", "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), - "user": 'noaccess', - "pass": 'password', + "user": "noaccess", + "pass": "password", 
"dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), - 'schema': unique_schema - } + "schema": unique_schema, + }, }, "target": "default", }, } def run_operation(self, macro, expect_pass=True, extra_args=None, **kwargs): - args = ['run-operation', macro] + args = ["run-operation", macro] if kwargs: - args.extend(('--args', yaml.safe_dump(kwargs))) + args.extend(("--args", yaml.safe_dump(kwargs))) if extra_args: args.extend(extra_args) return run_dbt(args, expect_pass=expect_pass) def test_macro_noargs(self, project): - self.run_operation('no_args') - check_table_does_exist(project.adapter, 'no_args') + self.run_operation("no_args") + check_table_does_exist(project.adapter, "no_args") def test_macro_args(self, project): - self.run_operation('table_name_args', table_name='my_fancy_table') - check_table_does_exist(project.adapter, 'my_fancy_table') + self.run_operation("table_name_args", table_name="my_fancy_table") + check_table_does_exist(project.adapter, "my_fancy_table") def test_macro_exception(self, project): - self.run_operation('syntax_error', False) + self.run_operation("syntax_error", False) def test_macro_missing(self, project): - self.run_operation('this_macro_does_not_exist', False) + self.run_operation("this_macro_does_not_exist", False) def test_cannot_connect(self, project): - self.run_operation('no_args', - extra_args=['--target', 'noaccess'], - expect_pass=False) + self.run_operation("no_args", extra_args=["--target", "noaccess"], expect_pass=False) def test_vacuum(self, project): - run_dbt(['run']) + run_dbt(["run"]) # this should succeed - self.run_operation('vacuum', table_name='model') + self.run_operation("vacuum", table_name="model") def test_vacuum_ref(self, project): - run_dbt(['run']) + run_dbt(["run"]) # this should succeed - self.run_operation('vacuum_ref', ref_target='model') + self.run_operation("vacuum_ref", ref_target="model") def test_select(self, project): - self.run_operation('select_something', name='world') + self.run_operation("select_something", name="world") def test_access_graph(self, project): - self.run_operation('log_graph') + self.run_operation("log_graph") def test_print(self, project): # Tests that calling the `print()` macro does not cause an exception - self.run_operation('print_something') + self.run_operation("print_something") diff --git a/tests/functional/run_query/test_types.py b/tests/functional/run_query/test_types.py index c33ca3478a8..825d3793895 100644 --- a/tests/functional/run_query/test_types.py +++ b/tests/functional/run_query/test_types.py @@ -29,5 +29,5 @@ def macros(self): } def test_nested_types(self, project): - result = run_dbt(['run-operation', 'test_array_results']) + result = run_dbt(["run-operation", "test_array_results"]) assert result.success diff --git a/tests/functional/schema_tests/test_schema_v2_tests.py b/tests/functional/schema_tests/test_schema_v2_tests.py index 36495fd7020..7b80c5d3eb4 100644 --- a/tests/functional/schema_tests/test_schema_v2_tests.py +++ b/tests/functional/schema_tests/test_schema_v2_tests.py @@ -1003,11 +1003,13 @@ def models(self): def test_quoted_schema_file(self, project): try: # A schema file consisting entirely of quotes should not be a problem - run_dbt(['parse']) + run_dbt(["parse"]) except TypeError: - assert False, '`dbt parse` failed with a yaml file that is all comments with the same exception as 3568' + assert ( + False + ), "`dbt parse` failed with a yaml file that is all comments with the same exception as 3568" except Exception: - assert False, '`dbt parse` failed with a yaml 
file that is all comments' + assert False, "`dbt parse` failed with a yaml file that is all comments" class TestWrongSpecificationBlock: diff --git a/tests/functional/severity/test_severity.py b/tests/functional/severity/test_severity.py index 050ccd22325..8a76ef6ac24 100644 --- a/tests/functional/severity/test_severity.py +++ b/tests/functional/severity/test_severity.py @@ -60,10 +60,7 @@ @pytest.fixture(scope="class") def models(): - return { - "sample_model.sql": models__sample_model_sql, - "schema.yml": models__schema_yml - } + return {"sample_model.sql": models__sample_model_sql, "schema.yml": models__schema_yml} @pytest.fixture(scope="class") @@ -79,9 +76,9 @@ def tests(): @pytest.fixture(scope="class") def project_config_update(): return { - 'config-version': 2, - 'seed-paths': ['seeds'], - 'test-paths': ['tests'], + "config-version": 2, + "seed-paths": ["seeds"], + "test-paths": ["tests"], "seeds": { "quote_columns": False, }, @@ -95,25 +92,31 @@ def seed_and_run(self, project): run_dbt(["run"]) def test_generic_default(self, project): - results = run_dbt(['test', '--select', 'test_type:generic']) + results = run_dbt(["test", "--select", "test_type:generic"]) assert len(results) == 2 - assert all([r.status == 'warn' for r in results]) + assert all([r.status == "warn" for r in results]) assert all([r.failures == 2 for r in results]) def test_generic_strict(self, project): - results = run_dbt(['test', '--select', 'test_type:generic', "--vars", '{"strict": True}'], expect_pass=False) + results = run_dbt( + ["test", "--select", "test_type:generic", "--vars", '{"strict": True}'], + expect_pass=False, + ) assert len(results) == 2 - assert all([r.status == 'fail' for r in results]) + assert all([r.status == "fail" for r in results]) assert all([r.failures == 2 for r in results]) def test_singular_default(self, project): - results = run_dbt(['test', '--select', 'test_type:singular']) + results = run_dbt(["test", "--select", "test_type:singular"]) assert len(results) == 1 - assert all([r.status == 'warn' for r in results]) + assert all([r.status == "warn" for r in results]) assert all([r.failures == 2 for r in results]) def test_singular_strict(self, project): - results = run_dbt(['test', '--select', 'test_type:singular', "--vars", '{"strict": True}'], expect_pass=False) + results = run_dbt( + ["test", "--select", "test_type:singular", "--vars", '{"strict": True}'], + expect_pass=False, + ) assert len(results) == 1 - assert all([r.status == 'fail' for r in results]) + assert all([r.status == "fail" for r in results]) assert all([r.failures == 2 for r in results]) diff --git a/tests/functional/statements/test_statements.py b/tests/functional/statements/test_statements.py index 4b8640b8066..b3d615a2b69 100644 --- a/tests/functional/statements/test_statements.py +++ b/tests/functional/statements/test_statements.py @@ -1,11 +1,7 @@ import pathlib import pytest -from dbt.tests.util import ( - run_dbt, - check_relations_equal, - write_file -) +from dbt.tests.util import run_dbt, check_relations_equal, write_file from tests.functional.statements.fixtures import ( models__statement_actual, seeds__statement_actual, @@ -19,7 +15,9 @@ def setUp(self, project): # put seeds in 'seed' not 'seeds' directory (pathlib.Path(project.project_root) / "seed").mkdir(parents=True, exist_ok=True) write_file(seeds__statement_actual, project.project_root, "seed", "seed.csv") - write_file(seeds__statement_expected, project.project_root, "seed", "statement_expected.csv") + write_file( + seeds__statement_expected, 
project.project_root, "seed", "statement_expected.csv" + ) @pytest.fixture(scope="class") def models(self): diff --git a/tests/functional/store_test_failures_tests/test_store_test_failures.py b/tests/functional/store_test_failures_tests/test_store_test_failures.py index ff26d7d97d3..15527c86bd3 100644 --- a/tests/functional/store_test_failures_tests/test_store_test_failures.py +++ b/tests/functional/store_test_failures_tests/test_store_test_failures.py @@ -38,10 +38,8 @@ def seeds(self): "people.csv": seeds__people, "expected_accepted_values.csv": seeds__expected_accepted_values, "expected_failing_test.csv": seeds__expected_failing_test, - "expected_not_null_problematic_model_id.csv": - seeds__expected_not_null_problematic_model_id, - "expected_unique_problematic_model_id.csv": - seeds__expected_unique_problematic_model_id, + "expected_not_null_problematic_model_id.csv": seeds__expected_not_null_problematic_model_id, + "expected_unique_problematic_model_id.csv": seeds__expected_unique_problematic_model_id, } @pytest.fixture(scope="class") @@ -59,8 +57,7 @@ def properties(self): def models(self): return { "fine_model.sql": models__fine_model, - "fine_model_but_with_a_no_good_very_long_name.sql": - models__file_model_but_with_a_no_good_very_long_name, + "fine_model_but_with_a_no_good_very_long_name.sql": models__file_model_but_with_a_no_good_very_long_name, "problematic_model.sql": models__problematic_model, } @@ -71,9 +68,7 @@ def project_config_update(self): "quote_columns": False, "test": self.column_type_overrides(), }, - "tests": { - "+schema": TEST_AUDIT_SCHEMA_SUFFIX - } + "tests": {"+schema": TEST_AUDIT_SCHEMA_SUFFIX}, } def column_type_overrides(self): @@ -87,8 +82,8 @@ def run_tests_store_one_failure(self, project): project.adapter, [ f"{self.test_audit_schema}.unique_problematic_model_id", - "expected_unique_problematic_model_id" - ] + "expected_unique_problematic_model_id", + ], ) def run_tests_store_failures_and_assert(self, project): @@ -98,39 +93,59 @@ def run_tests_store_failures_and_assert(self, project): # compare test results actual = [(r.status, r.failures) for r in results] - expected = [('pass', 0), ('pass', 0), ('pass', 0), ('pass', 0), - ('fail', 2), ('fail', 2), ('fail', 2), ('fail', 10)] + expected = [ + ("pass", 0), + ("pass", 0), + ("pass", 0), + ("pass", 0), + ("fail", 2), + ("fail", 2), + ("fail", 2), + ("fail", 10), + ] assert sorted(actual) == sorted(expected) # compare test results stored in database - check_relations_equal(project.adapter, [ - f"{self.test_audit_schema}.failing_test", - "expected_failing_test" - ]) - check_relations_equal(project.adapter, [ - f"{self.test_audit_schema}.not_null_problematic_model_id", - "expected_not_null_problematic_model_id" - ]) - check_relations_equal(project.adapter, [ - f"{self.test_audit_schema}.unique_problematic_model_id", - "expected_unique_problematic_model_id" - ]) - check_relations_equal(project.adapter, [ - f"{self.test_audit_schema}.accepted_values_problemat" - "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628", - "expected_accepted_values" - ]) + check_relations_equal( + project.adapter, [f"{self.test_audit_schema}.failing_test", "expected_failing_test"] + ) + check_relations_equal( + project.adapter, + [ + f"{self.test_audit_schema}.not_null_problematic_model_id", + "expected_not_null_problematic_model_id", + ], + ) + check_relations_equal( + project.adapter, + [ + f"{self.test_audit_schema}.unique_problematic_model_id", + "expected_unique_problematic_model_id", + ], + ) + check_relations_equal( + project.adapter, + [ 
+ f"{self.test_audit_schema}.accepted_values_problemat" + "ic_mo_c533ab4ca65c1a9dbf14f79ded49b628", + "expected_accepted_values", + ], + ) class TestStoreTestFailures(StoreTestFailuresBase): @pytest.fixture(scope="function") def clean_up(self, project): yield - with project.adapter.connection_named('__test'): - relation = project.adapter.Relation.create(database=project.database, schema=self.test_audit_schema) + with project.adapter.connection_named("__test"): + relation = project.adapter.Relation.create( + database=project.database, schema=self.test_audit_schema + ) project.adapter.drop_schema(relation) - relation = project.adapter.Relation.create(database=project.database, schema=project.test_schema) + relation = project.adapter.Relation.create( + database=project.database, schema=project.test_schema + ) project.adapter.drop_schema(relation) def column_type_overrides(self): diff --git a/tests/functional/test_selection/test_selection_expansion.py b/tests/functional/test_selection/test_selection_expansion.py index e006fd50258..290b8f066ff 100644 --- a/tests/functional/test_selection/test_selection_expansion.py +++ b/tests/functional/test_selection/test_selection_expansion.py @@ -190,7 +190,13 @@ def test_model_a_exclude_specific_test_buildable( ): select = "model_a" exclude = "unique_model_a_fun" - expected = ["just_a", "cf_a_b", "cf_a_src", "relationships_model_a_fun__fun__ref_model_b_", "relationships_model_a_fun__fun__source_my_src_my_tbl_"] + expected = [ + "just_a", + "cf_a_b", + "cf_a_src", + "relationships_model_a_fun__fun__ref_model_b_", + "relationships_model_a_fun__fun__source_my_src_my_tbl_", + ] indirect_selection = "buildable" self.list_tests_and_assert(select, exclude, expected, indirect_selection) diff --git a/tests/functional/timezones/test_timezones.py b/tests/functional/timezones/test_timezones.py index f12d85ca553..7b0135442c8 100644 --- a/tests/functional/timezones/test_timezones.py +++ b/tests/functional/timezones/test_timezones.py @@ -26,20 +26,20 @@ def models(self): @pytest.fixture(scope="class") def dbt_profile_data(self, unique_schema): return { - 'test': { - 'outputs': { - 'dev': { - 'type': 'postgres', - 'threads': 1, - 'host': 'localhost', + "test": { + "outputs": { + "dev": { + "type": "postgres", + "threads": 1, + "host": "localhost", "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), "user": os.getenv("POSTGRES_TEST_USER", "root"), "pass": os.getenv("POSTGRES_TEST_PASS", "password"), "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), - 'schema': unique_schema + "schema": unique_schema, }, }, - 'target': 'dev' + "target": "dev", } } @@ -50,16 +50,18 @@ def query(self, project): run_started_at_est, run_started_at_utc from {schema}.timezones - """.format(schema=project.test_schema) + """.format( + schema=project.test_schema + ) @freeze_time("2022-01-01 03:00:00", tz_offset=0) def test_run_started_at(self, project, query): - results = run_dbt(['run']) + results = run_dbt(["run"]) assert len(results) == 1 - result = project.run_sql(query, fetch='all')[0] + result = project.run_sql(query, fetch="all")[0] est, utc = result - assert utc == '2022-01-01 03:00:00+00:00' - assert est == '2021-12-31 22:00:00-05:00' + assert utc == "2022-01-01 03:00:00+00:00" + assert est == "2021-12-31 22:00:00-05:00" diff --git a/tests/unit/test_deprecations.py b/tests/unit/test_deprecations.py index 3f03e3e35a5..ce80ba3d040 100644 --- a/tests/unit/test_deprecations.py +++ b/tests/unit/test_deprecations.py @@ -13,13 +13,13 @@ def to_be_decorated(): # simpletest that the return value 
is not modified def test_deprecated_func(): - assert(hasattr(to_be_decorated, '__wrapped__')) - assert(to_be_decorated() == 5) + assert hasattr(to_be_decorated, "__wrapped__") + assert to_be_decorated() == 5 class TestDeprecatedFunctions: def is_deprecated(self, func): - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") # TODO: add in log check def test_warn(self): @@ -31,19 +31,19 @@ def runFunc(self, func, *args): return func(*args) def is_deprecated(self, func): - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") # TODO: add in log check def test_missing_config(self): func = dbt.exceptions.missing_config exception = dbt.exceptions.MissingConfigError model = argparse.Namespace() - model.unique_id = '' + model.unique_id = "" name = "" self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(model, name) @@ -52,12 +52,12 @@ def test_missing_materialization(self): exception = dbt.exceptions.MissingMaterializationError model = argparse.Namespace() model.config = argparse.Namespace() - model.config.materialized = '' + model.config.materialized = "" adapter_type = "" self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(model, adapter_type) @@ -68,7 +68,7 @@ def test_missing_relation(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(relation) @@ -85,7 +85,7 @@ def test_raise_ambiguous_alias(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(node_1, node_2, duped_name) @@ -98,7 +98,7 @@ def test_raise_ambiguous_catalog_match(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(unique_id, match_1, match_2) @@ -109,7 +109,7 @@ def test_raise_cache_inconsistent(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(msg) @@ -120,7 +120,7 @@ def test_raise_dataclass_not_dict(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(obj) @@ -131,7 +131,7 @@ def test_raise_compiler_error(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(msg) @@ -142,7 +142,7 @@ def test_raise_database_error(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(msg) @@ -155,7 +155,7 @@ def test_raise_dep_not_found(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(node, node_description, required_pkg) @@ -166,7 +166,7 @@ def test_raise_dependency_error(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(msg) @@ -180,7 +180,7 @@ def test_raise_duplicate_patch_name(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(patch_1, existing_patch_path) @@ -189,7 +189,7 @@ def 
test_raise_duplicate_resource_name(self): exception = dbt.exceptions.DuplicateResourceNameError node_1 = argparse.Namespace() node_1.name = "" - node_1.resource_type = NodeType('model') + node_1.resource_type = NodeType("model") node_1.column_name = "" node_1.unique_id = "" node_1.original_file_path = "" @@ -201,7 +201,7 @@ def test_raise_duplicate_resource_name(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(node_1, node_2) @@ -213,7 +213,7 @@ def test_raise_invalid_property_yml_version(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(path, issue) @@ -224,7 +224,7 @@ def test_raise_not_implemented(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(msg) @@ -238,7 +238,7 @@ def test_relation_wrong_type(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(relation, expected_type) @@ -251,7 +251,7 @@ def test_raise_duplicate_alias(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(kwargs, aliases, canonical_key) @@ -267,7 +267,7 @@ def test_raise_duplicate_source_patch_name(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(patch_1, patch_2) @@ -282,7 +282,7 @@ def test_raise_duplicate_macro_patch_name(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(patch_1, existing_patch_path) @@ -302,7 +302,7 @@ def test_raise_duplicate_macro_name(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(node_1, node_2, namespace) @@ -314,7 +314,7 @@ def test_approximate_relation_match(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(target, relation) @@ -326,7 +326,7 @@ def test_get_relation_returned_multiple_results(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(kwargs, matches) @@ -337,7 +337,7 @@ def test_system_error(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(operation_name) @@ -349,7 +349,7 @@ def test_invalid_materialization_argument(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(name, argument) @@ -363,7 +363,7 @@ def test_bad_package_spec(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(repo, spec, error) @@ -391,7 +391,7 @@ def test_raise_git_cloning_problem(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(repo) @@ -402,7 +402,7 @@ def test_macro_invalid_dispatch_arg(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with 
pytest.raises(exception): func(macro_name) @@ -415,7 +415,7 @@ def test_dependency_not_found(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(node, dependency) @@ -431,7 +431,7 @@ def test_target_not_found(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(node, target_name, target_kind) @@ -445,7 +445,7 @@ def test_doc_target_not_found(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(model, target_doc_name, target_doc_package) @@ -458,7 +458,7 @@ def test_ref_bad_context(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(model, args) @@ -471,7 +471,7 @@ def test_metric_invalid_args(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(model, args) @@ -484,7 +484,7 @@ def test_ref_invalid_args(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(model, args) @@ -496,7 +496,7 @@ def test_invalid_bool_error(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(return_value, macro_name) @@ -510,7 +510,7 @@ def test_invalid_type_error(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(method_name, arg_name, got_value, expected_type) @@ -521,7 +521,7 @@ def test_disallow_secret_env_var(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(env_var_name) @@ -532,7 +532,7 @@ def test_raise_parsing_error(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(msg) @@ -544,7 +544,7 @@ def test_raise_unrecognized_credentials_type(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(typename, supported_types) @@ -558,7 +558,7 @@ def test_raise_patch_targets_not_found(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(patches) @@ -570,7 +570,7 @@ def test_multiple_matching_relations(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(kwargs, matches) @@ -584,7 +584,7 @@ def test_materialization_not_available(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(model, adapter_type) @@ -597,6 +597,6 @@ def test_macro_not_found(self): self.is_deprecated(func) - assert(hasattr(func, '__wrapped__')) + assert hasattr(func, "__wrapped__") with pytest.raises(exception): func(model, target_macro_id) diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 2afee427c4d..5e412e34f33 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -78,10 +78,10 @@ def test_formatting(self): # ensure AdapterLogger and 
subclasses makes all base_msg members # of type string; when someone writes logger.debug(a) where a is # any non-string object - event = AdapterEventDebug(name="dbt_tests", base_msg=[1,2,3], args=(3,)) + event = AdapterEventDebug(name="dbt_tests", base_msg=[1, 2, 3], args=(3,)) assert isinstance(event.base_msg, str) - event = JinjaLogDebug(msg=[1,2,3]) + event = JinjaLogDebug(msg=[1, 2, 3]) assert isinstance(event.msg, str) @@ -124,7 +124,6 @@ def test_event_codes(self): InvalidProfileTemplateYAML(), ProjectNameAlreadyExists(name=""), ProjectCreated(project_name=""), - # D - Deprecations ====================== PackageRedirectDeprecation(old_name="", new_name=""), PackageInstallPathDeprecation(), @@ -134,7 +133,6 @@ def test_event_codes(self): MetricAttributesRenamed(metric_name=""), ExposureNameDeprecation(exposure=""), InternalDeprecation(name="", reason="", suggested_action="", version=""), - # E - DB Adapter ====================== AdapterEventDebug(), AdapterEventInfo(), @@ -178,7 +176,6 @@ def test_event_codes(self): DatabaseErrorRunningHook(hook_type=""), HooksRunning(num_hooks=0, hook_type=""), FinishedRunningStats(stat_line="", execution="", execution_time=0), - # I - Project parsing ====================== ParseCmdOut(msg="testing"), GenericTestFileParse(path=""), @@ -215,7 +212,9 @@ def test_event_codes(self): SeedExceedsLimitAndPathChanged(package_name="", name=""), SeedExceedsLimitChecksumChanged(package_name="", name="", checksum_name=""), UnusedTables(unused_tables=[]), - WrongResourceSchemaFile(patch_name="", resource_type="", file_path="", plural_resource_type=""), + WrongResourceSchemaFile( + patch_name="", resource_type="", file_path="", plural_resource_type="" + ), NoNodeForYamlKey(patch_name="", yaml_key="", file_path=""), MacroNotFoundForPatch(patch_name=""), NodeNotFoundOrDisabled( @@ -228,9 +227,7 @@ def test_event_codes(self): disabled="", ), JinjaLogWarning(), - # M - Deps generation ====================== - GitSparseCheckoutSubdirectory(subdir=""), GitProgressCheckoutRevision(revision=""), GitProgressUpdatingExistingDependency(dir=""), @@ -259,9 +256,7 @@ def test_event_codes(self): RegistryResponseMissingNestedKeys(response=""), RegistryResponseExtraNestedKeys(response=""), DepsSetDownloadDirectory(path=""), - # Q - Node execution ====================== - RunningOperationCaughtError(exc=""), CompileComplete(), FreshnessCheckComplete(), @@ -340,17 +335,13 @@ def test_event_codes(self): NoNodesSelected(), DepsUnpinned(revision="", git=""), NoNodesForSelectionCriteria(spec_raw=""), - # W - Node testing ====================== - CatchableExceptionOnRun(exc=""), InternalErrorOnRun(build_path="", exc=""), GenericExceptionOnRun(build_path="", unique_id="", exc=""), NodeConnectionReleaseError(node_name="", exc=""), FoundStats(stat_line=""), - # Z - misc ====================== - MainKeyboardInterrupt(), MainEncounteredError(exc=""), MainStackTrace(stack_trace=""), @@ -373,7 +364,7 @@ def test_event_codes(self): ServingDocsExitInfo(), RunResultWarning(resource_type="", node_name="", path=""), RunResultFailure(resource_type="", node_name="", path=""), - StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0,"total": 0}), + StatsLine(stats={"error": 0, "skip": 0, "pass": 0, "warn": 0, "total": 0}), RunResultError(msg=""), RunResultErrorNoMessage(status=""), SQLCompiledPath(path=""), @@ -392,7 +383,6 @@ def test_event_codes(self): FlushEventsFailure(), TrackingInitializeFailure(), RunResultWarningMessage(), - # T - tests ====================== IntegrationTestInfo(), 
IntegrationTestDebug(), @@ -400,12 +390,9 @@ def test_event_codes(self): IntegrationTestError(), IntegrationTestException(), UnitTestInfo(), - ] - - class TestEventJSONSerialization: # attempts to test that every event is serializable to json. diff --git a/tests/unit/test_functions.py b/tests/unit/test_functions.py index 4f7cb6845ac..a43361a7e94 100644 --- a/tests/unit/test_functions.py +++ b/tests/unit/test_functions.py @@ -13,15 +13,13 @@ ('{"include": "all"}', True), ('{"include": [NoNodesForSelectionCriteria]}', True), ('{"include": []}', False), - ('{}', False), + ("{}", False), ('{"include": [MainTrackingUserState]}', False), ('{"include": "all", "exclude": [NoNodesForSelectionCriteria]}', False), ], ) def test_warn_or_error_warn_error_options(warn_error_options, expect_compilation_exception): - args = Namespace( - warn_error_options=warn_error_options - ) + args = Namespace(warn_error_options=warn_error_options) flags.set_from_args(args, {}) if expect_compilation_exception: with pytest.raises(EventCompilationError): @@ -38,9 +36,7 @@ def test_warn_or_error_warn_error_options(warn_error_options, expect_compilation ], ) def test_warn_or_error_warn_error(warn_error, expect_compilation_exception): - args = Namespace( - warn_error=warn_error - ) + args = Namespace(warn_error=warn_error) flags.set_from_args(args, {}) if expect_compilation_exception: with pytest.raises(EventCompilationError): diff --git a/tests/unit/test_helper_types.py b/tests/unit/test_helper_types.py index 0c867f47255..f0aa077b46e 100644 --- a/tests/unit/test_helper_types.py +++ b/tests/unit/test_helper_types.py @@ -1,4 +1,3 @@ - import pytest from dbt.helper_types import IncludeExclude, WarnErrorOptions @@ -21,7 +20,7 @@ def test_init_invalid(self): ("*", ["ItemA"], False), (["ItemA"], [], True), (["ItemA", "ItemB"], [], True), - ] + ], ) def test_includes(self, include, exclude, expected_includes): include_exclude = IncludeExclude(include=include, exclude=exclude) diff --git a/tests/unit/test_proto_events.py b/tests/unit/test_proto_events.py index 2b25cd4985c..2b03cac453a 100644 --- a/tests/unit/test_proto_events.py +++ b/tests/unit/test_proto_events.py @@ -13,7 +13,18 @@ from dbt.version import installed -info_keys = {"name", "code", "msg", "level", "invocation_id", "pid", "thread", "ts", "extra", "category"} +info_keys = { + "name", + "code", + "msg", + "level", + "invocation_id", + "pid", + "thread", + "ts", + "extra", + "category", +} def test_events(): @@ -87,7 +98,13 @@ def test_exception_events(): def test_node_info_events(): - meta_dict = {"string-key1": ["value1", 2], "string-key2": {"nested-dict-key": "value2"}, 1: "value-from-non-string-key", "string-key3": 1, "string-key4": ["string1", 1, "string2", 2]} + meta_dict = { + "string-key1": ["value1", 2], + "string-key2": {"nested-dict-key": "value2"}, + 1: "value-from-non-string-key", + "string-key3": 1, + "string-key4": ["string1", 1, "string2", 2], + } node_info = { "node_path": "some_path", "node_name": "some_name", @@ -136,13 +153,7 @@ def test_extra_dict_on_event(monkeypatch): def test_dynamic_level_events(): - event = LogTestResult( - name="model_name", - status="pass", - index=1, - num_models=3, - num_failures=0 - ) + event = LogTestResult(name="model_name", status="pass", index=1, num_models=3, num_failures=0) msg = msg_from_base_event(event, level=EventLevel.INFO) assert msg assert msg.info.level == "info" From e1b5e68904f1fd576fe842b5aa4b517d494fe530 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Tue, 17 Jan 2023 12:22:31 -0500 Subject: [PATCH 
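
The parametrized cases in test_functions.py exercise warn-as-error escalation: with no flags a warning event just logs, but when warn_error_options includes the event (or "all"), warn_or_error raises instead. A sketch of one such case outside the parametrize harness is below; the import paths are assumptions inferred from the test module and the event list above, not verified here.

    from argparse import Namespace

    import pytest

    from dbt import flags
    from dbt.events.functions import warn_or_error
    from dbt.events.types import NoNodesForSelectionCriteria
    from dbt.exceptions import EventCompilationError

    # '{"include": "all"}' escalates every warning event to an error,
    # matching the first parametrized case.
    flags.set_from_args(Namespace(warn_error_options='{"include": "all"}'), {})
    with pytest.raises(EventCompilationError):
        warn_or_error(NoNodesForSelectionCriteria(spec_raw="tag:none"))
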
115/156] Convert 068_partial_parsing_tests (#6614) * Convert partial parsing tests * reformat --- core/dbt/parser/partial.py | 7 +- .../local_dependency/dbt_project.yml | 23 - .../local_dependency/macros/dep_macro.sql | 3 - .../models/model_to_import.sql | 1 - .../local_dependency/models/schema.yml | 10 - .../local_dependency/seeds/seed.csv | 2 - .../test-files/custom_schema_tests1.sql | 19 - .../test-files/custom_schema_tests2.sql | 19 - .../test-files/customers.sql | 19 - .../test-files/customers1.md | 5 - .../test-files/customers2.md | 5 - .../test-files/empty_schema.yml | 0 .../test-files/empty_schema_with_version.yml | 1 - .../test-files/env_var-sources.yml | 18 - .../test-files/env_var_macro.sql | 7 - .../test-files/env_var_macros.yml | 7 - .../test-files/env_var_metrics.yml | 30 - .../test-files/env_var_model.sql | 1 - .../test-files/env_var_model_one.sql | 1 - .../test-files/env_var_model_test.yml | 8 - .../test-files/env_var_schema.yml | 6 - .../test-files/env_var_schema2.yml | 11 - .../test-files/env_var_schema3.yml | 21 - .../test-files/generic_schema.yml | 9 - .../test-files/generic_test.sql | 26 - .../test-files/generic_test_edited.sql | 26 - .../test-files/generic_test_schema.yml | 10 - .../test-files/gsm_override.sql | 6 - .../test-files/gsm_override2.sql | 6 - .../test-files/macros-schema.yml | 8 - .../test-files/macros.yml | 4 - .../test-files/metric_model_a.sql | 21 - .../test-files/model_a.sql | 1 - .../test-files/model_b.sql | 1 - .../test-files/model_color.sql | 1 - .../test-files/model_four1.sql | 1 - .../test-files/model_four2.sql | 1 - .../test-files/model_one.sql | 1 - .../test-files/model_three.sql | 12 - .../test-files/model_three_disabled.sql | 12 - .../test-files/model_three_disabled2.sql | 13 - .../test-files/model_three_modified.sql | 14 - .../test-files/model_two.sql | 1 - .../test-files/models-schema1.yml | 5 - .../test-files/models-schema2.yml | 11 - .../test-files/models-schema2b.yml | 11 - .../test-files/models-schema3.yml | 12 - .../test-files/models-schema4.yml | 13 - .../test-files/models-schema4b.yml | 13 - .../test-files/my_analysis.sql | 1 - .../test-files/my_macro.sql | 7 - .../test-files/my_macro2.sql | 7 - .../test-files/my_metric.yml | 23 - .../test-files/my_test.sql | 2 - .../test-files/orders.sql | 1 - .../test-files/people.sql | 3 - .../test-files/people_metrics.yml | 30 - .../test-files/people_metrics2.yml | 30 - .../test-files/people_metrics3.yml | 17 - .../test-files/raw_customers.csv | 11 - .../test-files/ref_override.sql | 4 - .../test-files/ref_override2.sql | 4 - .../test-files/schema-models-c.yml | 14 - .../test-files/schema-sources1.yml | 17 - .../test-files/schema-sources2.yml | 29 - .../test-files/schema-sources3.yml | 28 - .../test-files/schema-sources4.yml | 30 - .../test-files/schema-sources5.yml | 29 - .../test-files/snapshot.sql | 29 - .../test-files/snapshot2.sql | 30 - .../test-files/sources-tests1.sql | 9 - .../test-files/sources-tests2.sql | 9 - .../test-files/test-macro.sql | 5 - .../test-files/test-macro2.sql | 5 - .../test-files/test_color.sql | 7 - .../test_partial_parsing.py | 580 --------- .../test_pp_metrics.py | 106 -- .../068_partial_parsing_tests/test_pp_vars.py | 416 ------ tests/functional/partial_parsing/fixtures.py | 1126 +++++++++++++++++ .../partial_parsing/test_partial_parsing.py | 643 ++++++++++ .../partial_parsing/test_pp_metrics.py | 73 ++ .../partial_parsing/test_pp_vars.py | 386 ++++++ 82 files changed, 2234 insertions(+), 1940 deletions(-) delete mode 100644 
test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv delete mode 100644 test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/customers.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/customers1.md delete mode 100644 test/integration/068_partial_parsing_tests/test-files/customers2.md delete mode 100644 test/integration/068_partial_parsing_tests/test-files/empty_schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_model.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/generic_schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/generic_test.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/gsm_override.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/macros-schema.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/macros.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_a.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_b.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_color.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_four1.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_four2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_one.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_three.sql delete mode 100644 
test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/model_two.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema1.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema2.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema3.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema4.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_analysis.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_macro.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_macro2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_metric.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/my_test.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/orders.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/people.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/people_metrics.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/raw_customers.csv delete mode 100644 test/integration/068_partial_parsing_tests/test-files/ref_override.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/ref_override2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml delete mode 100644 test/integration/068_partial_parsing_tests/test-files/snapshot.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/snapshot2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/test-macro.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/test-macro2.sql delete mode 100644 test/integration/068_partial_parsing_tests/test-files/test_color.sql delete mode 100644 test/integration/068_partial_parsing_tests/test_partial_parsing.py delete mode 100644 test/integration/068_partial_parsing_tests/test_pp_metrics.py delete mode 100644 test/integration/068_partial_parsing_tests/test_pp_vars.py create mode 100644 
tests/functional/partial_parsing/fixtures.py create mode 100644 tests/functional/partial_parsing/test_partial_parsing.py create mode 100644 tests/functional/partial_parsing/test_pp_metrics.py create mode 100644 tests/functional/partial_parsing/test_pp_vars.py diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py index eafb49efe76..d6afe223278 100644 --- a/core/dbt/parser/partial.py +++ b/core/dbt/parser/partial.py @@ -8,6 +8,7 @@ parse_file_type_to_parser, ) from dbt.events.functions import fire_event +from dbt.events.base_types import EventLevel from dbt.events.types import ( PartialParsingEnabled, PartialParsingFile, @@ -155,7 +156,11 @@ def build_file_diff(self): self.macro_child_map = self.saved_manifest.build_macro_child_map() deleted = len(deleted) + len(deleted_schema_files) changed = len(changed) + len(changed_schema_files) - fire_event(PartialParsingEnabled(deleted=deleted, added=len(added), changed=changed)) + event = PartialParsingEnabled(deleted=deleted, added=len(added), changed=changed) + if os.environ.get("DBT_PP_TEST"): + fire_event(event, level=EventLevel.INFO) + else: + fire_event(event) self.file_diff = file_diff # generate the list of files that need parsing diff --git a/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml b/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml deleted file mode 100644 index d56280a5577..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/dbt_project.yml +++ /dev/null @@ -1,23 +0,0 @@ - -name: 'local_dep' -version: '1.0' -config-version: 2 - -profile: 'default' - -model-paths: ["models"] -analysis-paths: ["analyses"] -test-paths: ["tests"] -seed-paths: ["seeds"] -macro-paths: ["macros"] - -require-dbt-version: '>=0.1.0' - -target-path: "target" # directory which will store compiled SQL files -clean-targets: # directories to be removed by `dbt clean` - - "target" - - "dbt_packages" - - -seeds: - quote_columns: False diff --git a/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql b/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql deleted file mode 100644 index 81e9a0faeef..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/macros/dep_macro.sql +++ /dev/null @@ -1,3 +0,0 @@ -{% macro some_overridden_macro() -%} -100 -{%- endmacro %} diff --git a/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql b/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql deleted file mode 100644 index 4b91aa0f2fa..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/models/model_to_import.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('seed') }} diff --git a/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml b/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml deleted file mode 100644 index 3d804a7c153..00000000000 --- a/test/integration/068_partial_parsing_tests/local_dependency/models/schema.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 -sources: - - name: seed_source - schema: "{{ var('schema_override', target.schema) }}" - tables: - - name: "seed" - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv b/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv deleted file mode 100644 index 3ff3deb87eb..00000000000 --- 
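
The partial.py hunk above promotes PartialParsingEnabled to INFO whenever DBT_PP_TEST is set, so the converted functional tests can read the deleted/added/changed counts from ordinary log output while normal runs stay quiet. The same env-var-gated level pattern in isolation, using a stand-in stdlib logger rather than dbt's event bus:

    import logging
    import os

    logger = logging.getLogger("partial_parsing_sketch")

    def report_partial_parse(deleted: int, added: int, changed: int) -> None:
        # Tests export DBT_PP_TEST to surface this line at INFO;
        # otherwise it stays at DEBUG.
        level = logging.INFO if os.environ.get("DBT_PP_TEST") else logging.DEBUG
        logger.log(
            level,
            "partial parsing: deleted=%s added=%s changed=%s",
            deleted, added, changed,
        )
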
a/test/integration/068_partial_parsing_tests/local_dependency/seeds/seed.csv +++ /dev/null @@ -1,2 +0,0 @@ -id -1 diff --git a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql b/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql deleted file mode 100644 index 0f64eb17c0d..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests1.sql +++ /dev/null @@ -1,19 +0,0 @@ -{% test type_one(model) %} - - select * from ( - - select * from {{ model }} - union all - select * from {{ ref('model_b') }} - - ) as Foo - -{% endtest %} - -{% test type_two(model) %} - - {{ config(severity = "WARN") }} - - select * from {{ model }} - -{% endtest %} diff --git a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql b/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql deleted file mode 100644 index ba5b53fa5a9..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/custom_schema_tests2.sql +++ /dev/null @@ -1,19 +0,0 @@ -{% test type_one(model) %} - - select * from ( - - select * from {{ model }} - union all - select * from {{ ref('model_b') }} - - ) as Foo - -{% endtest %} - -{% test type_two(model) %} - - {{ config(severity = "ERROR") }} - - select * from {{ model }} - -{% endtest %} diff --git a/test/integration/068_partial_parsing_tests/test-files/customers.sql b/test/integration/068_partial_parsing_tests/test-files/customers.sql deleted file mode 100644 index 98e19b557eb..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/customers.sql +++ /dev/null @@ -1,19 +0,0 @@ -with source as ( - - select * from {{ source('seed_sources', 'raw_customers') }} - -), - -renamed as ( - - select - id as customer_id, - first_name, - last_name, - email - - from source - -) - -select * from renamed diff --git a/test/integration/068_partial_parsing_tests/test-files/customers1.md b/test/integration/068_partial_parsing_tests/test-files/customers1.md deleted file mode 100644 index bba48335825..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/customers1.md +++ /dev/null @@ -1,5 +0,0 @@ -{% docs customer_table %} - -This table contains customer data - -{% enddocs %} diff --git a/test/integration/068_partial_parsing_tests/test-files/customers2.md b/test/integration/068_partial_parsing_tests/test-files/customers2.md deleted file mode 100644 index f8306f34e49..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/customers2.md +++ /dev/null @@ -1,5 +0,0 @@ -{% docs customer_table %} - -LOTS of customer data - -{% enddocs %} diff --git a/test/integration/068_partial_parsing_tests/test-files/empty_schema.yml b/test/integration/068_partial_parsing_tests/test-files/empty_schema.yml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml b/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml deleted file mode 100644 index 22817d2a9c7..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/empty_schema_with_version.yml +++ /dev/null @@ -1 +0,0 @@ -version: 2 diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml b/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml deleted file mode 100644 index 2b5809b1cb9..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var-sources.yml +++ /dev/null @@ -1,18 +0,0 
@@ -version: 2 -sources: - - name: seed_sources - schema: "{{ target.schema }}" - database: "{{ env_var('ENV_VAR_DATABASE') }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ env_var('ENV_VAR_SEVERITY') }}" - - unique - - name: first_name - - name: last_name - - name: email - - diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql deleted file mode 100644 index 0bf3eda6c07..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_macro.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% macro do_something(foo2, bar2) %} - - select - '{{ foo2 }}' as foo2, - '{{ bar2 }}' as bar2 - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml deleted file mode 100644 index 8888f65237d..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_macros.yml +++ /dev/null @@ -1,7 +0,0 @@ -version: 2 -macros: - - name: do_something - description: "This is a test macro" - meta: - some_key: "{{ env_var('ENV_VAR_SOME_KEY') }}" - diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml deleted file mode 100644 index b8112fea010..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_metrics.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2 - -metrics: - - - model: "ref('people')" - name: number_of_people - description: Total count of people - label: "Number of people" - calculation_method: count - expression: "*" - timestamp: created_at - time_grains: [day, week, month] - dimensions: - - favorite_color - - loves_dbt - meta: - my_meta: '{{ env_var("ENV_VAR_METRICS") }}' - - - model: "ref('people')" - name: collective_tenure - description: Total number of years of team experience - label: "Collective tenure" - calculation_method: sum - expression: tenure - timestamp: created_at - time_grains: [day] - filters: - - field: loves_dbt - operator: is - value: 'true' diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql deleted file mode 100644 index a926d16d9d8..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model.sql +++ /dev/null @@ -1 +0,0 @@ -select '{{ env_var('ENV_VAR_TEST') }}' as vartest diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql b/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql deleted file mode 100644 index e1875231d2e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model_one.sql +++ /dev/null @@ -1 +0,0 @@ -select 'blue' as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml deleted file mode 100644 index 147b96de1b6..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_model_test.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -models: - - name: model_color - columns: - - name: fun - tests: - - unique: - enabled: "{{ env_var('ENV_VAR_ENABLED', True) }}" diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml 
b/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml deleted file mode 100644 index f8cf1ed9d67..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema.yml +++ /dev/null @@ -1,6 +0,0 @@ -version: 2 - -models: - - name: model_one - config: - materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml deleted file mode 100644 index b1f3f079f6a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema2.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - -models: - - name: model_one - config: - materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" - tests: - - check_color: - column_name: fun - color: "env_var('ENV_VAR_COLOR')" - diff --git a/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml b/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml deleted file mode 100644 index 3b0409637db..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/env_var_schema3.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: 2 - -models: - - name: model_one - config: - materialized: "{{ env_var('TEST_SCHEMA_VAR') }}" - tests: - - check_color: - column_name: fun - color: "env_var('ENV_VAR_COLOR')" - -exposures: - - name: proxy_for_dashboard - description: "This is for the XXX dashboard" - type: "dashboard" - owner: - name: "{{ env_var('ENV_VAR_OWNER') }}" - email: "tester@dashboard.com" - depends_on: - - ref("model_color") - - source("seed_sources", "raw_customers") diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml b/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml deleted file mode 100644 index 9a44074728a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/generic_schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 - -models: - - name: orders - description: "Some order data" - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test.sql b/test/integration/068_partial_parsing_tests/test-files/generic_test.sql deleted file mode 100644 index ca09beaadb7..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/generic_test.sql +++ /dev/null @@ -1,26 +0,0 @@ -{% test is_odd(model, column_name) %} - -with validation as ( - - select - {{ column_name }} as odd_field - - from {{ model }} - -), - -validation_errors as ( - - select - odd_field - - from validation - -- if this is true, then odd_field is actually even! - where (odd_field % 2) = 0 - -) - -select * -from validation_errors - -{% endtest %} \ No newline at end of file diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql b/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql deleted file mode 100644 index 5a3b611ff7a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/generic_test_edited.sql +++ /dev/null @@ -1,26 +0,0 @@ -{% test is_odd(model, column_name) %} - -with validation as ( - - select - {{ column_name }} as odd_field2 - - from {{ model }} - -), - -validation_errors as ( - - select - odd_field2 - - from validation - -- if this is true, then odd_field is actually even! 
- where (odd_field2 % 2) = 0 - -) - -select * -from validation_errors - -{% endtest %} \ No newline at end of file diff --git a/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml b/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml deleted file mode 100644 index c8307bc1021..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/generic_test_schema.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 - -models: - - name: orders - description: "Some order data" - columns: - - name: id - tests: - - unique - - is_odd diff --git a/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql b/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql deleted file mode 100644 index 46c7a39ddaa..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/gsm_override.sql +++ /dev/null @@ -1,6 +0,0 @@ -- custom macro -{% macro generate_schema_name(schema_name, node) %} - - {{ schema_name }}_{{ target.schema }} - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql b/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql deleted file mode 100644 index 1bfddb9dadb..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/gsm_override2.sql +++ /dev/null @@ -1,6 +0,0 @@ -- custom macro xxxx -{% macro generate_schema_name(schema_name, node) %} - - {{ schema_name }}_{{ target.schema }} - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml b/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml deleted file mode 100644 index cf221dec670..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/macros-schema.yml +++ /dev/null @@ -1,8 +0,0 @@ - -version: 2 - -models: - - name: model_a - tests: - - type_one - - type_two diff --git a/test/integration/068_partial_parsing_tests/test-files/macros.yml b/test/integration/068_partial_parsing_tests/test-files/macros.yml deleted file mode 100644 index 9ee72fad0ea..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/macros.yml +++ /dev/null @@ -1,4 +0,0 @@ -version: 2 -macros: - - name: do_something - description: "This is a test macro" diff --git a/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql b/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql deleted file mode 100644 index 010a0c29a02..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/metric_model_a.sql +++ /dev/null @@ -1,21 +0,0 @@ -{% - set metric_list = [ - metric('number_of_people'), - metric('collective_tenure') - ] -%} - -{% if not execute %} - - {% set metric_names = [] %} - {% for m in metric_list %} - {% do metric_names.append(m.metric_name) %} - {% endfor %} - - -- this config does nothing, but it lets us check these values - {{ config(metric_names = metric_names) }} - -{% endif %} - - -select 1 as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_a.sql b/test/integration/068_partial_parsing_tests/test-files/model_a.sql deleted file mode 100644 index 3bd54a4c1b6..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_a.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_b.sql b/test/integration/068_partial_parsing_tests/test-files/model_b.sql deleted file mode 100644 index 01f38b0698e..00000000000 --- 
a/test/integration/068_partial_parsing_tests/test-files/model_b.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as notfun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_color.sql b/test/integration/068_partial_parsing_tests/test-files/model_color.sql deleted file mode 100644 index e1875231d2e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_color.sql +++ /dev/null @@ -1 +0,0 @@ -select 'blue' as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_four1.sql b/test/integration/068_partial_parsing_tests/test-files/model_four1.sql deleted file mode 100644 index 97c5b226d8c..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_four1.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('model_three') }} diff --git a/test/integration/068_partial_parsing_tests/test-files/model_four2.sql b/test/integration/068_partial_parsing_tests/test-files/model_four2.sql deleted file mode 100644 index c38a4c9194f..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_four2.sql +++ /dev/null @@ -1 +0,0 @@ -select fun from {{ ref('model_one') }} diff --git a/test/integration/068_partial_parsing_tests/test-files/model_one.sql b/test/integration/068_partial_parsing_tests/test-files/model_one.sql deleted file mode 100644 index 3bd54a4c1b6..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_one.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as fun diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three.sql b/test/integration/068_partial_parsing_tests/test-files/model_three.sql deleted file mode 100644 index 45aa2b750f7..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_three.sql +++ /dev/null @@ -1,12 +0,0 @@ -{{ config(materialized='table') }} - -with source_data as ( - - select 1 as id - union all - select null as id - -) - -select * -from source_data diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql deleted file mode 100644 index a338a2ef4d2..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled.sql +++ /dev/null @@ -1,12 +0,0 @@ -{{ config(materialized='table', enabled=False) }} - -with source_data as ( - - select 1 as id - union all - select null as id - -) - -select * -from source_data diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql deleted file mode 100644 index 4d416ab516e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_three_disabled2.sql +++ /dev/null @@ -1,13 +0,0 @@ -- Disabled model -{{ config(materialized='table', enabled=False) }} - -with source_data as ( - - select 1 as id - union all - select null as id - -) - -select * -from source_data diff --git a/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql b/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql deleted file mode 100644 index e2d2df486c5..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_three_modified.sql +++ /dev/null @@ -1,14 +0,0 @@ -{{ config(materialized='table') }} - -with source_data as ( - - {#- This is model three #} - - select 1 as id - union all - select null as id - -) - -select * -from source_data diff --git 
a/test/integration/068_partial_parsing_tests/test-files/model_two.sql b/test/integration/068_partial_parsing_tests/test-files/model_two.sql deleted file mode 100644 index 01f38b0698e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/model_two.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as notfun diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml deleted file mode 100644 index 36e5ce68a6e..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema1.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml deleted file mode 100644 index 7c9a890a481..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema2.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml deleted file mode 100644 index c9369126ffc..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema2b.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - columns: - - name: id - tests: - - not_null diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml deleted file mode 100644 index 11e4468d248..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema3.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - tests: - - unique -macros: - - name: do_something - description: "This is a test macro" diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml deleted file mode 100644 index 8087615fe49..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema4.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - config: - enabled: false - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml b/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml deleted file mode 100644 index e73ffcef1de..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/models-schema4b.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: 2 - -models: - - name: model_one - description: "The first model" - - name: model_three - description: "The third model" - config: - enabled: true - columns: - - name: id - tests: - - unique diff --git a/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql b/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql deleted file mode 100644 index ec6959e9a68..00000000000 --- 
a/test/integration/068_partial_parsing_tests/test-files/my_analysis.sql +++ /dev/null @@ -1 +0,0 @@ -select * from customers diff --git a/test/integration/068_partial_parsing_tests/test-files/my_macro.sql b/test/integration/068_partial_parsing_tests/test-files/my_macro.sql deleted file mode 100644 index 0bf3eda6c07..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/my_macro.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% macro do_something(foo2, bar2) %} - - select - '{{ foo2 }}' as foo2, - '{{ bar2 }}' as bar2 - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql b/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql deleted file mode 100644 index e64aafa5ab5..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/my_macro2.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% macro do_something(foo2, bar2) %} - - select - 'foo' as foo2, - 'var' as bar2 - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/my_metric.yml b/test/integration/068_partial_parsing_tests/test-files/my_metric.yml deleted file mode 100644 index 521bc92290f..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/my_metric.yml +++ /dev/null @@ -1,23 +0,0 @@ -version: 2 -metrics: - - name: new_customers - label: New Customers - model: customers - description: "The number of paid customers who are using the product" - calculation_method: count - expression: user_id - timestamp: signup_date - time_grains: [day, week, month] - dimensions: - - plan - - country - filters: - - field: is_paying - value: True - operator: '=' - +meta: - is_okr: True - tags: - - okrs - - diff --git a/test/integration/068_partial_parsing_tests/test-files/my_test.sql b/test/integration/068_partial_parsing_tests/test-files/my_test.sql deleted file mode 100644 index fbfb738bc9a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/my_test.sql +++ /dev/null @@ -1,2 +0,0 @@ -select - * from {{ ref('customers') }} where first_name = '{{ macro_something() }}' diff --git a/test/integration/068_partial_parsing_tests/test-files/orders.sql b/test/integration/068_partial_parsing_tests/test-files/orders.sql deleted file mode 100644 index ef61d616cc1..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/orders.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as id, 101 as user_id, 'pending' as status diff --git a/test/integration/068_partial_parsing_tests/test-files/people.sql b/test/integration/068_partial_parsing_tests/test-files/people.sql deleted file mode 100644 index ce58d41a599..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/people.sql +++ /dev/null @@ -1,3 +0,0 @@ -select 1 as id, 'Drew' as first_name, 'Banin' as last_name, 'yellow' as favorite_color, true as loves_dbt, 5 as tenure, current_timestamp as created_at -union all -select 1 as id, 'Jeremy' as first_name, 'Cohen' as last_name, 'indigo' as favorite_color, true as loves_dbt, 4 as tenure, current_timestamp as created_at diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml deleted file mode 100644 index 99d31a4e632..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2 - -metrics: - - - model: "ref('people')" - name: number_of_people - description: Total count of people - label: "Number of people" - calculation_method: count - 
expression: "*" - timestamp: created_at - time_grains: [day, week, month] - dimensions: - - favorite_color - - loves_dbt - meta: - my_meta: 'testing' - - - model: "ref('people')" - name: collective_tenure - description: Total number of years of team experience - label: "Collective tenure" - calculation_method: sum - expression: tenure - timestamp: created_at - time_grains: [day] - filters: - - field: loves_dbt - operator: is - value: 'true' diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml deleted file mode 100644 index 5f826e66e85..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics2.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2 - -metrics: - - - model: "ref('people')" - name: number_of_people - description: Total count of people - label: "Number of people" - calculation_method: count - expression: "*" - timestamp: created_at - time_grains: [day, week, month] - dimensions: - - favorite_color - - loves_dbt - meta: - my_meta: 'replaced' - - - model: "ref('people')" - name: collective_tenure - description: Total number of years of team experience - label: "Collective tenure" - calculation_method: sum - expression: tenure - timestamp: created_at - time_grains: [day] - filters: - - field: loves_dbt - operator: is - value: 'true' diff --git a/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml b/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml deleted file mode 100644 index b9c640591fc..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/people_metrics3.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 - -metrics: - - - model: "ref('people')" - name: number_of_people - description: Total count of people - label: "Number of people" - calculation_method: count - expression: "*" - timestamp: created_at - time_grains: [day, week, month] - dimensions: - - favorite_color - - loves_dbt - meta: - my_meta: 'replaced' diff --git a/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv b/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv deleted file mode 100644 index 2315be73844..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/raw_customers.csv +++ /dev/null @@ -1,11 +0,0 @@ -id,first_name,last_name,email -1,Michael,Perez,mperez0@chronoengine.com -2,Shawn,Mccoy,smccoy1@reddit.com -3,Kathleen,Payne,kpayne2@cargocollective.com -4,Jimmy,Cooper,jcooper3@cargocollective.com -5,Katherine,Rice,krice4@typepad.com -6,Sarah,Ryan,sryan5@gnu.org -7,Martin,Mcdonald,mmcdonald6@opera.com -8,Frank,Robinson,frobinson7@wunderground.com -9,Jennifer,Franklin,jfranklin8@mail.ru -10,Henry,Welch,hwelch9@list-manage.com diff --git a/test/integration/068_partial_parsing_tests/test-files/ref_override.sql b/test/integration/068_partial_parsing_tests/test-files/ref_override.sql deleted file mode 100644 index cd16793d3c4..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/ref_override.sql +++ /dev/null @@ -1,4 +0,0 @@ -- Macro to override ref -{% macro ref(modelname) %} -{% do return(builtins.ref(modelname)) %} -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql b/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql deleted file mode 100644 index 2e8027d8e80..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/ref_override2.sql +++ /dev/null @@ -1,4 +0,0 
@@ -- Macro to override ref xxxx -{% macro ref(modelname) %} -{% do return(builtins.ref(modelname)) %} -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml b/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml deleted file mode 100644 index 432b5e0efe3..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-models-c.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: 2 - -sources: - - name: seed_source - description: "This is a source override" - overrides: local_dep - schema: "{{ var('schema_override', target.schema) }}" - tables: - - name: "seed" - columns: - - name: id - tests: - - unique - - not_null diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml deleted file mode 100644 index 30363115e09..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources1.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - name: first_name - - name: last_name - - name: email - - diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml deleted file mode 100644 index 5927952917f..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources2.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: 2 - -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - name: first_name - - name: last_name - - name: email - -exposures: - - name: proxy_for_dashboard - description: "This is for the XXX dashboard" - type: "dashboard" - owner: - name: "Dashboard Tester" - email: "tester@dashboard.com" - depends_on: - - ref("model_one") - - ref("raw_customers") - - source("seed_sources", "raw_customers") - diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml deleted file mode 100644 index 54133a9a2f5..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources3.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: 2 - -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - name: first_name - - name: last_name - - name: email - -exposures: - - name: proxy_for_dashboard - description: "This is for the XXX dashboard" - type: "dashboard" - owner: - name: "Dashboard Tester" - email: "tester@dashboard.com" - depends_on: - - ref("model_one") - - source("seed_sources", "raw_customers") - diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml deleted file mode 100644 index af76a0f315a..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources4.yml +++ /dev/null @@ -1,30 +0,0 @@ -version: 2 - -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: 
raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - every_value_is_blue - - name: first_name - - name: last_name - - name: email - -seeds: - - name: raw_customers - description: "Raw customer data" - columns: - - name: id - tests: - - unique - - not_null - - name: first_name - - name: last_name - - name: email - diff --git a/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml b/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml deleted file mode 100644 index 57818771b71..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/schema-sources5.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: 2 - -sources: - - name: seed_sources - schema: "{{ target.schema }}" - tables: - - name: raw_customers - columns: - - name: id - tests: - - not_null: - severity: "{{ 'error' if target.name == 'prod' else 'warn' }}" - - unique - - name: first_name - - name: last_name - - name: email - -seeds: - - name: rad_customers - description: "Raw customer data" - columns: - - name: id - tests: - - unique - - not_null - - name: first_name - - name: last_name - - name: email - diff --git a/test/integration/068_partial_parsing_tests/test-files/snapshot.sql b/test/integration/068_partial_parsing_tests/test-files/snapshot.sql deleted file mode 100644 index c82a2fa5906..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/snapshot.sql +++ /dev/null @@ -1,29 +0,0 @@ -{% snapshot orders_snapshot %} - -{{ - config( - target_schema=schema, - strategy='check', - unique_key='id', - check_cols=['status'], - ) -}} - -select * from {{ ref('orders') }} - -{% endsnapshot %} - -{% snapshot orders2_snapshot %} - -{{ - config( - target_schema=schema, - strategy='check', - unique_key='id', - check_cols=['order_date'], - ) -}} - -select * from {{ ref('orders') }} - -{% endsnapshot %} diff --git a/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql b/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql deleted file mode 100644 index 27d320618c9..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/snapshot2.sql +++ /dev/null @@ -1,30 +0,0 @@ -- add a comment -{% snapshot orders_snapshot %} - -{{ - config( - target_schema=schema, - strategy='check', - unique_key='id', - check_cols=['status'], - ) -}} - -select * from {{ ref('orders') }} - -{% endsnapshot %} - -{% snapshot orders2_snapshot %} - -{{ - config( - target_schema=schema, - strategy='check', - unique_key='id', - check_cols=['order_date'], - ) -}} - -select * from {{ ref('orders') }} - -{% endsnapshot %} diff --git a/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql b/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql deleted file mode 100644 index dd8710f0556..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/sources-tests1.sql +++ /dev/null @@ -1,9 +0,0 @@ - -{% test every_value_is_blue(model, column_name) %} - - select * - from {{ model }} - where {{ column_name }} = 9999 - -{% endtest %} - diff --git a/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql b/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql deleted file mode 100644 index 3abcf30a658..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/sources-tests2.sql +++ /dev/null @@ -1,9 +0,0 @@ - -{% test every_value_is_blue(model, column_name) %} - - select * - from {{ model }} - where 
{{ column_name }} != 99 - -{% endtest %} - diff --git a/test/integration/068_partial_parsing_tests/test-files/test-macro.sql b/test/integration/068_partial_parsing_tests/test-files/test-macro.sql deleted file mode 100644 index f2b1ecfc86b..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/test-macro.sql +++ /dev/null @@ -1,5 +0,0 @@ -{% macro macro_something() %} - - {% do return('macro_something') %} - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql b/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql deleted file mode 100644 index 52b4469cd01..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/test-macro2.sql +++ /dev/null @@ -1,5 +0,0 @@ -{% macro macro_something() %} - - {% do return('some_name') %} - -{% endmacro %} diff --git a/test/integration/068_partial_parsing_tests/test-files/test_color.sql b/test/integration/068_partial_parsing_tests/test-files/test_color.sql deleted file mode 100644 index 0bb1cdcd96c..00000000000 --- a/test/integration/068_partial_parsing_tests/test-files/test_color.sql +++ /dev/null @@ -1,7 +0,0 @@ -{% test check_color(model, column_name, color) %} - - select * - from {{ model }} - where {{ column_name }} = '{{ color }}' - -{% endtest %} diff --git a/test/integration/068_partial_parsing_tests/test_partial_parsing.py b/test/integration/068_partial_parsing_tests/test_partial_parsing.py deleted file mode 100644 index d411a738602..00000000000 --- a/test/integration/068_partial_parsing_tests/test_partial_parsing.py +++ /dev/null @@ -1,580 +0,0 @@ -from dbt.exceptions import CompilationError -from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.files import ParseFileType -from dbt.contracts.results import TestStatus -from dbt.parser.partial import special_override_macros -from test.integration.base import DBTIntegrationTest, use_profile, normalize, get_manifest -import shutil -import os - - -# Note: every test case needs to have separate directories, otherwise -# they will interfere with each other when tests are multi-threaded - -class BasePPTest(DBTIntegrationTest): - - @property - def schema(self): - return "test_068A" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'seed-paths': ['seeds'], - 'test-paths': ['tests'], - 'macro-paths': ['macros'], - 'analysis-paths': ['analyses'], - 'snapshot-paths': ['snapshots'], - 'seeds': { - 'quote_columns': False, - }, - } - - def setup_directories(self): - # Create the directories for the test in the `self.test_root_dir` - # directory after everything else is symlinked. We can copy to and - # delete files in this directory without tests interfering with each other. 
- os.mkdir(os.path.join(self.test_root_dir, 'models')) - os.mkdir(os.path.join(self.test_root_dir, 'tests')) - os.mkdir(os.path.join(self.test_root_dir, 'tests', 'generic')) - os.mkdir(os.path.join(self.test_root_dir, 'seeds')) - os.mkdir(os.path.join(self.test_root_dir, 'macros')) - os.mkdir(os.path.join(self.test_root_dir, 'analyses')) - os.mkdir(os.path.join(self.test_root_dir, 'snapshots')) - os.environ['DBT_PP_TEST'] = 'true' - - - -class ModelTest(BasePPTest): - - @use_profile('postgres') - def test_postgres_pp_models(self): - self.setup_directories() - self.copy_file('test-files/model_one.sql', 'models/model_one.sql') - # initial run - self.run_dbt(['clean']) - results = self.run_dbt(["run"]) - self.assertEqual(len(results), 1) - - # add a model file - self.copy_file('test-files/model_two.sql', 'models/model_two.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - - # add a schema file - self.copy_file('test-files/models-schema1.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - self.assertIn('model.test.model_one', manifest.nodes) - model_one_node = manifest.nodes['model.test.model_one'] - self.assertEqual(model_one_node.description, 'The first model') - self.assertEqual(model_one_node.patch_path, 'test://' + normalize('models/schema.yml')) - - # add a model and a schema file (with a test) at the same time - self.copy_file('test-files/models-schema2.yml', 'models/schema.yml') - self.copy_file('test-files/model_three.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "test"], expect_pass=False) - self.assertEqual(len(results), 1) - manifest = get_manifest() - project_files = [f for f in manifest.files if f.startswith('test://')] - self.assertEqual(len(project_files), 4) - model_3_file_id = 'test://' + normalize('models/model_three.sql') - self.assertIn(model_3_file_id, manifest.files) - model_three_file = manifest.files[model_3_file_id] - self.assertEqual(model_three_file.parse_file_type, ParseFileType.Model) - self.assertEqual(type(model_three_file).__name__, 'SourceFile') - model_three_node = manifest.nodes[model_three_file.nodes[0]] - schema_file_id = 'test://' + normalize('models/schema.yml') - self.assertEqual(model_three_node.patch_path, schema_file_id) - self.assertEqual(model_three_node.description, 'The third model') - schema_file = manifest.files[schema_file_id] - self.assertEqual(type(schema_file).__name__, 'SchemaSourceFile') - self.assertEqual(len(schema_file.tests), 1) - tests = schema_file.get_all_test_ids() - self.assertEqual(tests, ['test.test.unique_model_three_id.6776ac8160']) - unique_test_id = tests[0] - self.assertIn(unique_test_id, manifest.nodes) - - # modify model sql file, ensure description still there - self.copy_file('test-files/model_three_modified.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - model_id = 'model.test.model_three' - self.assertIn(model_id, manifest.nodes) - model_three_node = manifest.nodes[model_id] - self.assertEqual(model_three_node.description, 'The third model') - - # Change the model 3 test from unique to not_null - self.copy_file('test-files/models-schema2b.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "test"], expect_pass=False) - manifest = get_manifest() - schema_file_id = 'test://' + normalize('models/schema.yml') - schema_file = manifest.files[schema_file_id] - tests = 
-        self.assertEqual(tests, ['test.test.not_null_model_three_id.3162ce0a6f'])
-        not_null_test_id = tests[0]
-        self.assertIn(not_null_test_id, manifest.nodes.keys())
-        self.assertNotIn(unique_test_id, manifest.nodes.keys())
-        self.assertEqual(len(results), 1)
-
-        # go back to previous version of schema file, removing patch, test, and model for model three
-        self.copy_file('test-files/models-schema1.yml', 'models/schema.yml')
-        self.rm_file(normalize('models/model_three.sql'))
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 2)
-
-        # remove schema file, still have 3 models
-        self.copy_file('test-files/model_three.sql', 'models/model_three.sql')
-        self.rm_file(normalize('models/schema.yml'))
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 3)
-        manifest = get_manifest()
-        schema_file_id = 'test://' + normalize('models/schema.yml')
-        self.assertNotIn(schema_file_id, manifest.files)
-        project_files = [f for f in manifest.files if f.startswith('test://')]
-        self.assertEqual(len(project_files), 3)
-
-        # Put schema file back and remove a model
-        # referred to in schema file
-        self.copy_file('test-files/models-schema2.yml', 'models/schema.yml')
-        self.rm_file('models/model_three.sql')
-        with self.assertRaises(CompilationError):
-            results = self.run_dbt(["--partial-parse", "--warn-error", "run"])
-
-        # Put model back again
-        self.copy_file('test-files/model_three.sql', 'models/model_three.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 3)
-
-        # Add model four refing model three
-        self.copy_file('test-files/model_four1.sql', 'models/model_four.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 4)
-
-        # Remove model_three and change model_four to ref model_one
-        # and change schema file to remove model_three
-        self.rm_file('models/model_three.sql')
-        self.copy_file('test-files/model_four2.sql', 'models/model_four.sql')
-        self.copy_file('test-files/models-schema1.yml', 'models/schema.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 3)
-
-        # Remove model four, put back model three, put back schema file
-        self.copy_file('test-files/model_three.sql', 'models/model_three.sql')
-        self.copy_file('test-files/models-schema2.yml', 'models/schema.yml')
-        self.rm_file('models/model_four.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 3)
-
-        # disable model three in the schema file
-        self.copy_file('test-files/models-schema4.yml', 'models/schema.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 2)
-
-        # update enabled config to be true for model three in the schema file
-        self.copy_file('test-files/models-schema4b.yml', 'models/schema.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 3)
-
-        # disable model three in the schema file again
-        self.copy_file('test-files/models-schema4.yml', 'models/schema.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 2)
-
-        # remove disabled config for model three in the schema file to check it gets enabled
-        self.copy_file('test-files/models-schema3.yml', 'models/schema.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 3)
-
-        # Add a macro
-        self.copy_file('test-files/my_macro.sql', 'macros/my_macro.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - manifest = get_manifest() - macro_id = 'macro.test.do_something' - self.assertIn(macro_id, manifest.macros) - - # Modify the macro - self.copy_file('test-files/my_macro2.sql', 'macros/my_macro.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Add a macro patch - self.copy_file('test-files/models-schema3.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Remove the macro - self.rm_file('macros/my_macro.sql') - with self.assertRaises(CompilationError): - results = self.run_dbt(["--partial-parse", "--warn-error", "run"]) - - # put back macro file, got back to schema file with no macro - # add separate macro patch schema file - self.copy_file('test-files/models-schema2.yml', 'models/schema.yml') - self.copy_file('test-files/my_macro.sql', 'macros/my_macro.sql') - self.copy_file('test-files/macros.yml', 'macros/macros.yml') - results = self.run_dbt(["--partial-parse", "run"]) - - # delete macro and schema file - self.rm_file('macros/my_macro.sql') - self.rm_file('macros/macros.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Add an empty schema file - self.copy_file('test-files/empty_schema.yml', 'models/eschema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Add version to empty schema file - self.copy_file('test-files/empty_schema_with_version.yml', 'models/eschema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - - # Disable model_three - self.copy_file('test-files/model_three_disabled.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - model_id = 'model.test.model_three' - self.assertIn(model_id, manifest.disabled) - self.assertNotIn(model_id, manifest.nodes) - - # Edit disabled model three - self.copy_file('test-files/model_three_disabled2.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - model_id = 'model.test.model_three' - self.assertIn(model_id, manifest.disabled) - self.assertNotIn(model_id, manifest.nodes) - - # Remove disabled from model three - self.copy_file('test-files/model_three.sql', 'models/model_three.sql') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 3) - manifest = get_manifest() - model_id = 'model.test.model_three' - self.assertIn(model_id, manifest.nodes) - self.assertNotIn(model_id, manifest.disabled) - - -class TestSources(BasePPTest): - - @use_profile('postgres') - def test_postgres_pp_sources(self): - self.setup_directories() - # initial run - self.copy_file('test-files/model_one.sql', 'models/model_one.sql') - self.run_dbt(['clean']) - self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv') - self.copy_file('test-files/sources-tests1.sql', 'macros/tests.sql') - results = self.run_dbt(["run"]) - self.assertEqual(len(results), 1) - - # Partial parse running 'seed' - self.run_dbt(['--partial-parse', 'seed']) - manifest = get_manifest() - seed_file_id = 'test://' + normalize('seeds/raw_customers.csv') - self.assertIn(seed_file_id, manifest.files) - - # Add another seed file - self.copy_file('test-files/raw_customers.csv', 'seeds/more_customers.csv') - 
-        self.run_dbt(['--partial-parse', 'run'])
-        seed_file_id = 'test://' + normalize('seeds/more_customers.csv')
-        manifest = get_manifest()
-        self.assertIn(seed_file_id, manifest.files)
-        seed_id = 'seed.test.more_customers'
-        self.assertIn(seed_id, manifest.nodes)
-
-        # Remove seed file and add a schema file with a source referring to raw_customers
-        self.rm_file(normalize('seeds/more_customers.csv'))
-        self.copy_file('test-files/schema-sources1.yml', 'models/sources.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-        self.assertEqual(len(manifest.sources), 1)
-        file_id = 'test://' + normalize('models/sources.yml')
-        self.assertIn(file_id, manifest.files)
-
-        # add a model referring to raw_customers source
-        self.copy_file('test-files/customers.sql', 'models/customers.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 2)
-
-        # remove sources schema file
-        self.rm_file(normalize('models/sources.yml'))
-        with self.assertRaises(CompilationError):
-            results = self.run_dbt(["--partial-parse", "run"])
-
-        # put back sources and add an exposures file
-        self.copy_file('test-files/schema-sources2.yml', 'models/sources.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-
-        # remove seed referenced in exposures file
-        self.rm_file(normalize('seeds/raw_customers.csv'))
-        with self.assertRaises(CompilationError):
-            results = self.run_dbt(["--partial-parse", "run"])
-
-        # put back seed and remove depends_on from exposure
-        self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv')
-        self.copy_file('test-files/schema-sources3.yml', 'models/sources.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-
-        # Add seed config with test to schema.yml, remove exposure
-        self.copy_file('test-files/schema-sources4.yml', 'models/sources.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-
-        # Change seed name to wrong name
-        self.copy_file('test-files/schema-sources5.yml', 'models/sources.yml')
-        with self.assertRaises(CompilationError):
-            results = self.run_dbt(["--partial-parse", "--warn-error", "run"])
-
-        # Put back seed name to right name
-        self.copy_file('test-files/schema-sources4.yml', 'models/sources.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-
-        # Add docs file customers.md
-        self.copy_file('test-files/customers1.md', 'models/customers.md')
-        results = self.run_dbt(["--partial-parse", "run"])
-
-        # Change docs file customers.md
-        self.copy_file('test-files/customers2.md', 'models/customers.md')
-        results = self.run_dbt(["--partial-parse", "run"])
-
-        # Delete docs file
-        self.rm_file(normalize('models/customers.md'))
-        results = self.run_dbt(["--partial-parse", "run"])
-
-        # Add a data test
-        self.copy_file('test-files/test-macro.sql', 'macros/test-macro.sql')
-        self.copy_file('test-files/my_test.sql', 'tests/my_test.sql')
-        results = self.run_dbt(["--partial-parse", "test"])
-        manifest = get_manifest()
-        self.assertEqual(len(manifest.nodes), 9)
-        test_id = 'test.test.my_test'
-        self.assertIn(test_id, manifest.nodes)
-
-        # Change macro that data test depends on
-        self.copy_file('test-files/test-macro2.sql', 'macros/test-macro.sql')
-        results = self.run_dbt(["--partial-parse", "test"])
-        manifest = get_manifest()
-
-        # Add an analysis
-        self.copy_file('test-files/my_analysis.sql', 'analyses/my_analysis.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-
-        # Remove data test
-        self.rm_file(normalize('tests/my_test.sql'))
-        results = self.run_dbt(["--partial-parse", "test"])
self.run_dbt(["--partial-parse", "test"]) - manifest = get_manifest() - self.assertEqual(len(manifest.nodes), 9) - - # Remove analysis - self.rm_file(normalize('analyses/my_analysis.sql')) - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - self.assertEqual(len(manifest.nodes), 8) - - # Change source test - self.copy_file('test-files/sources-tests2.sql', 'macros/tests.sql') - results = self.run_dbt(["--partial-parse", "run"]) - - -class TestPartialParsingDependency(BasePPTest): - - @property - def packages_config(self): - return { - "packages": [ - { - 'local': 'local_dependency' - } - ] - } - - @use_profile("postgres") - def test_postgres_parsing_with_dependency(self): - self.setup_directories() - self.copy_file('test-files/model_one.sql', 'models/model_one.sql') - self.run_dbt(["clean"]) - self.run_dbt(["deps"]) - self.run_dbt(["seed"]) - self.run_dbt(["run"]) - - # Add a source override - self.copy_file('test-files/schema-models-c.yml', 'models/schema.yml') - results = self.run_dbt(["--partial-parse", "run"]) - self.assertEqual(len(results), 2) - manifest = get_manifest() - self.assertEqual(len(manifest.sources), 1) - source_id = 'source.local_dep.seed_source.seed' - self.assertIn(source_id, manifest.sources) - # We have 1 root model, 1 local_dep model, 1 local_dep seed, 1 local_dep source test, 2 root source tests - self.assertEqual(len(manifest.nodes), 5) - test_id = 'test.local_dep.source_unique_seed_source_seed_id.afa94935ed' - test_node = manifest.nodes[test_id] - - - # Remove a source override - self.rm_file(normalize('models/schema.yml')) - results = self.run_dbt(["--partial-parse", "run"]) - manifest = get_manifest() - self.assertEqual(len(manifest.sources), 1) - - -class TestMacros(BasePPTest): - - @use_profile('postgres') - def test_postgres_nested_macros(self): - self.setup_directories() - self.copy_file('test-files/model_a.sql', 'models/model_a.sql') - self.copy_file('test-files/model_b.sql', 'models/model_b.sql') - self.copy_file('test-files/macros-schema.yml', 'models/schema.yml') - self.copy_file('test-files/custom_schema_tests1.sql', 'macros/custom_schema_tests.sql') - results = self.run_dbt() - self.assertEqual(len(results), 2) - manifest = get_manifest() - macro_child_map = manifest.build_macro_child_map() - macro_unique_id = 'macro.test.test_type_two' - - results = self.run_dbt(['test'], expect_pass=False) - results = sorted(results, key=lambda r: r.node.name) - self.assertEqual(len(results), 2) - # type_one_model_a_ - self.assertEqual(results[0].status, TestStatus.Fail) - self.assertRegex(results[0].node.compiled_code, r'union all') - # type_two_model_a_ - self.assertEqual(results[1].status, TestStatus.Warn) - self.assertEqual(results[1].node.config.severity, 'WARN') - - self.copy_file('test-files/custom_schema_tests2.sql', 'macros/custom_schema_tests.sql') - results = self.run_dbt(["--partial-parse", "test"], expect_pass=False) - manifest = get_manifest() - test_node_id = 'test.test.type_two_model_a_.842bc6c2a7' - self.assertIn(test_node_id, manifest.nodes) - results = sorted(results, key=lambda r: r.node.name) - self.assertEqual(len(results), 2) - # type_two_model_a_ - self.assertEqual(results[1].status, TestStatus.Fail) - self.assertEqual(results[1].node.config.severity, 'ERROR') - - @use_profile('postgres') - def test_postgres_skip_macros(self): - expected_special_override_macros = [ - 'ref', 'source', 'config', 'generate_schema_name', - 'generate_database_name', 'generate_alias_name' - ] - self.assertEqual(special_override_macros, 
-
-        # initial run so we have a msgpack file
-        self.setup_directories()
-        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
-        # use empty_schema file for bug #4850
-        self.copy_file('test-files/empty_schema.yml', 'models/eschema.yml')
-        results = self.run_dbt()
-
-        # add a new ref override macro
-        self.copy_file('test-files/ref_override.sql', 'macros/ref_override.sql')
-        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
-        self.assertTrue('Starting full parse.' in log_output)
-
-        # modify a ref override macro
-        self.copy_file('test-files/ref_override2.sql', 'macros/ref_override.sql')
-        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
-        self.assertTrue('Starting full parse.' in log_output)
-
-        # remove a ref override macro
-        self.rm_file(normalize('macros/ref_override.sql'))
-        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
-        self.assertTrue('Starting full parse.' in log_output)
-
-        # custom generate_schema_name macro
-        self.copy_file('test-files/gsm_override.sql', 'macros/gsm_override.sql')
-        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
-        self.assertTrue('Starting full parse.' in log_output)
-
-        # change generate_schema_name macro
-        self.copy_file('test-files/gsm_override2.sql', 'macros/gsm_override.sql')
-        results, log_output = self.run_dbt_and_capture(['--partial-parse', 'run'])
-        self.assertTrue('Starting full parse.' in log_output)
-
-
-class TestSnapshots(BasePPTest):
-
-    @use_profile('postgres')
-    def test_postgres_pp_snapshots(self):
-
-        # initial run
-        self.setup_directories()
-        self.copy_file('test-files/orders.sql', 'models/orders.sql')
-        results = self.run_dbt()
-        self.assertEqual(len(results), 1)
-
-        # add snapshot
-        self.copy_file('test-files/snapshot.sql', 'snapshots/snapshot.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        snapshot_id = 'snapshot.test.orders_snapshot'
-        self.assertIn(snapshot_id, manifest.nodes)
-        snapshot2_id = 'snapshot.test.orders2_snapshot'
-        self.assertIn(snapshot2_id, manifest.nodes)
-
-        # run snapshot
-        results = self.run_dbt(["--partial-parse", "snapshot"])
-        self.assertEqual(len(results), 2)
-
-        # modify snapshot
-        self.copy_file('test-files/snapshot2.sql', 'snapshots/snapshot.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 1)
-
-        # delete snapshot
-        self.rm_file(normalize('snapshots/snapshot.sql'))
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 1)
-
-
-class TestTests(BasePPTest):
-
-    @use_profile('postgres')
-    def test_postgres_pp_generic_tests(self):
-
-        # initial run
-        self.setup_directories()
-        self.copy_file('test-files/orders.sql', 'models/orders.sql')
-        self.copy_file('test-files/generic_schema.yml', 'models/schema.yml')
-        results = self.run_dbt()
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e']
-        self.assertCountEqual(expected_nodes, list(manifest.nodes.keys()))
-
-        # add generic test in test-path
-        self.copy_file('test-files/generic_test.sql', 'tests/generic/generic_test.sql')
-        self.copy_file('test-files/generic_test_schema.yml', 'models/schema.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        test_id = 'test.test.is_odd_orders_id.82834fdc5b'
-        self.assertIn(test_id, manifest.nodes)
-        expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e', 'test.test.is_odd_orders_id.82834fdc5b']
-        self.assertCountEqual(expected_nodes, list(manifest.nodes.keys()))
-
-        # edit generic test in test-path
-        self.copy_file('test-files/generic_test_edited.sql', 'tests/generic/generic_test.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        test_id = 'test.test.is_odd_orders_id.82834fdc5b'
-        self.assertIn(test_id, manifest.nodes)
-        expected_nodes = ['model.test.orders', 'test.test.unique_orders_id.1360ecc70e', 'test.test.is_odd_orders_id.82834fdc5b']
-        self.assertCountEqual(expected_nodes, list(manifest.nodes.keys()))
diff --git a/test/integration/068_partial_parsing_tests/test_pp_metrics.py b/test/integration/068_partial_parsing_tests/test_pp_metrics.py
deleted file mode 100644
index 5debe6d2b85..00000000000
--- a/test/integration/068_partial_parsing_tests/test_pp_metrics.py
+++ /dev/null
@@ -1,106 +0,0 @@
-from dbt.exceptions import CompilationError
-from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.files import ParseFileType
-from dbt.contracts.results import TestStatus
-from dbt.parser.partial import special_override_macros
-from test.integration.base import DBTIntegrationTest, use_profile, normalize, get_manifest
-import shutil
-import os
-
-
-# Note: every test case needs to have separate directories, otherwise
-# they will interfere with each other when tests are multi-threaded
-
-class BasePPTest(DBTIntegrationTest):
-
-    @property
-    def schema(self):
-        return "test_068A"
-
-    @property
-    def models(self):
-        return "models"
-
-    @property
-    def project_config(self):
-        return {
-            'config-version': 2,
-            'data-paths': ['seeds'],
-            'test-paths': ['tests'],
-            'macro-paths': ['macros'],
-            'analysis-paths': ['analyses'],
-            'snapshot-paths': ['snapshots'],
-            'seeds': {
-                'quote_columns': False,
-            },
-        }
-
-    def setup_directories(self):
-        # Create the directories for the test in the `self.test_root_dir`
-        # directory after everything else is symlinked. We can copy to and
-        # delete files in this directory without tests interfering with each other.
-        os.mkdir(os.path.join(self.test_root_dir, 'models'))
-        os.mkdir(os.path.join(self.test_root_dir, 'tests'))
-        os.mkdir(os.path.join(self.test_root_dir, 'seeds'))
-        os.mkdir(os.path.join(self.test_root_dir, 'macros'))
-        os.mkdir(os.path.join(self.test_root_dir, 'analyses'))
-        os.mkdir(os.path.join(self.test_root_dir, 'snapshots'))
-        os.environ['DBT_PP_TEST'] = 'true'
-
-
-
-class MetricsTest(BasePPTest):
-
-    @use_profile('postgres')
-    def test_postgres_metrics(self):
-        self.setup_directories()
-        # initial run
-        self.copy_file('test-files/people.sql', 'models/people.sql')
-        results = self.run_dbt(["run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        self.assertEqual(len(manifest.nodes), 1)
-
-        # Add metrics yaml file
-        self.copy_file('test-files/people_metrics.yml', 'models/people_metrics.yml')
-        results = self.run_dbt(["run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        self.assertEqual(len(manifest.metrics), 2)
-        metric_people_id = 'metric.test.number_of_people'
-        metric_tenure_id = 'metric.test.collective_tenure'
-        metric_people = manifest.metrics[metric_people_id]
-        metric_tenure = manifest.metrics[metric_tenure_id]
-        expected_meta = {'my_meta': 'testing'}
-        self.assertEqual(metric_people.meta, expected_meta)
-        self.assertEqual(metric_people.refs, [['people']])
-        self.assertEqual(metric_tenure.refs, [['people']])
-        expected_depends_on_nodes = ['model.test.people']
-        self.assertEqual(metric_people.depends_on.nodes, expected_depends_on_nodes)
-
-        # Change metrics yaml files
-        self.copy_file('test-files/people_metrics2.yml', 'models/people_metrics.yml')
-        results = self.run_dbt(["run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        metric_people = manifest.metrics[metric_people_id]
-        expected_meta = {'my_meta': 'replaced'}
-        self.assertEqual(metric_people.meta, expected_meta)
-        expected_depends_on_nodes = ['model.test.people']
-        self.assertEqual(metric_people.depends_on.nodes, expected_depends_on_nodes)
-
-        # Add model referring to metric
-        self.copy_file('test-files/metric_model_a.sql', 'models/metric_model_a.sql')
-        results = self.run_dbt(["run"])
-        manifest = get_manifest()
-        model_a = manifest.nodes['model.test.metric_model_a']
-        expected_depends_on_nodes = ['metric.test.number_of_people', 'metric.test.collective_tenure']
-        self.assertEqual(model_a.depends_on.nodes, expected_depends_on_nodes)
-
-        # Then delete a metric
-        self.copy_file('test-files/people_metrics3.yml', 'models/people_metrics.yml')
-        with self.assertRaises(CompilationError):
-            # We use "parse" here and not "run" because we're checking that the CompilationError
-            # occurs at parse time, not compilation
-            results = self.run_dbt(["parse"])
-
diff --git a/test/integration/068_partial_parsing_tests/test_pp_vars.py b/test/integration/068_partial_parsing_tests/test_pp_vars.py
deleted file mode 100644
index a73bfc43fa3..00000000000
--- a/test/integration/068_partial_parsing_tests/test_pp_vars.py
+++ /dev/null
@@ -1,416 +0,0 @@
-from dbt.exceptions import ParsingError
-from dbt.constants import SECRET_ENV_PREFIX
-from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.files import ParseFileType
-from dbt.contracts.results import TestStatus
-from dbt.parser.partial import special_override_macros
-from test.integration.base import DBTIntegrationTest, use_profile, normalize, get_manifest
-import shutil
-import os
-
-
-# Note: every test case needs to have separate directories, otherwise
-# they will interfere with each other when tests are multi-threaded
-
-class BasePPTest(DBTIntegrationTest):
-
-    @property
-    def schema(self):
-        return "test_068A"
-
-    @property
-    def models(self):
-        return "models"
-
-    @property
-    def project_config(self):
-        return {
-            'config-version': 2,
-            'seed-paths': ['seeds'],
-            'test-paths': ['tests'],
-            'macro-paths': ['macros'],
-            'seeds': {
-                'quote_columns': False,
-            },
-        }
-
-    def setup_directories(self):
-        # Create the directories for the test in the `self.test_root_dir`
-        # directory after everything else is symlinked. We can copy to and
-        # delete files in this directory without tests interfering with each other.
-        os.mkdir(os.path.join(self.test_root_dir, 'models'))
-        os.mkdir(os.path.join(self.test_root_dir, 'tests'))
-        os.mkdir(os.path.join(self.test_root_dir, 'macros'))
-        os.mkdir(os.path.join(self.test_root_dir, 'seeds'))
-        os.environ['DBT_PP_TEST'] = 'true'
-
-
-class EnvVarTest(BasePPTest):
-
-    @use_profile('postgres')
-    def test_postgres_env_vars_models(self):
-        self.setup_directories()
-        self.copy_file('test-files/model_color.sql', 'models/model_color.sql')
-        # initial run
-        self.run_dbt(['clean'])
-        results = self.run_dbt(["run"])
-        self.assertEqual(len(results), 1)
-
-        # copy a file with an env_var call without an env_var
-        self.copy_file('test-files/env_var_model.sql', 'models/env_var_model.sql')
-        with self.assertRaises(ParsingError):
-            results = self.run_dbt(["--partial-parse", "run"])
-
-        # set the env var
-        os.environ['ENV_VAR_TEST'] = 'TestingEnvVars'
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 2)
-        manifest = get_manifest()
-        expected_env_vars = {"ENV_VAR_TEST": "TestingEnvVars"}
-        self.assertEqual(expected_env_vars, manifest.env_vars)
-        model_id = 'model.test.env_var_model'
-        model = manifest.nodes[model_id]
-        model_created_at = model.created_at
-
-        # change the env var
-        os.environ['ENV_VAR_TEST'] = 'second'
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 2)
-        manifest = get_manifest()
-        expected_env_vars = {"ENV_VAR_TEST": "second"}
-        self.assertEqual(expected_env_vars, manifest.env_vars)
-        self.assertNotEqual(model_created_at, manifest.nodes[model_id].created_at)
-
-        # set an env_var in a schema file
-        self.copy_file('test-files/env_var_schema.yml', 'models/schema.yml')
-        self.copy_file('test-files/env_var_model_one.sql', 'models/model_one.sql')
-        with self.assertRaises(ParsingError):
-            results = self.run_dbt(["--partial-parse", "run"])
-
-        # actually set the env_var
-        os.environ['TEST_SCHEMA_VAR'] = 'view'
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view"}
-        self.assertEqual(expected_env_vars, manifest.env_vars)
-
-        # env vars in a source
-        os.environ['ENV_VAR_DATABASE'] = 'dbt'
-        os.environ['ENV_VAR_SEVERITY'] = 'warn'
-        self.copy_file('test-files/raw_customers.csv', 'seeds/raw_customers.csv')
-        self.copy_file('test-files/env_var-sources.yml', 'models/sources.yml')
-        self.run_dbt(['--partial-parse', 'seed'])
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 3)
-        manifest = get_manifest()
-        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "dbt", "ENV_VAR_SEVERITY": "warn"}
-        self.assertEqual(expected_env_vars, manifest.env_vars)
-        self.assertEqual(len(manifest.sources), 1)
-        source_id = 'source.test.seed_sources.raw_customers'
-        source = manifest.sources[source_id]
-        self.assertEqual(source.database, 'dbt')
-        schema_file = manifest.files[source.file_id]
-        test_id = 'test.test.source_not_null_seed_sources_raw_customers_id.e39ee7bf0d'
-        test_node = manifest.nodes[test_id]
-        self.assertEqual(test_node.config.severity, 'WARN')
-
-        # Change severity env var
-        os.environ['ENV_VAR_SEVERITY'] = 'error'
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "dbt", "ENV_VAR_SEVERITY": "error"}
-        self.assertEqual(expected_env_vars, manifest.env_vars)
-        source_id = 'source.test.seed_sources.raw_customers'
-        source = manifest.sources[source_id]
-        schema_file = manifest.files[source.file_id]
-        expected_schema_file_env_vars = {'sources': {'seed_sources': ['ENV_VAR_DATABASE', 'ENV_VAR_SEVERITY']}}
-        self.assertEqual(expected_schema_file_env_vars, schema_file.env_vars)
-        test_node = manifest.nodes[test_id]
-        self.assertEqual(test_node.config.severity, 'ERROR')
-
-        # Change database env var
-        os.environ['ENV_VAR_DATABASE'] = 'test_dbt'
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-        expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view", "ENV_VAR_DATABASE": "test_dbt", "ENV_VAR_SEVERITY": "error"}
-        self.assertEqual(expected_env_vars, manifest.env_vars)
-        source = manifest.sources[source_id]
-        self.assertEqual(source.database, 'test_dbt')
-
-        # Delete database env var
-        del os.environ['ENV_VAR_DATABASE']
-        with self.assertRaises(ParsingError):
-            results = self.run_dbt(["--partial-parse", "run"])
-        os.environ['ENV_VAR_DATABASE'] = 'test_dbt'
-
-        # Add generic test with test kwarg that's rendered late (no curly brackets)
-        os.environ['ENV_VAR_DATABASE'] = 'dbt'
-        self.copy_file('test-files/test_color.sql', 'macros/test_color.sql')
-        results = self.run_dbt(["--partial-parse", "run"])
-        # Add source test using test_color and an env_var for color
-        self.copy_file('test-files/env_var_schema2.yml', 'models/schema.yml')
-        with self.assertRaises(ParsingError):
-            results = self.run_dbt(["--partial-parse", "run"])
-        os.environ['ENV_VAR_COLOR'] = 'green'
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-        test_color_id = 'test.test.check_color_model_one_env_var_ENV_VAR_COLOR___fun.89638de387'
-        test_node = manifest.nodes[test_color_id]
-        # kwarg was rendered but not changed (it will be rendered again when compiled)
-        self.assertEqual(test_node.test_metadata.kwargs['color'], "env_var('ENV_VAR_COLOR')")
-        results = self.run_dbt(["--partial-parse", "test"])
-
-        # Add an exposure with an env_var
-        os.environ['ENV_VAR_OWNER'] = "John Doe"
-        self.copy_file('test-files/env_var_schema3.yml', 'models/schema.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-        expected_env_vars = {
-            "ENV_VAR_TEST": "second",
-            "TEST_SCHEMA_VAR": "view",
-            "ENV_VAR_DATABASE": "dbt",
-            "ENV_VAR_SEVERITY": "error",
-            "ENV_VAR_COLOR": 'green',
-            "ENV_VAR_OWNER": "John Doe",
-        }
-        self.assertEqual(expected_env_vars, manifest.env_vars)
-        exposure = list(manifest.exposures.values())[0]
-        schema_file = manifest.files[exposure.file_id]
-        expected_sf_env_vars = {
-            'models': {
-                'model_one': ['TEST_SCHEMA_VAR', 'ENV_VAR_COLOR']
-            },
-            'exposures': {
-                'proxy_for_dashboard': ['ENV_VAR_OWNER']
-            }
-        }
-        self.assertEqual(expected_sf_env_vars, schema_file.env_vars)
-
-        # add a macro and a macro schema file
-        os.environ['ENV_VAR_SOME_KEY'] = 'toodles'
-        self.copy_file('test-files/env_var_macro.sql', 'macros/env_var_macro.sql')
-        self.copy_file('test-files/env_var_macros.yml', 'macros/env_var_macros.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-        expected_env_vars = {
-            "ENV_VAR_TEST": "second",
-            "TEST_SCHEMA_VAR": "view",
-            "ENV_VAR_DATABASE": "dbt",
-            "ENV_VAR_SEVERITY": "error",
-            "ENV_VAR_COLOR": 'green',
-            "ENV_VAR_OWNER": "John Doe",
-            "ENV_VAR_SOME_KEY": "toodles",
-        }
-        self.assertEqual(expected_env_vars, manifest.env_vars)
-        macro_id = 'macro.test.do_something'
-        macro = manifest.macros[macro_id]
-        self.assertEqual(macro.meta, {"some_key": "toodles"})
-        # change the env var
-        os.environ['ENV_VAR_SOME_KEY'] = 'dumdedum'
-        results = self.run_dbt(["--partial-parse", "run"])
-        manifest = get_manifest()
-        macro = manifest.macros[macro_id]
-        self.assertEqual(macro.meta, {"some_key": "dumdedum"})
-
-        # Add a schema file with a test on model_color and env_var in test enabled config
-        self.copy_file('test-files/env_var_model_test.yml', 'models/schema.yml')
-        results = self.run_dbt(["--partial-parse", "run"])
-        self.assertEqual(len(results), 3)
-        manifest = get_manifest()
-        model_color = manifest.nodes['model.test.model_color']
-        schema_file = manifest.files[model_color.patch_path]
-        expected_env_vars = {'models': {'model_one': ['TEST_SCHEMA_VAR', 'ENV_VAR_COLOR'], 'model_color': ['ENV_VAR_ENABLED']}, 'exposures': {'proxy_for_dashboard': ['ENV_VAR_OWNER']}}
-        self.assertEqual(expected_env_vars, schema_file.env_vars)
-
-        # Add a metrics file with env_vars
-        os.environ['ENV_VAR_METRICS'] = 'TeStInG'
-        self.copy_file('test-files/people.sql', 'models/people.sql')
-        self.copy_file('test-files/env_var_metrics.yml', 'models/metrics.yml')
-        results = self.run_dbt(["run"])
-        manifest = get_manifest()
-        self.assertIn('ENV_VAR_METRICS', manifest.env_vars)
-        self.assertEqual(manifest.env_vars['ENV_VAR_METRICS'], 'TeStInG')
-        metric_node = manifest.metrics['metric.test.number_of_people']
-        self.assertEqual(metric_node.meta, {'my_meta': 'TeStInG'})
-
-        # Change metrics env var
-        os.environ['ENV_VAR_METRICS'] = 'Changed!'
-        results = self.run_dbt(["run"])
-        manifest = get_manifest()
-        metric_node = manifest.metrics['metric.test.number_of_people']
-        self.assertEqual(metric_node.meta, {'my_meta': 'Changed!'})
-
-        # delete the env vars to cleanup
-        del os.environ['ENV_VAR_TEST']
-        del os.environ['ENV_VAR_SEVERITY']
-        del os.environ['ENV_VAR_DATABASE']
-        del os.environ['TEST_SCHEMA_VAR']
-        del os.environ['ENV_VAR_COLOR']
-        del os.environ['ENV_VAR_SOME_KEY']
-        del os.environ['ENV_VAR_OWNER']
-        del os.environ['ENV_VAR_METRICS']
-
-
-class ProjectEnvVarTest(BasePPTest):
-
-    @property
-    def project_config(self):
-        # Need to set the environment variable here initially because
-        # the unittest setup does a load_config.
-        os.environ['ENV_VAR_NAME'] = "Jane Smith"
-        return {
-            'config-version': 2,
-            'seed-paths': ['seeds'],
-            'test-paths': ['tests'],
-            'macro-paths': ['macros'],
-            'seeds': {
-                'quote_columns': False,
-            },
-            'models': {
-                '+meta': {
-                    'meta_name': "{{ env_var('ENV_VAR_NAME') }}"
-                }
-            }
-        }
-
-    @use_profile('postgres')
-    def test_postgres_project_env_vars(self):
-
-        # Initial run
-        self.setup_directories()
-        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
-        self.run_dbt(['clean'])
-        results = self.run_dbt(["run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        state_check = manifest.state_check
-        model_id = 'model.test.model_one'
-        model = manifest.nodes[model_id]
-        self.assertEqual(model.config.meta['meta_name'], 'Jane Smith')
-        env_vars_hash_checksum = state_check.project_env_vars_hash.checksum
-
-        # Change the environment variable
-        os.environ['ENV_VAR_NAME'] = "Jane Doe"
-        results = self.run_dbt(["run"])
-        self.assertEqual(len(results), 1)
-        manifest = get_manifest()
-        model = manifest.nodes[model_id]
-        self.assertEqual(model.config.meta['meta_name'], 'Jane Doe')
-        self.assertNotEqual(env_vars_hash_checksum, manifest.state_check.project_env_vars_hash.checksum)
-
-        # cleanup
-        del os.environ['ENV_VAR_NAME']
-
-
-class ProfileEnvVarTest(BasePPTest):
-
-    @property
-    def profile_config(self):
-        # Need to set these here because the base integration test class
-        # calls 'load_config' before the tests are run.
-        # Note: only the specified profile is rendered, so there's no
-        # point in setting env_vars in non-used profiles.
-        os.environ['ENV_VAR_USER'] = 'root'
-        os.environ['ENV_VAR_PASS'] = 'password'
-        return {
-            'config': {
-                'send_anonymous_usage_stats': False
-            },
-            'test': {
-                'outputs': {
-                    'dev': {
-                        'type': 'postgres',
-                        'threads': 1,
-                        'host': self.database_host,
-                        'port': 5432,
-                        'user': "root",
-                        'pass': "password",
-                        'user': "{{ env_var('ENV_VAR_USER') }}",
-                        'pass': "{{ env_var('ENV_VAR_PASS') }}",
-                        'dbname': 'dbt',
-                        'schema': self.unique_schema()
-                    },
-                },
-                'target': 'dev'
-            }
-        }
-
-    @use_profile('postgres')
-    def test_postgres_profile_env_vars(self):
-
-        # Initial run
-        os.environ['ENV_VAR_USER'] = 'root'
-        os.environ['ENV_VAR_PASS'] = 'password'
-        self.setup_directories()
-        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
-        results = self.run_dbt(["run"])
-        manifest = get_manifest()
-        env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum
-
-        # Change env_vars, the user doesn't exist, this should fail
-        os.environ['ENV_VAR_USER'] = 'fake_user'
-        (results, log_output) = self.run_dbt_and_capture(["run"], expect_pass=False)
-        self.assertTrue('env vars used in profiles.yml have changed' in log_output)
-        manifest = get_manifest()
-        self.assertNotEqual(env_vars_checksum, manifest.state_check.profile_env_vars_hash.checksum)
-
-
-class ProfileSecretEnvVarTest(BasePPTest):
-
-    @property
-    def profile_config(self):
-        # Need to set these here because the base integration test class
-        # calls 'load_config' before the tests are run.
-        # Note: only the specified profile is rendered, so there's no
-        # point in setting env_vars in non-used profiles.
-
-        # user is secret and password is not. postgres on macos doesn't care if the password
-        # changes so we have to change the user. related: https://github.com/dbt-labs/dbt-core/pull/4250
-        os.environ[SECRET_ENV_PREFIX + 'USER'] = 'root'
-        os.environ['ENV_VAR_PASS'] = 'password'
-        return {
-            'config': {
-                'send_anonymous_usage_stats': False
-            },
-            'test': {
-                'outputs': {
-                    'dev': {
-                        'type': 'postgres',
-                        'threads': 1,
-                        'host': self.database_host,
-                        'port': 5432,
-                        'user': "root",
-                        'pass': "password",
-                        'user': "{{ env_var('DBT_ENV_SECRET_USER') }}",
-                        'pass': "{{ env_var('ENV_VAR_PASS') }}",
-                        'dbname': 'dbt',
-                        'schema': self.unique_schema()
-                    },
-                },
-                'target': 'dev'
-            }
-        }
-
-    @use_profile('postgres')
-    def test_postgres_profile_secret_env_vars(self):
-
-        # Initial run
-        os.environ[SECRET_ENV_PREFIX + 'USER'] = 'root'
-        os.environ['ENV_VAR_PASS'] = 'password'
-        self.setup_directories()
-        self.copy_file('test-files/model_one.sql', 'models/model_one.sql')
-        results = self.run_dbt(["run"])
-        manifest = get_manifest()
-        env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum
-
-        # Change a secret var, it shouldn't register because we shouldn't save secrets.
-        os.environ[SECRET_ENV_PREFIX + 'USER'] = 'boop'
-        # this dbt run is going to fail because the password isn't actually the right one,
-        # but that doesn't matter because we just want to see if the manifest has included
-        # the secret in the hash of environment variables.
-        (results, log_output) = self.run_dbt_and_capture(["run"], expect_pass=False)
-        # I020 is the event code for "env vars used in profiles.yml have changed"
-        self.assertFalse('I020' in log_output)
-        manifest = get_manifest()
-        self.assertEqual(env_vars_checksum, manifest.state_check.profile_env_vars_hash.checksum)
-
diff --git a/tests/functional/partial_parsing/fixtures.py b/tests/functional/partial_parsing/fixtures.py
new file mode 100644
index 00000000000..7681b9dcb8c
--- /dev/null
+++ b/tests/functional/partial_parsing/fixtures.py
@@ -0,0 +1,1126 @@
+local_dependency__dbt_project_yml = """
+
+name: 'local_dep'
+version: '1.0'
+config-version: 2
+
+profile: 'default'
+
+model-paths: ["models"]
+analysis-paths: ["analyses"]
+test-paths: ["tests"]
+seed-paths: ["seeds"]
+macro-paths: ["macros"]
+
+require-dbt-version: '>=0.1.0'
+
+target-path: "target" # directory which will store compiled SQL files
+clean-targets: # directories to be removed by `dbt clean`
+  - "target"
+  - "dbt_packages"
+
+
+seeds:
+  quote_columns: False
+
+"""
+
+local_dependency__models__schema_yml = """
+version: 2
+sources:
+  - name: seed_source
+    schema: "{{ var('schema_override', target.schema) }}"
+    tables:
+      - name: "seed"
+        columns:
+          - name: id
+            tests:
+              - unique
+
+"""
+
+local_dependency__models__model_to_import_sql = """
+select * from {{ ref('seed') }}
+
+"""
+
+local_dependency__macros__dep_macro_sql = """
+{% macro some_overridden_macro() -%}
+100
+{%- endmacro %}
+
+"""
+
+local_dependency__seeds__seed_csv = """id
+1
+"""
+
+empty_schema_with_version_yml = """
+version: 2
+
+"""
+
+schema_sources5_yml = """
+version: 2
+
+sources:
+  - name: seed_sources
+    schema: "{{ target.schema }}"
+    tables:
+      - name: raw_customers
+        columns:
+          - name: id
+            tests:
+              - not_null:
+                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
+              - unique
+          - name: first_name
+          - name: last_name
+          - name: email
+
+seeds:
+  - name: rad_customers
+    description: "Raw customer data"
+    columns:
+      - name: id
+        tests:
+          - unique
+          - not_null
+      - name: first_name
+      - name: last_name
+      - name: email
+
+
+"""
+
+my_macro2_sql = """
+{% macro do_something(foo2, bar2) %}
+
+    select
+    'foo' as foo2,
+    'var' as bar2
+
+{% endmacro %}
+
+"""
+
+raw_customers_csv = """id,first_name,last_name,email
+1,Michael,Perez,mperez0@chronoengine.com
+2,Shawn,Mccoy,smccoy1@reddit.com
+3,Kathleen,Payne,kpayne2@cargocollective.com
+4,Jimmy,Cooper,jcooper3@cargocollective.com
+5,Katherine,Rice,krice4@typepad.com
+6,Sarah,Ryan,sryan5@gnu.org
+7,Martin,Mcdonald,mmcdonald6@opera.com
+8,Frank,Robinson,frobinson7@wunderground.com
+9,Jennifer,Franklin,jfranklin8@mail.ru
+10,Henry,Welch,hwelch9@list-manage.com
+"""
+
+model_three_disabled2_sql = """
+- Disabled model
+{{ config(materialized='table', enabled=False) }}
+
+with source_data as (
+
+    select 1 as id
+    union all
+    select null as id
+
+)
+
+select *
+from source_data
+
+"""
+
+schema_sources4_yml = """
+version: 2
+
+sources:
+  - name: seed_sources
+    schema: "{{ target.schema }}"
+    tables:
+      - name: raw_customers
+        columns:
+          - name: id
+            tests:
+              - not_null:
+                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
+              - unique
+              - every_value_is_blue
+          - name: first_name
+          - name: last_name
+          - name: email
+
+seeds:
+  - name: raw_customers
+    description: "Raw customer data"
+    columns:
+      - name: id
+        tests:
+          - unique
+          - not_null
+      - name: first_name
+      - name: last_name
+      - name: email
+
+
+"""
+
+env_var_schema_yml = """
+version: 2
+
+models:
+  - name: model_one
+    config:
+      materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
+
+"""
+
+my_test_sql = """
+select
+  * from {{ ref('customers') }} where first_name = '{{ macro_something() }}'
+
+"""
+
+empty_schema_yml = """
+
+"""
+
+schema_models_c_yml = """
+version: 2
+
+sources:
+  - name: seed_source
+    description: "This is a source override"
+    overrides: local_dep
+    schema: "{{ var('schema_override', target.schema) }}"
+    tables:
+      - name: "seed"
+        columns:
+          - name: id
+            tests:
+              - unique
+              - not_null
+
+"""
+
+env_var_sources_yml = """
+version: 2
+sources:
+  - name: seed_sources
+    schema: "{{ target.schema }}"
+    database: "{{ env_var('ENV_VAR_DATABASE') }}"
+    tables:
+      - name: raw_customers
+        columns:
+          - name: id
+            tests:
+              - not_null:
+                  severity: "{{ env_var('ENV_VAR_SEVERITY') }}"
+              - unique
+          - name: first_name
+          - name: last_name
+          - name: email
+
+
+
+"""
+
+generic_test_edited_sql = """
+{% test is_odd(model, column_name) %}
+
+with validation as (
+
+    select
+        {{ column_name }} as odd_field2
+
+    from {{ model }}
+
+),
+
+validation_errors as (
+
+    select
+        odd_field2
+
+    from validation
+    -- if this is true, then odd_field is actually even!
+    where (odd_field2 % 2) = 0
+
+)
+
+select *
+from validation_errors
+
+{% endtest %}
+"""
+
+schema_sources1_yml = """
+version: 2
+sources:
+  - name: seed_sources
+    schema: "{{ target.schema }}"
+    tables:
+      - name: raw_customers
+        columns:
+          - name: id
+            tests:
+              - not_null:
+                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
+              - unique
+          - name: first_name
+          - name: last_name
+          - name: email
+
+
+
+"""
+
+schema_sources3_yml = """
+version: 2
+
+sources:
+  - name: seed_sources
+    schema: "{{ target.schema }}"
+    tables:
+      - name: raw_customers
+        columns:
+          - name: id
+            tests:
+              - not_null:
+                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
+              - unique
+          - name: first_name
+          - name: last_name
+          - name: email
+
+exposures:
+  - name: proxy_for_dashboard
+    description: "This is for the XXX dashboard"
+    type: "dashboard"
+    owner:
+      name: "Dashboard Tester"
+      email: "tester@dashboard.com"
+    depends_on:
+      - ref("model_one")
+      - source("seed_sources", "raw_customers")
+
+
+"""
+
+my_analysis_sql = """
+select * from customers
+
+"""
+
+schema_sources2_yml = """
+version: 2
+
+sources:
+  - name: seed_sources
+    schema: "{{ target.schema }}"
+    tables:
+      - name: raw_customers
+        columns:
+          - name: id
+            tests:
+              - not_null:
+                  severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
+              - unique
+          - name: first_name
+          - name: last_name
+          - name: email
+
+exposures:
+  - name: proxy_for_dashboard
+    description: "This is for the XXX dashboard"
+    type: "dashboard"
+    owner:
+      name: "Dashboard Tester"
+      email: "tester@dashboard.com"
+    depends_on:
+      - ref("model_one")
+      - ref("raw_customers")
+      - source("seed_sources", "raw_customers")
+
+
+"""
+
+model_color_sql = """
+select 'blue' as fun
+
+"""
+
+my_metric_yml = """
+version: 2
+metrics:
+  - name: new_customers
+    label: New Customers
+    model: customers
+    description: "The number of paid customers who are using the product"
+    calculation_method: count
+    expression: user_id
+    timestamp: signup_date
+    time_grains: [day, week, month]
+    dimensions:
+      - plan
+      - country
+    filters:
+      - field: is_paying
+        value: True
+        operator: '='
+
+meta:
+    is_okr: True
+  tags:
+    - okrs
+
+
+
+"""
+
+env_var_schema2_yml = """
+version: 2
+
+models:
+  - name: model_one
+    config:
+      materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
+    tests:
+      - check_color:
+          column_name: fun
+          color: "env_var('ENV_VAR_COLOR')"
+
+
+"""
+
+gsm_override_sql = """
+- custom macro
+{% macro generate_schema_name(schema_name, node) %}
+
+    {{ schema_name }}_{{ target.schema }}
+
+{% endmacro %}
+
+"""
+
+model_four1_sql = """
+select * from {{ ref('model_three') }}
+
+"""
+
+model_one_sql = """
+select 1 as fun
+
+"""
+
+env_var_schema3_yml = """
+version: 2
+
+models:
+  - name: model_one
+    config:
+      materialized: "{{ env_var('TEST_SCHEMA_VAR') }}"
+    tests:
+      - check_color:
+          column_name: fun
+          color: "env_var('ENV_VAR_COLOR')"
+
+exposures:
+  - name: proxy_for_dashboard
+    description: "This is for the XXX dashboard"
+    type: "dashboard"
+    owner:
+      name: "{{ env_var('ENV_VAR_OWNER') }}"
+      email: "tester@dashboard.com"
+    depends_on:
+      - ref("model_color")
+      - source("seed_sources", "raw_customers")
+
+"""
+
+env_var_metrics_yml = """
+version: 2
+
+metrics:
+
+  - model: "ref('people')"
+    name: number_of_people
+    description: Total count of people
+    label: "Number of people"
+    calculation_method: count
+    expression: "*"
+    timestamp: created_at
+    time_grains: [day, week, month]
+    dimensions:
+      - favorite_color
+      - loves_dbt
+    meta:
+        my_meta: '{{ env_var("ENV_VAR_METRICS") }}'
+
+  - model: "ref('people')"
+    name: collective_tenure
+    description: Total number of years of team experience
+    label: "Collective tenure"
+    calculation_method: sum
+    expression: tenure
+    timestamp: created_at
+    time_grains: [day]
+    filters:
+      - field: loves_dbt
+        operator: is
+        value: 'true'
+
+"""
+
+customers_sql = """
+with source as (
+
+    select * from {{ source('seed_sources', 'raw_customers') }}
+
+),
+
+renamed as (
+
+    select
+        id as customer_id,
+        first_name,
+        last_name,
+        email
+
+    from source
+
+)
+
+select * from renamed
+
+"""
+
+model_four2_sql = """
+select fun from {{ ref('model_one') }}
+
+"""
+
+env_var_model_sql = """
+select '{{ env_var('ENV_VAR_TEST') }}' as vartest
+
+"""
+
+env_var_model_one_sql = """
+select 'blue' as fun
+
+"""
+
+custom_schema_tests2_sql = """
+{% test type_one(model) %}
+
+    select * from (
+
+        select * from {{ model }}
+        union all
+        select * from {{ ref('model_b') }}
+
+    ) as Foo
+
+{% endtest %}
+
+{% test type_two(model) %}
+
+    {{ config(severity = "ERROR") }}
+
+    select * from {{ model }}
+
+{% endtest %}
+
+"""
+
+metric_model_a_sql = """
+{%
+    set metric_list = [
+        metric('number_of_people'),
+        metric('collective_tenure')
+    ]
+%}
+
+{% if not execute %}
+
+    {% set metric_names = [] %}
+    {% for m in metric_list %}
+        {% do metric_names.append(m.metric_name) %}
+    {% endfor %}
+
+    -- this config does nothing, but it lets us check these values
+    {{ config(metric_names = metric_names) }}
+
+{% endif %}
+
+
+select 1 as fun
+
+"""
+
+model_b_sql = """
+select 1 as notfun
+
+"""
+
+customers2_md = """
+{% docs customer_table %}
+
+LOTS of customer data
+
+{% enddocs %}
+
+"""
+
+custom_schema_tests1_sql = """
+{% test type_one(model) %}
+
+    select * from (
+
+        select * from {{ model }}
+        union all
+        select * from {{ ref('model_b') }}
+
+    ) as Foo
+
+{% endtest %}
+
+{% test type_two(model) %}
+
+    {{ config(severity = "WARN") }}
+
+    select * from {{ model }}
+
+{% endtest %}
+
+"""
+
+people_metrics_yml = """
+version: 2
+
+metrics:
+
+  - model: "ref('people')"
+    name: number_of_people
+    description: Total count of people
+    label: "Number of people"
+    calculation_method: count
+    expression: "*"
+    timestamp: created_at
+    time_grains: [day, week, month]
+    dimensions:
+      - favorite_color
+      - loves_dbt
+    meta:
+        my_meta: 'testing'
+
+  - model: "ref('people')"
+    name: collective_tenure
+    description: Total number of years of team experience
+    label: "Collective tenure"
+    calculation_method: sum
+    expression: tenure
+    timestamp: created_at
+    time_grains: [day]
+    filters:
+      - field: loves_dbt
+        operator: is
+        value: 'true'
+
+"""
+
+people_sql = """
+select 1 as id, 'Drew' as first_name, 'Banin' as last_name, 'yellow' as favorite_color, true as loves_dbt, 5 as tenure, current_timestamp as created_at
+union all
+select 1 as id, 'Jeremy' as first_name, 'Cohen' as last_name, 'indigo' as favorite_color, true as loves_dbt, 4 as tenure, current_timestamp as created_at
+
+"""
+
+orders_sql = """
+select 1 as id, 101 as user_id, 'pending' as status
+
+"""
+
+model_a_sql = """
+select 1 as fun
+
+"""
+
+model_three_disabled_sql = """
+{{ config(materialized='table', enabled=False) }}
+
+with source_data as (
+
+    select 1 as id
+    union all
+    select null as id
+
+)
+
+select *
+from source_data
+
+"""
+
+models_schema2b_yml = """
+version: 2
+
+models:
+  - name: model_one
+    description: "The first model"
+  - name: model_three
+    description: "The third model"
+    columns:
+      - name: id
+        tests:
+          - not_null
+
+"""
+
+env_var_macros_yml = """
+version: 2
+macros:
+  - name: do_something
+    description: "This is a test macro"
+    meta:
+      some_key: "{{ env_var('ENV_VAR_SOME_KEY') }}"
+
+
+"""
+
+models_schema4_yml = """
+version: 2
+
+models:
+  - name: model_one
+    description: "The first model"
+  - name: model_three
+    description: "The third model"
+    config:
+      enabled: false
+    columns:
+      - name: id
+        tests:
+          - unique
+
+"""
+
+model_two_sql = """
+select 1 as notfun
+
+"""
+
+generic_test_schema_yml = """
+version: 2
+
+models:
+  - name: orders
+    description: "Some order data"
+    columns:
+      - name: id
+        tests:
+          - unique
+          - is_odd
+
+"""
+
+customers1_md = """
+{% docs customer_table %}
+
+This table contains customer data
+
+{% enddocs %}
+
+"""
+
+model_three_modified_sql = """
+{{ config(materialized='table') }}
+
+with source_data as (
+
+    {#- This is model three #}
+
+    select 1 as id
+    union all
+    select null as id
+
+)
+
+select *
+from source_data
+
+"""
+
+macros_yml = """
+version: 2
+macros:
+  - name: do_something
+    description: "This is a test macro"
+
+"""
+
+test_color_sql = """
+{% test check_color(model, column_name, color) %}
+
+    select *
+    from {{ model }}
+    where {{ column_name }} = '{{ color }}'
+
+{% endtest %}
+
+"""
+
+models_schema2_yml = """
+version: 2
+
+models:
+  - name: model_one
+    description: "The first model"
+  - name: model_three
+    description: "The third model"
+    columns:
+      - name: id
+        tests:
+          - unique
+
+"""
+
+gsm_override2_sql = """
+- custom macro xxxx
+{% macro generate_schema_name(schema_name, node) %}
+
+    {{ schema_name }}_{{ target.schema }}
+
+{% endmacro %}
+
+"""
+
+models_schema3_yml = """
+version: 2
+
+models:
+  - name: model_one
+    description: "The first model"
+  - name: model_three
+    description: "The third model"
+    tests:
+      - unique
+macros:
+  - name: do_something
+    description: "This is a test macro"
+
+"""
+
+generic_test_sql = """
+{% test is_odd(model, column_name) %}
+
+with validation as (
+
+    select
+        {{ column_name }} as odd_field
+
+    from {{ model }}
+
+),
+
+validation_errors as (
+
+    select
+        odd_field
+
+    from validation
+    -- if this is true, then odd_field is actually even!
+    where (odd_field % 2) = 0
+
+)
+
+select *
+from validation_errors
+
+{% endtest %}
+"""
+
+env_var_model_test_yml = """
+version: 2
+models:
+  - name: model_color
+    columns:
+      - name: fun
+        tests:
+          - unique:
+              enabled: "{{ env_var('ENV_VAR_ENABLED', True) }}"
+
+"""
+
+model_three_sql = """
+{{ config(materialized='table') }}
+
+with source_data as (
+
+    select 1 as id
+    union all
+    select null as id
+
+)
+
+select *
+from source_data
+
+"""
+
+ref_override2_sql = """
+- Macro to override ref xxxx
+{% macro ref(modelname) %}
+{% do return(builtins.ref(modelname)) %}
+{% endmacro %}
+
+"""
+
+models_schema1_yml = """
+version: 2
+
+models:
+  - name: model_one
+    description: "The first model"
+
+"""
+
+macros_schema_yml = """
+
+version: 2
+
+models:
+  - name: model_a
+    tests:
+      - type_one
+      - type_two
+
+"""
+
+my_macro_sql = """
+{% macro do_something(foo2, bar2) %}
+
+    select
+    '{{ foo2 }}' as foo2,
+    '{{ bar2 }}' as bar2
+
+{% endmacro %}
+
+"""
+
+snapshot_sql = """
+{% snapshot orders_snapshot %}
+
+{{
+    config(
+        target_schema=schema,
+        strategy='check',
+        unique_key='id',
+        check_cols=['status'],
+    )
+}}
+
+select * from {{ ref('orders') }}
+
+{% endsnapshot %}
+
+{% snapshot orders2_snapshot %}
+
+{{
+    config(
+        target_schema=schema,
+        strategy='check',
+        unique_key='id',
+        check_cols=['order_date'],
+    )
+}}
+
+select * from {{ ref('orders') }}
+
+{% endsnapshot %}
+
+"""
+
+models_schema4b_yml = """
+version: 2
+
+models:
+  - name: model_one
+    description: "The first model"
+  - name: model_three
+    description: "The third model"
+    config:
+      enabled: true
+    columns:
+      - name: id
+        tests:
+          - unique
+
+"""
+
+test_macro_sql = """
+{% macro macro_something() %}
+
+    {% do return('macro_something') %}
+
+{% endmacro %}
+
+"""
+
+people_metrics2_yml = """
+version: 2
+
+metrics:
+
+  - model: "ref('people')"
+    name: number_of_people
+    description: Total count of people
+    label: "Number of people"
+    calculation_method: count
+    expression: "*"
+    timestamp: created_at
+    time_grains: [day, week, month]
+    dimensions:
+      - favorite_color
+      - loves_dbt
+    meta:
+        my_meta: 'replaced'
+
+  - model: "ref('people')"
+    name: collective_tenure
+    description: Total number of years of team experience
+    label: "Collective tenure"
+    calculation_method: sum
+    expression: tenure
+    timestamp: created_at
+    time_grains: [day]
+    filters:
+      - field: loves_dbt
+        operator: is
+        value: 'true'
+
+"""
+
+generic_schema_yml = """
+version: 2
+
+models:
+  - name: orders
+    description: "Some order data"
+    columns:
+      - name: id
+        tests:
+          - unique
+
+"""
+
+snapshot2_sql = """
+- add a comment
+{% snapshot orders_snapshot %}
+
+{{
+    config(
+        target_schema=schema,
+        strategy='check',
+        unique_key='id',
+        check_cols=['status'],
+    )
+}}
+
+select * from {{ ref('orders') }}
+
+{% endsnapshot %}
+
+{% snapshot orders2_snapshot %}
+
+{{
+    config(
+        target_schema=schema,
+        strategy='check',
+        unique_key='id',
+        check_cols=['order_date'],
+    )
+}}
+
+select * from {{ ref('orders') }}
+
+{% endsnapshot %}
+
+"""
+
+sources_tests2_sql = """
+
+{% test every_value_is_blue(model, column_name) %}
+
+    select *
+    from {{ model }}
+    where {{ column_name }} != 99
+
+{% endtest %}
+
+
+"""
+
+people_metrics3_yml = """
+version: 2
+
+metrics:
+
+  - model: "ref('people')"
+    name: number_of_people
+    description: Total count of people
+    label: "Number of people"
+    calculation_method: count
+    expression: "*"
+    timestamp: created_at
+    time_grains: [day, week, month]
+    dimensions:
+      - favorite_color
+      - loves_dbt
+    meta:
+        my_meta: 'replaced'
+
+""" + +ref_override_sql = """ +- Macro to override ref +{% macro ref(modelname) %} +{% do return(builtins.ref(modelname)) %} +{% endmacro %} + +""" + +test_macro2_sql = """ +{% macro macro_something() %} + + {% do return('some_name') %} + +{% endmacro %} + +""" + +env_var_macro_sql = """ +{% macro do_something(foo2, bar2) %} + + select + '{{ foo2 }}' as foo2, + '{{ bar2 }}' as bar2 + +{% endmacro %} + +""" + +sources_tests1_sql = """ + +{% test every_value_is_blue(model, column_name) %} + + select * + from {{ model }} + where {{ column_name }} = 9999 + +{% endtest %} + + +""" diff --git a/tests/functional/partial_parsing/test_partial_parsing.py b/tests/functional/partial_parsing/test_partial_parsing.py new file mode 100644 index 00000000000..f70b2e0f9fa --- /dev/null +++ b/tests/functional/partial_parsing/test_partial_parsing.py @@ -0,0 +1,643 @@ +import pytest + +from dbt.tests.util import run_dbt, get_manifest, write_file, rm_file, run_dbt_and_capture +from dbt.tests.fixtures.project import write_project_files +from tests.functional.partial_parsing.fixtures import ( + model_one_sql, + model_two_sql, + models_schema1_yml, + models_schema2_yml, + models_schema2b_yml, + model_three_sql, + model_three_modified_sql, + model_four1_sql, + model_four2_sql, + models_schema4_yml, + models_schema4b_yml, + models_schema3_yml, + my_macro_sql, + my_macro2_sql, + macros_yml, + empty_schema_yml, + empty_schema_with_version_yml, + model_three_disabled_sql, + model_three_disabled2_sql, + raw_customers_csv, + customers_sql, + sources_tests1_sql, + schema_sources1_yml, + schema_sources2_yml, + schema_sources3_yml, + schema_sources4_yml, + schema_sources5_yml, + customers1_md, + customers2_md, + test_macro_sql, + my_test_sql, + test_macro2_sql, + my_analysis_sql, + sources_tests2_sql, + local_dependency__dbt_project_yml, + local_dependency__models__schema_yml, + local_dependency__models__model_to_import_sql, + local_dependency__macros__dep_macro_sql, + local_dependency__seeds__seed_csv, + schema_models_c_yml, + model_a_sql, + model_b_sql, + macros_schema_yml, + custom_schema_tests1_sql, + custom_schema_tests2_sql, + ref_override_sql, + ref_override2_sql, + gsm_override_sql, + gsm_override2_sql, + orders_sql, + snapshot_sql, + snapshot2_sql, + generic_schema_yml, + generic_test_sql, + generic_test_schema_yml, + generic_test_edited_sql, +) + +from dbt.exceptions import CompilationError +from dbt.contracts.files import ParseFileType +from dbt.contracts.results import TestStatus +import re +import os + +os.environ["DBT_PP_TEST"] = "true" + + +def normalize(path): + return os.path.normcase(os.path.normpath(path)) + + +class TestModels: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + def test_pp_models(self, project): + # initial run + # run_dbt(['clean']) + results = run_dbt(["run"]) + assert len(results) == 1 + + # add a model file + write_file(model_two_sql, project.project_root, "models", "model_two.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + + # add a schema file + write_file(models_schema1_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + assert "model.test.model_one" in manifest.nodes + model_one_node = manifest.nodes["model.test.model_one"] + assert model_one_node.description == "The first model" + assert model_one_node.patch_path == "test://" + normalize("models/schema.yml") + + # add 
a model and a schema file (with a test) at the same time + write_file(models_schema2_yml, project.project_root, "models", "schema.yml") + write_file(model_three_sql, project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "test"], expect_pass=False) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + project_files = [f for f in manifest.files if f.startswith("test://")] + assert len(project_files) == 4 + model_3_file_id = "test://" + normalize("models/model_three.sql") + assert model_3_file_id in manifest.files + model_three_file = manifest.files[model_3_file_id] + assert model_three_file.parse_file_type == ParseFileType.Model + assert type(model_three_file).__name__ == "SourceFile" + model_three_node = manifest.nodes[model_three_file.nodes[0]] + schema_file_id = "test://" + normalize("models/schema.yml") + assert model_three_node.patch_path == schema_file_id + assert model_three_node.description == "The third model" + schema_file = manifest.files[schema_file_id] + assert type(schema_file).__name__ == "SchemaSourceFile" + assert len(schema_file.tests) == 1 + tests = schema_file.get_all_test_ids() + assert tests == ["test.test.unique_model_three_id.6776ac8160"] + unique_test_id = tests[0] + assert unique_test_id in manifest.nodes + + # modify model sql file, ensure description still there + write_file(model_three_modified_sql, project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + model_id = "model.test.model_three" + assert model_id in manifest.nodes + model_three_node = manifest.nodes[model_id] + assert model_three_node.description == "The third model" + + # Change the model 3 test from unique to not_null + write_file(models_schema2b_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "test"], expect_pass=False) + manifest = get_manifest(project.project_root) + schema_file_id = "test://" + normalize("models/schema.yml") + schema_file = manifest.files[schema_file_id] + tests = schema_file.get_all_test_ids() + assert tests == ["test.test.not_null_model_three_id.3162ce0a6f"] + not_null_test_id = tests[0] + assert not_null_test_id in manifest.nodes.keys() + assert unique_test_id not in manifest.nodes.keys() + assert len(results) == 1 + + # go back to previous version of schema file, removing patch, test, and model for model three + write_file(models_schema1_yml, project.project_root, "models", "schema.yml") + rm_file(project.project_root, "models", "model_three.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + + # remove schema file, still have 3 models + write_file(model_three_sql, project.project_root, "models", "model_three.sql") + rm_file(project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + manifest = get_manifest(project.project_root) + schema_file_id = "test://" + normalize("models/schema.yml") + assert schema_file_id not in manifest.files + project_files = [f for f in manifest.files if f.startswith("test://")] + assert len(project_files) == 3 + + # Put schema file back and remove a model + # referred to in schema file + write_file(models_schema2_yml, project.project_root, "models", "schema.yml") + rm_file(project.project_root, "models", "model_three.sql") + with pytest.raises(CompilationError): + results = run_dbt(["--partial-parse", "--warn-error", "run"]) + + # Put model back again + 
write_file(model_three_sql, project.project_root, "models", "model_three.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # Add model four referencing model three
+        write_file(model_four1_sql, project.project_root, "models", "model_four.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 4
+
+        # Remove model_three and change model_four to ref model_one
+        # and change schema file to remove model_three
+        rm_file(project.project_root, "models", "model_three.sql")
+        write_file(model_four2_sql, project.project_root, "models", "model_four.sql")
+        write_file(models_schema1_yml, project.project_root, "models", "schema.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # Remove model four, put back model three, put back schema file
+        write_file(model_three_sql, project.project_root, "models", "model_three.sql")
+        write_file(models_schema2_yml, project.project_root, "models", "schema.yml")
+        rm_file(project.project_root, "models", "model_four.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # disable model three in the schema file
+        write_file(models_schema4_yml, project.project_root, "models", "schema.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 2
+
+        # update enabled config to be true for model three in the schema file
+        write_file(models_schema4b_yml, project.project_root, "models", "schema.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # disable model three in the schema file again
+        write_file(models_schema4_yml, project.project_root, "models", "schema.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 2
+
+        # remove disabled config for model three in the schema file to check it gets enabled
+        write_file(models_schema4b_yml, project.project_root, "models", "schema.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # Add a macro
+        write_file(my_macro_sql, project.project_root, "macros", "my_macro.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+        manifest = get_manifest(project.project_root)
+        macro_id = "macro.test.do_something"
+        assert macro_id in manifest.macros
+
+        # Modify the macro
+        write_file(my_macro2_sql, project.project_root, "macros", "my_macro.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # Add a macro patch
+        write_file(models_schema3_yml, project.project_root, "models", "schema.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # Remove the macro
+        rm_file(project.project_root, "macros", "my_macro.sql")
+        with pytest.raises(CompilationError):
+            results = run_dbt(["--partial-parse", "--warn-error", "run"])
+
+        # put back macro file, go back to schema file with no macro
+        # add separate macro patch schema file
+        write_file(models_schema2_yml, project.project_root, "models", "schema.yml")
+        write_file(my_macro_sql, project.project_root, "macros", "my_macro.sql")
+        write_file(macros_yml, project.project_root, "macros", "macros.yml")
+        results = run_dbt(["--partial-parse", "run"])
+
+        # delete macro and schema file
+        rm_file(project.project_root, "macros", "my_macro.sql")
+        rm_file(project.project_root, "macros", "macros.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # Add an empty schema file
+        write_file(empty_schema_yml, project.project_root, "models", "eschema.yml")
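Every step in this sequence leans on the same mechanism: on a `--partial-parse` invocation, dbt compares the files recorded in the saved manifest against the files now on disk and re-parses only what was added, removed, or changed. A rough sketch of that kind of checksum-based diffing (illustrative only; the helper below is hypothetical, not dbt's internal API):

```python
import hashlib
from typing import Dict, Set, Tuple

def diff_project_files(
    saved: Dict[str, str], current: Dict[str, bytes]
) -> Tuple[Set[str], Set[str], Set[str]]:
    # Compare saved {path: checksum} against current {path: raw contents}.
    checksums = {path: hashlib.sha256(data).hexdigest() for path, data in current.items()}
    added = set(checksums) - set(saved)
    deleted = set(saved) - set(checksums)
    changed = {path for path in set(saved) & set(checksums) if saved[path] != checksums[path]}
    return added, deleted, changed
```

Only nodes parsed from the added, deleted, or changed files (plus anything that references them, such as a schema patch) need to be reprocessed; the run that follows picks up right after the empty schema file was written.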
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # Add version to empty schema file
+        write_file(empty_schema_with_version_yml, project.project_root, "models", "eschema.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+
+        # Disable model_three
+        write_file(model_three_disabled_sql, project.project_root, "models", "model_three.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 2
+        manifest = get_manifest(project.project_root)
+        model_id = "model.test.model_three"
+        assert model_id in manifest.disabled
+        assert model_id not in manifest.nodes
+
+        # Edit disabled model three
+        write_file(model_three_disabled2_sql, project.project_root, "models", "model_three.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 2
+        manifest = get_manifest(project.project_root)
+        model_id = "model.test.model_three"
+        assert model_id in manifest.disabled
+        assert model_id not in manifest.nodes
+
+        # Remove disabled from model three
+        write_file(model_three_sql, project.project_root, "models", "model_three.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 3
+        manifest = get_manifest(project.project_root)
+        model_id = "model.test.model_three"
+        assert model_id in manifest.nodes
+        assert model_id not in manifest.disabled
+
+
+class TestSources:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "model_one.sql": model_one_sql,
+        }
+
+    def test_pp_sources(self, project):
+        # initial run
+        write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv")
+        write_file(sources_tests1_sql, project.project_root, "macros", "tests.sql")
+        results = run_dbt(["run"])
+        assert len(results) == 1
+
+        # Partial parse running 'seed'
+        run_dbt(["--partial-parse", "seed"])
+        manifest = get_manifest(project.project_root)
+        seed_file_id = "test://" + normalize("seeds/raw_customers.csv")
+        assert seed_file_id in manifest.files
+
+        # Add another seed file
+        write_file(raw_customers_csv, project.project_root, "seeds", "more_customers.csv")
+        run_dbt(["--partial-parse", "run"])
+        seed_file_id = "test://" + normalize("seeds/more_customers.csv")
+        manifest = get_manifest(project.project_root)
+        assert seed_file_id in manifest.files
+        seed_id = "seed.test.more_customers"
+        assert seed_id in manifest.nodes
+
+        # Remove seed file and add a schema file with a source referring to raw_customers
+        rm_file(project.project_root, "seeds", "more_customers.csv")
+        write_file(schema_sources1_yml, project.project_root, "models", "sources.yml")
+        results = run_dbt(["--partial-parse", "run"])
+        manifest = get_manifest(project.project_root)
+        assert len(manifest.sources) == 1
+        file_id = "test://" + normalize("models/sources.yml")
+        assert file_id in manifest.files
+
+        # add a model referring to raw_customers source
+        write_file(customers_sql, project.project_root, "models", "customers.sql")
+        results = run_dbt(["--partial-parse", "run"])
+        assert len(results) == 2
+
+        # remove sources schema file
+        rm_file(project.project_root, "models", "sources.yml")
+        with pytest.raises(CompilationError):
+            results = run_dbt(["--partial-parse", "run"])
+
+        # put back sources and add an exposures file
+        write_file(schema_sources2_yml, project.project_root, "models", "sources.yml")
+        results = run_dbt(["--partial-parse", "run"])
+
+        # remove seed referenced in exposures file
+        rm_file(project.project_root, "seeds", "raw_customers.csv")
+        with pytest.raises(CompilationError):
+            results = 
run_dbt(["--partial-parse", "run"]) + + # put back seed and remove depends_on from exposure + write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv") + write_file(schema_sources3_yml, project.project_root, "models", "sources.yml") + results = run_dbt(["--partial-parse", "run"]) + + # Add seed config with test to schema.yml, remove exposure + write_file(schema_sources4_yml, project.project_root, "models", "sources.yml") + results = run_dbt(["--partial-parse", "run"]) + + # Change seed name to wrong name + write_file(schema_sources5_yml, project.project_root, "models", "sources.yml") + with pytest.raises(CompilationError): + results = run_dbt(["--partial-parse", "--warn-error", "run"]) + + # Put back seed name to right name + write_file(schema_sources4_yml, project.project_root, "models", "sources.yml") + results = run_dbt(["--partial-parse", "run"]) + + # Add docs file customers.md + write_file(customers1_md, project.project_root, "models", "customers.md") + results = run_dbt(["--partial-parse", "run"]) + + # Change docs file customers.md + write_file(customers2_md, project.project_root, "models", "customers.md") + results = run_dbt(["--partial-parse", "run"]) + + # Delete docs file + rm_file(project.project_root, "models", "customers.md") + results = run_dbt(["--partial-parse", "run"]) + + # Add a data test + write_file(test_macro_sql, project.project_root, "macros", "test-macro.sql") + write_file(my_test_sql, project.project_root, "tests", "my_test.sql") + results = run_dbt(["--partial-parse", "test"]) + manifest = get_manifest(project.project_root) + assert len(manifest.nodes) == 9 + test_id = "test.test.my_test" + assert test_id in manifest.nodes + + # Change macro that data test depends on + write_file(test_macro2_sql, project.project_root, "macros", "test-macro.sql") + results = run_dbt(["--partial-parse", "test"]) + manifest = get_manifest(project.project_root) + + # Add an analysis + write_file(my_analysis_sql, project.project_root, "analyses", "my_analysis.sql") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + + # Remove data test + rm_file(project.project_root, "tests", "my_test.sql") + results = run_dbt(["--partial-parse", "test"]) + manifest = get_manifest(project.project_root) + assert len(manifest.nodes) == 9 + + # Remove analysis + rm_file(project.project_root, "analyses", "my_analysis.sql") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + assert len(manifest.nodes) == 8 + + # Change source test + write_file(sources_tests2_sql, project.project_root, "macros", "tests.sql") + results = run_dbt(["--partial-parse", "run"]) + + +class TestPartialParsingDependency: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + @pytest.fixture(scope="class", autouse=True) + def setUp(self, project_root): + local_dependency_files = { + "dbt_project.yml": local_dependency__dbt_project_yml, + "models": { + "schema.yml": local_dependency__models__schema_yml, + "model_to_import.sql": local_dependency__models__model_to_import_sql, + }, + "macros": {"dep_macro.sql": local_dependency__macros__dep_macro_sql}, + "seeds": {"seed.csv": local_dependency__seeds__seed_csv}, + } + write_project_files(project_root, "local_dependency", local_dependency_files) + + @pytest.fixture(scope="class") + def packages(self): + return {"packages": [{"local": "local_dependency"}]} + + def test_parsing_with_dependency(self, project): + 
run_dbt(["clean"]) + run_dbt(["deps"]) + run_dbt(["seed"]) + run_dbt(["run"]) + + # Add a source override + write_file(schema_models_c_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + assert len(manifest.sources) == 1 + source_id = "source.local_dep.seed_source.seed" + assert source_id in manifest.sources + # We have 1 root model, 1 local_dep model, 1 local_dep seed, 1 local_dep source test, 2 root source tests + assert len(manifest.nodes) == 5 + test_id = "test.local_dep.source_unique_seed_source_seed_id.afa94935ed" + assert test_id in manifest.nodes + + # Remove a source override + rm_file(project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + assert len(manifest.sources) == 1 + + +class TestNestedMacros: + @pytest.fixture(scope="class") + def models(self): + return { + "model_a.sql": model_a_sql, + "model_b.sql": model_b_sql, + "schema.yml": macros_schema_yml, + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "custom_schema_tests.sql": custom_schema_tests1_sql, + } + + def test_nested_macros(self, project): + results = run_dbt() + assert len(results) == 2 + manifest = get_manifest(project.project_root) + macro_child_map = manifest.build_macro_child_map() + macro_unique_id = "macro.test.test_type_two" + assert macro_unique_id in macro_child_map + + results = run_dbt(["test"], expect_pass=False) + results = sorted(results, key=lambda r: r.node.name) + assert len(results) == 2 + # type_one_model_a_ + assert results[0].status == TestStatus.Fail + assert re.search(r"union all", results[0].node.compiled_code) + # type_two_model_a_ + assert results[1].status == TestStatus.Warn + assert results[1].node.config.severity == "WARN" + + write_file( + custom_schema_tests2_sql, project.project_root, "macros", "custom_schema_tests.sql" + ) + results = run_dbt(["--partial-parse", "test"], expect_pass=False) + manifest = get_manifest(project.project_root) + test_node_id = "test.test.type_two_model_a_.842bc6c2a7" + assert test_node_id in manifest.nodes + results = sorted(results, key=lambda r: r.node.name) + assert len(results) == 2 + # type_two_model_a_ + assert results[1].status == TestStatus.Fail + assert results[1].node.config.severity == "ERROR" + + +class TestSkipMacros: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + "eschema.yml": empty_schema_yml, + } + + def test_skip_macros(self, project): + # initial run so we have a msgpack file + # includes empty_schema file for bug #4850 + results = run_dbt() + + # add a new ref override macro + write_file(ref_override_sql, project.project_root, "macros", "ref_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." in log_output + + # modify a ref override macro + write_file(ref_override2_sql, project.project_root, "macros", "ref_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." in log_output + + # remove a ref override macro + rm_file(project.project_root, "macros", "ref_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." 
in log_output + + # custom generate_schema_name macro + write_file(gsm_override_sql, project.project_root, "macros", "gsm_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." in log_output + + # change generate_schema_name macro + write_file(gsm_override2_sql, project.project_root, "macros", "gsm_override.sql") + results, log_output = run_dbt_and_capture(["--partial-parse", "run"]) + assert "Starting full parse." in log_output + + +class TestSnapshots: + @pytest.fixture(scope="class") + def models(self): + return { + "orders.sql": orders_sql, + } + + def test_pp_snapshots(self, project): + + # initial run + results = run_dbt() + assert len(results) == 1 + + # add snapshot + write_file(snapshot_sql, project.project_root, "snapshots", "snapshot.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + snapshot_id = "snapshot.test.orders_snapshot" + assert snapshot_id in manifest.nodes + snapshot2_id = "snapshot.test.orders2_snapshot" + assert snapshot2_id in manifest.nodes + + # run snapshot + results = run_dbt(["--partial-parse", "snapshot"]) + assert len(results) == 2 + + # modify snapshot + write_file(snapshot2_sql, project.project_root, "snapshots", "snapshot.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + + # delete snapshot + rm_file(project.project_root, "snapshots", "snapshot.sql") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + + +class TestTests: + @pytest.fixture(scope="class") + def models(self): + return { + "orders.sql": orders_sql, + "schema.yml": generic_schema_yml, + } + + @pytest.fixture(scope="class") + def tests(self): + # Make sure "generic" directory is created + return {"generic": {"readme.md": ""}} + + def test_pp_generic_tests(self, project): + + # initial run + results = run_dbt() + assert len(results) == 1 + manifest = get_manifest(project.project_root) + expected_nodes = ["model.test.orders", "test.test.unique_orders_id.1360ecc70e"] + assert expected_nodes == list(manifest.nodes.keys()) + + # add generic test in test-path + write_file(generic_test_sql, project.project_root, "tests", "generic", "generic_test.sql") + write_file(generic_test_schema_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + test_id = "test.test.is_odd_orders_id.82834fdc5b" + assert test_id in manifest.nodes + expected_nodes = [ + "model.test.orders", + "test.test.unique_orders_id.1360ecc70e", + "test.test.is_odd_orders_id.82834fdc5b", + ] + assert expected_nodes == list(manifest.nodes.keys()) + + # edit generic test in test-path + write_file( + generic_test_edited_sql, project.project_root, "tests", "generic", "generic_test.sql" + ) + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + test_id = "test.test.is_odd_orders_id.82834fdc5b" + assert test_id in manifest.nodes + expected_nodes = [ + "model.test.orders", + "test.test.unique_orders_id.1360ecc70e", + "test.test.is_odd_orders_id.82834fdc5b", + ] + assert expected_nodes == list(manifest.nodes.keys()) diff --git a/tests/functional/partial_parsing/test_pp_metrics.py b/tests/functional/partial_parsing/test_pp_metrics.py new file mode 100644 index 00000000000..575c5ca613e --- /dev/null +++ b/tests/functional/partial_parsing/test_pp_metrics.py @@ -0,0 
+1,73 @@ +import pytest + +from dbt.tests.util import run_dbt, write_file, get_manifest +from tests.functional.partial_parsing.fixtures import ( + people_sql, + people_metrics_yml, + people_metrics2_yml, + metric_model_a_sql, + people_metrics3_yml, +) + +from dbt.exceptions import CompilationError + + +class TestMetrics: + @pytest.fixture(scope="class") + def models(self): + return { + "people.sql": people_sql, + } + + def test_metrics(self, project): + # initial run + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + assert len(manifest.nodes) == 1 + + # Add metrics yaml file + write_file(people_metrics_yml, project.project_root, "models", "people_metrics.yml") + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + assert len(manifest.metrics) == 2 + metric_people_id = "metric.test.number_of_people" + metric_tenure_id = "metric.test.collective_tenure" + metric_people = manifest.metrics[metric_people_id] + metric_tenure = manifest.metrics[metric_tenure_id] + expected_meta = {"my_meta": "testing"} + assert metric_people.meta == expected_meta + assert metric_people.refs == [["people"]] + assert metric_tenure.refs == [["people"]] + expected_depends_on_nodes = ["model.test.people"] + assert metric_people.depends_on.nodes == expected_depends_on_nodes + + # Change metrics yaml files + write_file(people_metrics2_yml, project.project_root, "models", "people_metrics.yml") + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + metric_people = manifest.metrics[metric_people_id] + expected_meta = {"my_meta": "replaced"} + assert metric_people.meta == expected_meta + expected_depends_on_nodes = ["model.test.people"] + assert metric_people.depends_on.nodes == expected_depends_on_nodes + + # Add model referring to metric + write_file(metric_model_a_sql, project.project_root, "models", "metric_model_a.sql") + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + model_a = manifest.nodes["model.test.metric_model_a"] + expected_depends_on_nodes = [ + "metric.test.number_of_people", + "metric.test.collective_tenure", + ] + assert model_a.depends_on.nodes == expected_depends_on_nodes + + # Then delete a metric + write_file(people_metrics3_yml, project.project_root, "models", "people_metrics.yml") + with pytest.raises(CompilationError): + # We use "parse" here and not "run" because we're checking that the CompilationError + # occurs at parse time, not compilation + results = run_dbt(["parse"]) diff --git a/tests/functional/partial_parsing/test_pp_vars.py b/tests/functional/partial_parsing/test_pp_vars.py new file mode 100644 index 00000000000..19b3c7db849 --- /dev/null +++ b/tests/functional/partial_parsing/test_pp_vars.py @@ -0,0 +1,386 @@ +import pytest + +from dbt.tests.util import run_dbt, write_file, run_dbt_and_capture, get_manifest + +from tests.functional.partial_parsing.fixtures import ( + model_color_sql, + env_var_model_sql, + env_var_schema_yml, + env_var_model_one_sql, + raw_customers_csv, + env_var_sources_yml, + test_color_sql, + env_var_schema2_yml, + env_var_schema3_yml, + env_var_macro_sql, + env_var_macros_yml, + env_var_model_test_yml, + people_sql, + env_var_metrics_yml, + model_one_sql, +) + + +from dbt.exceptions import ParsingError +from dbt.constants import SECRET_ENV_PREFIX +import os + + +os.environ["DBT_PP_TEST"] = "true" + + +class TestEnvVars: + @pytest.fixture(scope="class") + def models(self): + return 
{ + "model_color.sql": model_color_sql, + } + + def test_env_vars_models(self, project): + + # initial run + results = run_dbt(["run"]) + assert len(results) == 1 + + # copy a file with an env_var call without an env_var + write_file(env_var_model_sql, project.project_root, "models", "env_var_model.sql") + with pytest.raises(ParsingError): + results = run_dbt(["--partial-parse", "run"]) + + # set the env var + os.environ["ENV_VAR_TEST"] = "TestingEnvVars" + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + expected_env_vars = {"ENV_VAR_TEST": "TestingEnvVars"} + assert expected_env_vars == manifest.env_vars + model_id = "model.test.env_var_model" + model = manifest.nodes[model_id] + model_created_at = model.created_at + + # change the env var + os.environ["ENV_VAR_TEST"] = "second" + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 2 + manifest = get_manifest(project.project_root) + expected_env_vars = {"ENV_VAR_TEST": "second"} + assert expected_env_vars == manifest.env_vars + assert model_created_at != manifest.nodes[model_id].created_at + + # set an env_var in a schema file + write_file(env_var_schema_yml, project.project_root, "models", "schema.yml") + write_file(env_var_model_one_sql, project.project_root, "models", "model_one.sql") + with pytest.raises(ParsingError): + results = run_dbt(["--partial-parse", "run"]) + + # actually set the env_var + os.environ["TEST_SCHEMA_VAR"] = "view" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = {"ENV_VAR_TEST": "second", "TEST_SCHEMA_VAR": "view"} + assert expected_env_vars == manifest.env_vars + + # env vars in a source + os.environ["ENV_VAR_DATABASE"] = "dbt" + os.environ["ENV_VAR_SEVERITY"] = "warn" + write_file(raw_customers_csv, project.project_root, "seeds", "raw_customers.csv") + write_file(env_var_sources_yml, project.project_root, "models", "sources.yml") + run_dbt(["--partial-parse", "seed"]) + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "dbt", + "ENV_VAR_SEVERITY": "warn", + } + assert expected_env_vars == manifest.env_vars + assert len(manifest.sources) == 1 + source_id = "source.test.seed_sources.raw_customers" + source = manifest.sources[source_id] + assert source.database == "dbt" + schema_file = manifest.files[source.file_id] + test_id = "test.test.source_not_null_seed_sources_raw_customers_id.e39ee7bf0d" + test_node = manifest.nodes[test_id] + assert test_node.config.severity == "WARN" + + # Change severity env var + os.environ["ENV_VAR_SEVERITY"] = "error" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "dbt", + "ENV_VAR_SEVERITY": "error", + } + assert expected_env_vars == manifest.env_vars + source_id = "source.test.seed_sources.raw_customers" + source = manifest.sources[source_id] + schema_file = manifest.files[source.file_id] + expected_schema_file_env_vars = { + "sources": {"seed_sources": ["ENV_VAR_DATABASE", "ENV_VAR_SEVERITY"]} + } + assert expected_schema_file_env_vars == schema_file.env_vars + test_node = manifest.nodes[test_id] + assert test_node.config.severity == "ERROR" + + # Change database env var + 
os.environ["ENV_VAR_DATABASE"] = "test_dbt" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "test_dbt", + "ENV_VAR_SEVERITY": "error", + } + assert expected_env_vars == manifest.env_vars + source = manifest.sources[source_id] + assert source.database == "test_dbt" + + # Delete database env var + del os.environ["ENV_VAR_DATABASE"] + with pytest.raises(ParsingError): + results = run_dbt(["--partial-parse", "run"]) + os.environ["ENV_VAR_DATABASE"] = "test_dbt" + + # Add generic test with test kwarg that's rendered late (no curly brackets) + os.environ["ENV_VAR_DATABASE"] = "dbt" + write_file(test_color_sql, project.project_root, "macros", "test_color.sql") + results = run_dbt(["--partial-parse", "run"]) + # Add source test using test_color and an env_var for color + write_file(env_var_schema2_yml, project.project_root, "models/schema.yml") + with pytest.raises(ParsingError): + results = run_dbt(["--partial-parse", "run"]) + os.environ["ENV_VAR_COLOR"] = "green" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + test_color_id = "test.test.check_color_model_one_env_var_ENV_VAR_COLOR___fun.89638de387" + test_node = manifest.nodes[test_color_id] + # kwarg was rendered but not changed (it will be rendered again when compiled) + assert test_node.test_metadata.kwargs["color"] == "env_var('ENV_VAR_COLOR')" + results = run_dbt(["--partial-parse", "test"]) + + # Add an exposure with an env_var + os.environ["ENV_VAR_OWNER"] = "John Doe" + write_file(env_var_schema3_yml, project.project_root, "models", "schema.yml") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "dbt", + "ENV_VAR_SEVERITY": "error", + "ENV_VAR_COLOR": "green", + "ENV_VAR_OWNER": "John Doe", + } + assert expected_env_vars == manifest.env_vars + exposure = list(manifest.exposures.values())[0] + schema_file = manifest.files[exposure.file_id] + expected_sf_env_vars = { + "models": {"model_one": ["TEST_SCHEMA_VAR", "ENV_VAR_COLOR"]}, + "exposures": {"proxy_for_dashboard": ["ENV_VAR_OWNER"]}, + } + assert expected_sf_env_vars == schema_file.env_vars + + # add a macro and a macro schema file + os.environ["ENV_VAR_SOME_KEY"] = "toodles" + write_file(env_var_macro_sql, project.project_root, "macros", "env_var_macro.sql") + write_file(env_var_macros_yml, project.project_root, "macros", "env_var_macros.yml") + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + expected_env_vars = { + "ENV_VAR_TEST": "second", + "TEST_SCHEMA_VAR": "view", + "ENV_VAR_DATABASE": "dbt", + "ENV_VAR_SEVERITY": "error", + "ENV_VAR_COLOR": "green", + "ENV_VAR_OWNER": "John Doe", + "ENV_VAR_SOME_KEY": "toodles", + } + assert expected_env_vars == manifest.env_vars + macro_id = "macro.test.do_something" + macro = manifest.macros[macro_id] + assert macro.meta == {"some_key": "toodles"} + # change the env var + os.environ["ENV_VAR_SOME_KEY"] = "dumdedum" + results = run_dbt(["--partial-parse", "run"]) + manifest = get_manifest(project.project_root) + macro = manifest.macros[macro_id] + assert macro.meta == {"some_key": "dumdedum"} + + # Add a schema file with a test on model_color and env_var in test enabled config + write_file(env_var_model_test_yml, project.project_root, "models", 
"schema.yml") + results = run_dbt(["--partial-parse", "run"]) + assert len(results) == 3 + manifest = get_manifest(project.project_root) + model_color = manifest.nodes["model.test.model_color"] + schema_file = manifest.files[model_color.patch_path] + expected_env_vars = { + "models": { + "model_one": ["TEST_SCHEMA_VAR", "ENV_VAR_COLOR"], + "model_color": ["ENV_VAR_ENABLED"], + }, + "exposures": {"proxy_for_dashboard": ["ENV_VAR_OWNER"]}, + } + assert expected_env_vars == schema_file.env_vars + + # Add a metrics file with env_vars + os.environ["ENV_VAR_METRICS"] = "TeStInG" + write_file(people_sql, project.project_root, "models", "people.sql") + write_file(env_var_metrics_yml, project.project_root, "models", "metrics.yml") + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + assert "ENV_VAR_METRICS" in manifest.env_vars + assert manifest.env_vars["ENV_VAR_METRICS"] == "TeStInG" + metric_node = manifest.metrics["metric.test.number_of_people"] + assert metric_node.meta == {"my_meta": "TeStInG"} + + # Change metrics env var + os.environ["ENV_VAR_METRICS"] = "Changed!" + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + metric_node = manifest.metrics["metric.test.number_of_people"] + assert metric_node.meta == {"my_meta": "Changed!"} + + # delete the env vars to cleanup + del os.environ["ENV_VAR_TEST"] + del os.environ["ENV_VAR_SEVERITY"] + del os.environ["ENV_VAR_DATABASE"] + del os.environ["TEST_SCHEMA_VAR"] + del os.environ["ENV_VAR_COLOR"] + del os.environ["ENV_VAR_SOME_KEY"] + del os.environ["ENV_VAR_OWNER"] + del os.environ["ENV_VAR_METRICS"] + + +class TestProjectEnvVars: + @pytest.fixture(scope="class") + def project_config_update(self): + # Need to set the environment variable here initially because + # the project fixture loads the config. + os.environ["ENV_VAR_NAME"] = "Jane Smith" + return {"models": {"+meta": {"meta_name": "{{ env_var('ENV_VAR_NAME') }}"}}} + + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + def test_project_env_vars(self, project): + # Initial run + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + state_check = manifest.state_check + model_id = "model.test.model_one" + model = manifest.nodes[model_id] + assert model.config.meta["meta_name"] == "Jane Smith" + env_vars_hash_checksum = state_check.project_env_vars_hash.checksum + + # Change the environment variable + os.environ["ENV_VAR_NAME"] = "Jane Doe" + results = run_dbt(["run"]) + assert len(results) == 1 + manifest = get_manifest(project.project_root) + model = manifest.nodes[model_id] + assert model.config.meta["meta_name"] == "Jane Doe" + assert env_vars_hash_checksum != manifest.state_check.project_env_vars_hash.checksum + + # cleanup + del os.environ["ENV_VAR_NAME"] + + +class TestProfileEnvVars: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + @pytest.fixture(scope="class") + def dbt_profile_target(self): + # Need to set these here because the base integration test class + # calls 'load_config' before the tests are run. + # Note: only the specified profile is rendered, so there's no + # point it setting env_vars in non-used profiles. 
+ os.environ["ENV_VAR_USER"] = "root" + os.environ["ENV_VAR_PASS"] = "password" + return { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": 5432, + "user": "{{ env_var('ENV_VAR_USER') }}", + "pass": "{{ env_var('ENV_VAR_PASS') }}", + "dbname": "dbt", + } + + def test_profile_env_vars(self, project): + + # Initial run + os.environ["ENV_VAR_USER"] = "root" + os.environ["ENV_VAR_PASS"] = "password" + + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum + + # Change env_vars, the user doesn't exist, this should fail + os.environ["ENV_VAR_USER"] = "fake_user" + (results, log_output) = run_dbt_and_capture(["run"], expect_pass=False) + assert "env vars used in profiles.yml have changed" in log_output + manifest = get_manifest(project.project_root) + assert env_vars_checksum != manifest.state_check.profile_env_vars_hash.checksum + + +class TestProfileSecretEnvVars: + @pytest.fixture(scope="class") + def models(self): + return { + "model_one.sql": model_one_sql, + } + + @property + def dbt_profile_target(self): + # Need to set these here because the base integration test class + # calls 'load_config' before the tests are run. + # Note: only the specified profile is rendered, so there's no + # point in setting env_vars in non-used profiles. + + # user is secret and password is not. postgres on macos doesn't care if the password + # changes so we have to change the user. related: https://github.com/dbt-labs/dbt-core/pull/4250 + os.environ[SECRET_ENV_PREFIX + "USER"] = "root" + os.environ["ENV_VAR_PASS"] = "password" + return { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": 5432, + "user": "{{ env_var('DBT_ENV_SECRET_USER') }}", + "pass": "{{ env_var('ENV_VAR_PASS') }}", + "dbname": "dbt", + } + + def test_profile_secret_env_vars(self, project): + + # Initial run + os.environ[SECRET_ENV_PREFIX + "USER"] = "root" + os.environ["ENV_VAR_PASS"] = "password" + + results = run_dbt(["run"]) + manifest = get_manifest(project.project_root) + env_vars_checksum = manifest.state_check.profile_env_vars_hash.checksum + + # Change a secret var, it shouldn't register because we shouldn't save secrets. + os.environ[SECRET_ENV_PREFIX + "USER"] = "fake_user" + # we just want to see if the manifest has included + # the secret in the hash of environment variables. 
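The assertion a few lines below only holds if dbt leaves `DBT_ENV_SECRET_`-prefixed variables out of the fingerprint it takes of the rendered profile. A minimal sketch of that filter-before-hash idea (hypothetical helper, not dbt's actual implementation):

```python
import hashlib
import json

SECRET_ENV_PREFIX = "DBT_ENV_SECRET_"

def hash_profile_env_vars(env: dict) -> str:
    # Secrets are excluded before hashing, so rotating a secret never
    # changes the checksum and never triggers a full re-parse.
    public = {k: v for k, v in env.items() if not k.startswith(SECRET_ENV_PREFIX)}
    return hashlib.md5(json.dumps(public, sort_keys=True).encode("utf-8")).hexdigest()
```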
+ (results, log_output) = run_dbt_and_capture(["run"], expect_pass=True) + # I020 is the event code for "env vars used in profiles.yml have changed" + assert not ("I020" in log_output) + manifest = get_manifest(project.project_root) + assert env_vars_checksum == manifest.state_check.profile_env_vars_hash.checksum From 89d111a5f6b0ddf925f3cb62c90164f6a038a1e3 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Tue, 17 Jan 2023 13:18:07 -0500 Subject: [PATCH 116/156] CT 1440 Fix code to emit ConnectionReused event (#6605) * Refactor "set_connection_name" to properly handle reused connection * Update test * Changie * Limit test of ConnectionUsed events to non-Windows --- .../Under the Hood-20230113-132513.yaml | 6 +++ core/dbt/adapters/base/connections.py | 52 +++++++++---------- core/dbt/events/proto_types.py | 1 + core/dbt/events/types.proto | 1 + core/dbt/events/types.py | 2 +- tests/functional/logging/test_logging.py | 13 +++++ 6 files changed, 48 insertions(+), 27 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230113-132513.yaml diff --git a/.changes/unreleased/Under the Hood-20230113-132513.yaml b/.changes/unreleased/Under the Hood-20230113-132513.yaml new file mode 100644 index 00000000000..2274fbc01a7 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230113-132513.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Fix use of ConnectionReused logging event +time: 2023-01-13T13:25:13.023168-05:00 +custom: + Author: gshank + Issue: "6168" diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py index 73e87ae9600..d449b27e5e6 100644 --- a/core/dbt/adapters/base/connections.py +++ b/core/dbt/adapters/base/connections.py @@ -142,44 +142,44 @@ def exception_handler(self, sql: str) -> ContextManager: ) def set_connection_name(self, name: Optional[str] = None) -> Connection: - conn_name: str - if name is None: - # if a name isn't specified, we'll re-use a single handle - # named 'master' - conn_name = "master" - else: - if not isinstance(name, str): - raise dbt.exceptions.CompilerException( - f"For connection name, got {name} - not a string!" - ) - assert isinstance(name, str) - conn_name = name + """Called by 'acquire_connection' in BaseAdapter, which is called by + 'connection_named', called by 'connection_for(node)'. 
+ Creates a connection for this thread if one doesn't already + exist, and will rename an existing connection.""" + conn_name: str = "master" if name is None else name + + # Get a connection for this thread conn = self.get_if_exists() + + if conn and conn.name == conn_name and conn.state == "open": + # Found a connection and nothing to do, so just return it + return conn + if conn is None: + # Create a new connection conn = Connection( type=Identifier(self.TYPE), - name=None, + name=conn_name, state=ConnectionState.INIT, transaction_open=False, handle=None, credentials=self.profile.credentials, ) - self.set_thread_connection(conn) - - if conn.name == conn_name and conn.state == "open": - return conn - - fire_event( - NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info()) - ) - - if conn.state == "open": - fire_event(ConnectionReused(conn_name=conn_name)) - else: conn.handle = LazyHandle(self.open) + # Add the connection to thread_connections for this thread + self.set_thread_connection(conn) + fire_event( + NewConnection(conn_name=conn_name, conn_type=self.TYPE, node_info=get_node_info()) + ) + else: # existing connection either wasn't open or didn't have the right name + if conn.state != "open": + conn.handle = LazyHandle(self.open) + if conn.name != conn_name: + orig_conn_name: str = conn.name or "" + conn.name = conn_name + fire_event(ConnectionReused(orig_conn_name=orig_conn_name, conn_name=conn_name)) - conn.name = conn_name return conn @classmethod diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index da8721d55b9..3fb92eeda51 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -531,6 +531,7 @@ class ConnectionReused(betterproto.Message): """E006""" conn_name: str = betterproto.string_field(1) + orig_conn_name: str = betterproto.string_field(2) @dataclass diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 71e7fc3176c..87a10c19eda 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -419,6 +419,7 @@ message NewConnectionMsg { // E006 message ConnectionReused { string conn_name = 1; + string orig_conn_name = 2; } message ConnectionReusedMsg { diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 4a2a0fb99ee..043ed8d40ff 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -449,7 +449,7 @@ def code(self): return "E006" def message(self) -> str: - return f"Re-using an available connection from the pool (formerly {self.conn_name})" + return f"Re-using an available connection from the pool (formerly {self.orig_conn_name}, now {self.conn_name})" @dataclass diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py index fe98d68e676..afcd90d4afb 100644 --- a/tests/functional/logging/test_logging.py +++ b/tests/functional/logging/test_logging.py @@ -1,6 +1,7 @@ import pytest from dbt.tests.util import run_dbt, get_manifest, read_file import json +import os my_model_sql = """ @@ -26,6 +27,7 @@ def test_basic(project, logs_dir): assert log_file node_start = False node_finished = False + connection_reused_data = [] for log_line in log_file.split("\n"): # skip empty lines if len(log_line) == 0: @@ -36,6 +38,8 @@ def test_basic(project, logs_dir): log_dct = json.loads(log_line) log_data = log_dct["data"] log_event = log_dct["info"]["name"] + if log_event == "ConnectionReused": + connection_reused_data.append(log_data) if log_event == "NodeStart": node_start = True if log_event == 
"NodeFinished": @@ -50,3 +54,12 @@ def test_basic(project, logs_dir): if log_event == "TimingInfoCollected": assert "node_info" in log_data assert "timing_info" in log_data + + # windows doesn't have the same thread/connection flow so the ConnectionReused + # events don't show up + if os.name != "nt": + # Verify the ConnectionReused event occurs and has the right data + assert connection_reused_data + for data in connection_reused_data: + assert "conn_name" in data and data["conn_name"] + assert "orig_conn_name" in data and data["orig_conn_name"] From 43e24c5ae63cefdac4784b330948f98cba6ab68a Mon Sep 17 00:00:00 2001 From: David Bloss Date: Wed, 18 Jan 2023 11:23:13 -0600 Subject: [PATCH 117/156] update gh action set-output variables (#6635) * update gh action set-output variables * add changie file --- .../Under the Hood-20230117-111737.yaml | 6 ++++++ .github/_README.md | 20 +++++++++---------- .github/actions/latest-wrangler/main.py | 17 +++++++++------- .github/workflows/main.yml | 8 ++++++-- .github/workflows/release-docker.yml | 12 ++++++----- .github/workflows/release.yml | 2 +- .github/workflows/version-bump.yml | 2 +- 7 files changed, 41 insertions(+), 26 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230117-111737.yaml diff --git a/.changes/unreleased/Under the Hood-20230117-111737.yaml b/.changes/unreleased/Under the Hood-20230117-111737.yaml new file mode 100644 index 00000000000..126a25ea28a --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230117-111737.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Update deprecated github action command +time: 2023-01-17T11:17:37.046095-06:00 +custom: + Author: davidbloss + Issue: "6153" diff --git a/.github/_README.md b/.github/_README.md index 4da081fe2b6..f624fc5fec6 100644 --- a/.github/_README.md +++ b/.github/_README.md @@ -63,12 +63,12 @@ permissions: contents: read pull-requests: write ``` - + ### Secrets - When to use a [Personal Access Token (PAT)](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) vs the [GITHUB_TOKEN](https://docs.github.com/en/actions/security-guides/automatic-token-authentication) generated for the action? The `GITHUB_TOKEN` is used by default. In most cases it is sufficient for what you need. - + If you expect the workflow to result in a commit to that should retrigger workflows, you will need to use a Personal Access Token for the bot to commit the file. When using the GITHUB_TOKEN, the resulting commit will not trigger another GitHub Actions Workflow run. This is due to limitations set by GitHub. See [the docs](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#using-the-github_token-in-a-workflow) for a more detailed explanation. For example, we must use a PAT in our workflow to commit a new changelog yaml file for bot PRs. Once the file has been committed to the branch, it should retrigger the check to validate that a changelog exists on the PR. Otherwise, it would stay in a failed state since the check would never retrigger. @@ -105,7 +105,7 @@ Some triggers of note that we use: ``` # **what?** - # Describe what the action does. + # Describe what the action does. # **why?** # Why does this action exist? 
@@ -138,7 +138,7 @@ Some triggers of note that we use: id: fp run: | FILEPATH=.changes/unreleased/Dependencies-${{ steps.filename_time.outputs.time }}.yaml - echo "::set-output name=FILEPATH::$FILEPATH" + echo "FILEPATH=$FILEPATH" >> $GITHUB_OUTPUT ``` - Print out all variables you will reference as the first step of a job. This allows for easier debugging. The first job should log all inputs. Subsequent jobs should reference outputs of other jobs, if present. @@ -158,14 +158,14 @@ Some triggers of note that we use: echo "The build_script_path: ${{ inputs.build_script_path }}" echo "The s3_bucket_name: ${{ inputs.s3_bucket_name }}" echo "The package_test_command: ${{ inputs.package_test_command }}" - + # collect all the variables that need to be used in subsequent jobs - name: Set Variables id: variables run: | - echo "::set-output name=important_path::'performance/runner/Cargo.toml'" - echo "::set-output name=release_id::${{github.event.inputs.release_id}}" - echo "::set-output name=open_prs::${{github.event.inputs.open_prs}}" + echo "important_path='performance/runner/Cargo.toml'" >> $GITHUB_OUTPUT + echo "release_id=${{github.event.inputs.release_id}}" >> $GITHUB_OUTPUT + echo "open_prs=${{github.event.inputs.open_prs}}" >> $GITHUB_OUTPUT job2: needs: [job1] @@ -190,7 +190,7 @@ ___ ### Actions from the Marketplace - Don’t use external actions for things that can easily be accomplished manually. - Always read through what an external action does before using it! Often an action in the GitHub Actions Marketplace can be replaced with a few lines in bash. This is much more maintainable (and won’t change under us) and clear as to what’s actually happening. It also prevents any -- Pin actions _we don't control_ to tags. +- Pin actions _we don't control_ to tags. ### Connecting to AWS - Authenticate with the aws managed workflow @@ -208,7 +208,7 @@ ___ ```yaml - name: Copy Artifacts from S3 via CLI - run: aws s3 cp ${{ env.s3_bucket }} . --recursive + run: aws s3 cp ${{ env.s3_bucket }} . 
--recursive
 ```

 ### Testing
diff --git a/.github/actions/latest-wrangler/main.py b/.github/actions/latest-wrangler/main.py
index 23e14cf5abe..db91cf8354b 100644
--- a/.github/actions/latest-wrangler/main.py
+++ b/.github/actions/latest-wrangler/main.py
@@ -28,11 +28,12 @@
 if package_request.status_code == 404:
     if halt_on_missing:
         sys.exit(1)
-    else:
-        # everything is the latest if the package doesn't exist
-        print(f"::set-output name=latest::{True}")
-        print(f"::set-output name=minor_latest::{True}")
-        sys.exit(0)
+    # everything is the latest if the package doesn't exist
+    # (entries written to GITHUB_OUTPUT must be newline-terminated)
+    github_output = os.environ.get("GITHUB_OUTPUT")
+    with open(github_output, "at", encoding="utf-8") as gh_output:
+        gh_output.write("latest=True\n")
+        gh_output.write("minor_latest=True\n")
+    sys.exit(0)
 
 # TODO: verify package meta is "correct"
 # https://github.com/dbt-labs/dbt-core/issues/4640
@@ -91,5 +92,7 @@ def is_latest(
 latest = is_latest(pre_rel, new_version, current_latest)
 minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
 
-print(f"::set-output name=latest::{latest}")
-print(f"::set-output name=minor_latest::{minor_latest}")
+github_output = os.environ.get("GITHUB_OUTPUT")
+with open(github_output, "at", encoding="utf-8") as gh_output:
+    gh_output.write(f"latest={latest}\n")
+    gh_output.write(f"minor_latest={minor_latest}\n")
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 8138b730d34..c8347f6b069 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -101,7 +101,9 @@ jobs:
       - name: Get current date
         if: always()
         id: date
-        run: echo "::set-output name=date::$(date +'%Y-%m-%dT%H_%M_%S')" #no colons allowed for artifacts
+        run: |
+          CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
+          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
 
       - uses: actions/upload-artifact@v2
         if: always()
@@ -168,7 +170,9 @@ jobs:
       - name: Get current date
         if: always()
         id: date
-        run: echo "::set-output name=date::$(date +'%Y_%m_%dT%H_%M_%S')" #no colons allowed for artifacts
+        run: |
+          CURRENT_DATE=$(date +'%Y-%m-%dT%H_%M_%S') # no colons allowed for artifacts
+          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
 
       - uses: actions/upload-artifact@v2
         if: always()
diff --git a/.github/workflows/release-docker.yml b/.github/workflows/release-docker.yml
index f47f110aeb1..f7b8dc29543 100644
--- a/.github/workflows/release-docker.yml
+++ b/.github/workflows/release-docker.yml
@@ -41,9 +41,9 @@ jobs:
         id: version
         run: |
           IFS="." 
read -r MAJOR MINOR PATCH <<< ${{ github.event.inputs.version_number }} - echo "::set-output name=major::$MAJOR" - echo "::set-output name=minor::$MINOR" - echo "::set-output name=patch::$PATCH" + echo "major=$MAJOR" >> $GITHUB_OUTPUT + echo "minor=$MINOR" >> $GITHUB_OUTPUT + echo "patch=$PATCH" >> $GITHUB_OUTPUT - name: Is pkg 'latest' id: latest @@ -70,8 +70,10 @@ jobs: - name: Get docker build arg id: build_arg run: | - echo "::set-output name=build_arg_name::"$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g') - echo "::set-output name=build_arg_value::"$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g') + BUILD_ARG_NAME=$(echo ${{ github.event.inputs.package }} | sed 's/\-/_/g') + BUILD_ARG_VALUE=$(echo ${{ github.event.inputs.package }} | sed 's/postgres/core/g') + echo "build_arg_name=$BUILD_ARG_NAME" >> $GITHUB_OUTPUT + echo "build_arg_value=$BUILD_ARG_VALUE" >> $GITHUB_OUTPUT - name: Log in to the GHCR uses: docker/login-action@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1abab3e5013..ade939b6ee3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -165,7 +165,7 @@ jobs: env: IS_PRERELEASE: ${{ contains(github.event.inputs.version_number, 'rc') || contains(github.event.inputs.version_number, 'b') }} run: | - echo ::set-output name=isPrerelease::$IS_PRERELEASE + echo "isPrerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT - name: Creating GitHub Release uses: softprops/action-gh-release@v1 diff --git a/.github/workflows/version-bump.yml b/.github/workflows/version-bump.yml index 1a5be6aefb1..2bbaf1cef82 100644 --- a/.github/workflows/version-bump.yml +++ b/.github/workflows/version-bump.yml @@ -65,7 +65,7 @@ jobs: - name: Set branch value id: variables run: | - echo "::set-output name=BRANCH_NAME::prep-release/${{ github.event.inputs.version_number }}_$GITHUB_RUN_ID" + echo "BRANCH_NAME=prep-release/${{ github.event.inputs.version_number }}_$GITHUB_RUN_ID" >> $GITHUB_OUTPUT - name: Create PR branch run: | From 0a03355cebdbf0553f8e0db6ed1f5248cf0bc2b8 Mon Sep 17 00:00:00 2001 From: Emily Rockman Date: Wed, 18 Jan 2023 14:16:34 -0600 Subject: [PATCH 118/156] update test matrix (#6604) --- .github/workflows/release-branch-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-branch-tests.yml b/.github/workflows/release-branch-tests.yml index 3b329f17b6c..bdd01aa495a 100644 --- a/.github/workflows/release-branch-tests.yml +++ b/.github/workflows/release-branch-tests.yml @@ -39,7 +39,7 @@ jobs: max-parallel: 1 fail-fast: false matrix: - branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, main] + branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, 1.4.latest, main] steps: - name: Call CI workflow for ${{ matrix.branch }} branch From 066346faa25c8598730f58054df5d731e353017b Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Wed, 18 Jan 2023 22:37:50 +0100 Subject: [PATCH 119/156] convert 038_caching_tests (#6612) * convert 038_caching_tests * Adapt for dbt-snowflake * PR feedback * Reformat --- .../038_caching_tests/test_caching.py | 67 ------------ .../dbt/tests/adapter/caching/test_caching.py | 103 ++++++++++++++++++ 2 files changed, 103 insertions(+), 67 deletions(-) delete mode 100644 test/integration/038_caching_tests/test_caching.py create mode 100644 tests/adapter/dbt/tests/adapter/caching/test_caching.py diff --git a/test/integration/038_caching_tests/test_caching.py b/test/integration/038_caching_tests/test_caching.py deleted file mode 
100644 index 1967e912628..00000000000 --- a/test/integration/038_caching_tests/test_caching.py +++ /dev/null @@ -1,67 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -from dbt.adapters.factory import FACTORY - -class TestBaseCaching(DBTIntegrationTest): - @property - def schema(self): - return "caching_038" - - @property - def project_config(self): - return { - 'config-version': 2, - 'quoting': { - 'identifier': False, - 'schema': False, - } - } - - def run_and_get_adapter(self): - # we want to inspect the adapter that dbt used for the run, which is - # not self.adapter. You can't do this until after you've run dbt once. - self.run_dbt(['run']) - return FACTORY.adapters[self.adapter_type] - - def cache_run(self): - adapter = self.run_and_get_adapter() - self.assertEqual(len(adapter.cache.relations), 1) - relation = next(iter(adapter.cache.relations.values())) - self.assertEqual(relation.inner.schema, self.unique_schema()) - self.assertEqual(relation.schema, self.unique_schema().lower()) - - self.run_dbt(['run']) - self.assertEqual(len(adapter.cache.relations), 1) - second_relation = next(iter(adapter.cache.relations.values())) - self.assertEqual(relation, second_relation) - -class TestCachingLowercaseModel(TestBaseCaching): - @property - def models(self): - return "models" - - @use_profile('postgres') - def test_postgres_cache(self): - self.cache_run() - -class TestCachingUppercaseModel(TestBaseCaching): - @property - def models(self): - return "shouting_models" - - @use_profile('postgres') - def test_postgres_cache(self): - self.cache_run() - -class TestCachingSelectedSchemaOnly(TestBaseCaching): - @property - def models(self): - return "models_multi_schemas" - - def run_and_get_adapter(self): - # select only the 'model' in the default schema - self.run_dbt(['--cache-selected-only', 'run', '--select', 'model']) - return FACTORY.adapters[self.adapter_type] - - @use_profile('postgres') - def test_postgres_cache(self): - self.cache_run() diff --git a/tests/adapter/dbt/tests/adapter/caching/test_caching.py b/tests/adapter/dbt/tests/adapter/caching/test_caching.py new file mode 100644 index 00000000000..9cf02309c4c --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/caching/test_caching.py @@ -0,0 +1,103 @@ +import pytest + +from dbt.tests.util import run_dbt + +model_sql = """ +{{ + config( + materialized='table' + ) +}} +select 1 as id +""" + +another_schema_model_sql = """ +{{ + config( + materialized='table', + schema='another_schema' + ) +}} +select 1 as id +""" + + +class BaseCachingTest: + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "config-version": 2, + "quoting": { + "identifier": False, + "schema": False, + }, + } + + def run_and_inspect_cache(self, project, run_args=None): + run_dbt(run_args) + + # the cache was empty at the start of the run. + # the model materialization returned an unquoted relation and added to the cache. + adapter = project.adapter + assert len(adapter.cache.relations) == 1 + relation = list(adapter.cache.relations).pop() + assert relation.schema == project.test_schema + assert relation.schema == project.test_schema.lower() + + # on the second run, dbt will find a relation in the database during cache population. + # this relation will be quoted, because list_relations_without_caching (by default) uses + # quote_policy = {"database": True, "schema": True, "identifier": True} + # when adding relations to the cache. 
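That quoting asymmetry is why the comparison further down is done case- and quote-insensitively. As a rough illustration of how a quote policy changes a rendered relation name (hypothetical renderer, not dbt's `Relation` API):

```python
def render_relation(parts: dict, quote_policy: dict) -> str:
    # Quoted components keep their exact case; unquoted ones are case-folded
    # by most warehouses, so naive string equality between the forms fails.
    rendered = []
    for key in ("database", "schema", "identifier"):
        value = parts[key]
        rendered.append(f'"{value}"' if quote_policy.get(key) else value)
    return ".".join(rendered)

# render_relation({"database": "dbt", "schema": "test", "identifier": "model"},
#                 {"database": True, "schema": True, "identifier": True})
# -> '"dbt"."test"."model"'
```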
+ run_dbt(run_args) + adapter = project.adapter + assert len(adapter.cache.relations) == 1 + second_relation = list(adapter.cache.relations).pop() + + # perform a case-insensitive + quote-insensitive comparison + for key in ["database", "schema", "identifier"]: + assert getattr(relation, key).lower() == getattr(second_relation, key).lower() + + def test_cache(self, project): + self.run_and_inspect_cache(project, run_args=["run"]) + + +class BaseCachingLowercaseModel(BaseCachingTest): + @pytest.fixture(scope="class") + def models(self): + return { + "model.sql": model_sql, + } + + +class BaseCachingUppercaseModel(BaseCachingTest): + @pytest.fixture(scope="class") + def models(self): + return { + "MODEL.sql": model_sql, + } + + +class BaseCachingSelectedSchemaOnly(BaseCachingTest): + @pytest.fixture(scope="class") + def models(self): + return { + "model.sql": model_sql, + "another_schema_model.sql": another_schema_model_sql, + } + + def test_cache(self, project): + # this should only cache the schema containing the selected model + run_args = ["--cache-selected-only", "run", "--select", "model"] + self.run_and_inspect_cache(project, run_args) + + +class TestCachingLowerCaseModel(BaseCachingLowercaseModel): + pass + + +class TestCachingUppercaseModel(BaseCachingUppercaseModel): + pass + + +class TestCachingSelectedSchemaOnly(BaseCachingSelectedSchemaOnly): + pass From fa7c4d19f0c4f8c76ece09c14105561c88eec503 Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Thu, 19 Jan 2023 09:34:08 +0100 Subject: [PATCH 120/156] Respect quoting config in dbt-py models (#6620) * Respect quoting for 'this' in dbt-py models #6619 * Respect quoting for ref/source in dbt-py models #6103 * Add changelog entries --- .changes/unreleased/Fixes-20230116-123645.yaml | 6 ++++++ .changes/unreleased/Fixes-20230116-123709.yaml | 6 ++++++ core/dbt/compilation.py | 9 --------- .../macros/python_model/python.sql | 17 +++++++++-------- 4 files changed, 21 insertions(+), 17 deletions(-) create mode 100644 .changes/unreleased/Fixes-20230116-123645.yaml create mode 100644 .changes/unreleased/Fixes-20230116-123709.yaml diff --git a/.changes/unreleased/Fixes-20230116-123645.yaml b/.changes/unreleased/Fixes-20230116-123645.yaml new file mode 100644 index 00000000000..ee15803a297 --- /dev/null +++ b/.changes/unreleased/Fixes-20230116-123645.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Respect quoting config for dbt.ref() + dbt.source() in dbt-py models +time: 2023-01-16T12:36:45.63092+01:00 +custom: + Author: jtcohen6 + Issue: "6103" diff --git a/.changes/unreleased/Fixes-20230116-123709.yaml b/.changes/unreleased/Fixes-20230116-123709.yaml new file mode 100644 index 00000000000..56788519d0a --- /dev/null +++ b/.changes/unreleased/Fixes-20230116-123709.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Respect quoting config for dbt.this() in dbt-py models +time: 2023-01-16T12:37:09.000659+01:00 +custom: + Author: jtcohen6 + Issue: "6619" diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 19e603b6312..c0237b0a993 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -351,13 +351,6 @@ def _compile_node( ) if node.language == ModelLanguage.python: - # TODO could we also 'minify' this code at all? just aesthetic, not functional - - # quoating seems like something very specific to sql so far - # for all python implementations we are seeing there's no quating. 
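For context on the deletion that continues below: before this patch, compiling a python model saved the project's quoting config, forced every key to False while rendering, and restored it afterwards. A minimal sketch of that save-override-restore pattern, with a plain dict standing in for dbt's config object (an assumption for illustration only):

```python
# stand-in for self.config.quoting (not dbt's actual config class)
quoting = {"database": True, "schema": True, "identifier": True}

original_quoting = dict(quoting)                 # save current settings
quoting.update({key: False for key in quoting})  # force quoting off globally

try:
    rendered_postfix = "..."  # render the python model postfix here
finally:
    quoting.clear()
    quoting.update(original_quoting)             # restore afterwards
```

The patch drops this global toggle entirely: relations are now rendered to strings that honor the configured quoting, which also sidesteps the lazy-evaluation hazard the old "restore quoting settings" comment warns about.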
- # TODO try to find better way to do this, given that - original_quoting = self.config.quoting - self.config.quoting = {key: False for key in original_quoting.keys()} context = self._create_node_context(node, manifest, extra_context) postfix = jinja.get_rendered( @@ -367,8 +360,6 @@ def _compile_node( ) # we should NOT jinja render the python model's 'raw code' node.compiled_code = f"{node.raw_code}\n\n{postfix}" - # restore quoting settings in the end since context is lazy evaluated - self.config.quoting = original_quoting else: context = self._create_node_context(node, manifest, extra_context) diff --git a/core/dbt/include/global_project/macros/python_model/python.sql b/core/dbt/include/global_project/macros/python_model/python.sql index c56ff7f31c8..64da81ae646 100644 --- a/core/dbt/include/global_project/macros/python_model/python.sql +++ b/core/dbt/include/global_project/macros/python_model/python.sql @@ -3,7 +3,7 @@ {%- set ref_dict = {} -%} {%- for _ref in model.refs -%} {%- set resolved = ref(*_ref) -%} - {%- do ref_dict.update({_ref | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%} + {%- do ref_dict.update({_ref | join("."): resolved | string | replace('"', '\"')}) -%} {%- endfor -%} def ref(*args,dbt_load_df_function): @@ -18,7 +18,7 @@ def ref(*args,dbt_load_df_function): {%- set source_dict = {} -%} {%- for _source in model.sources -%} {%- set resolved = source(*_source) -%} - {%- do source_dict.update({_source | join("."): resolved.quote(database=False, schema=False, identifier=False) | string}) -%} + {%- do source_dict.update({_source | join("."): resolved | string | replace('"', '\"')}) -%} {%- endfor -%} def source(*args, dbt_load_df_function): @@ -33,8 +33,8 @@ def source(*args, dbt_load_df_function): {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list %} {%- for key, default in config_dbt_used -%} {# weird type testing with enum, would be much easier to write this logic in Python! 
#} - {%- if key == 'language' -%} - {%- set value = 'python' -%} + {%- if key == "language" -%} + {%- set value = "python" -%} {%- endif -%} {%- set value = model.config.get(key, default) -%} {%- do config_dict.update({key: value}) -%} @@ -62,11 +62,12 @@ class config: class this: """dbt.this() or dbt.this.identifier""" - database = '{{ this.database }}' - schema = '{{ this.schema }}' - identifier = '{{ this.identifier }}' + database = "{{ this.database }}" + schema = "{{ this.schema }}" + identifier = "{{ this.identifier }}" + {% set this_relation_name = this | string | replace('"', '\\"') %} def __repr__(self): - return '{{ this }}' + return "{{ this_relation_name }}" class dbtObj: From b05582de39371509282844fde5c3e7c2bce824c9 Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Thu, 19 Jan 2023 10:12:59 +0100 Subject: [PATCH 121/156] mv `on_schema_change` tests -> "adapter zone" (#6618) * Mv incremental on_schema_change tests to 'adapter zone' * Use type_string() * Cleanup --- .../tests/adapter/incremental}/fixtures.py | 105 ++---------------- .../test_incremental_on_schema_change.py} | 36 ++---- 2 files changed, 16 insertions(+), 125 deletions(-) rename tests/{functional/incremental_schema_tests => adapter/dbt/tests/adapter/incremental}/fixtures.py (73%) rename tests/{functional/incremental_schema_tests/test_incremental_schema.py => adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py} (76%) diff --git a/tests/functional/incremental_schema_tests/fixtures.py b/tests/adapter/dbt/tests/adapter/incremental/fixtures.py similarity index 73% rename from tests/functional/incremental_schema_tests/fixtures.py rename to tests/adapter/dbt/tests/adapter/incremental/fixtures.py index b80bea45e80..6e130266df2 100644 --- a/tests/functional/incremental_schema_tests/fixtures.py +++ b/tests/adapter/dbt/tests/adapter/incremental/fixtures.py @@ -1,60 +1,3 @@ -# -# Properties -# -_PROPERTIES__SCHEMA = """ -version: 2 - -models: - - name: model_a - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_ignore_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_append_new_columns_target - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns - columns: - - name: id - tags: [column_level_tag] - tests: - - unique - - - name: incremental_sync_all_columns_target - columns: - - name: id - tags: [column_leveL_tag] - tests: - - unique -""" - # # Models # @@ -70,7 +13,7 @@ WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} {% if is_incremental() %} @@ -123,7 +66,7 @@ ) -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} select id ,cast(field1 as {{string_type}}) as field1 @@ -184,7 +127,7 @@ WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} {% if is_incremental() %} @@ -215,7 +158,7 @@ ) }} -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) @@ -267,7 +210,7 @@ config(materialized='table') }} -{% set string_type = 'varchar(10)' %} +{% set string_type = 
dbt.type_string() %} with source_data as ( @@ -293,7 +236,7 @@ ) }} -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} WITH source_data AS (SELECT * FROM {{ ref('model_a') }} ) @@ -327,7 +270,7 @@ ) -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} select id ,cast(field1 as {{string_type}}) as field1 @@ -344,7 +287,7 @@ config(materialized='table') }} -{% set string_type = 'varchar(10)' %} +{% set string_type = dbt.type_string() %} with source_data as ( @@ -360,35 +303,3 @@ from source_data """ - -# -# Tests -# - -_TESTS__SELECT_FROM_INCREMENTAL_IGNORE = """ -select * from {{ ref('incremental_ignore') }} where false -""" - -_TESTS__SELECT_FROM_A = """ -select * from {{ ref('model_a') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET = """ -select * from {{ ref('incremental_append_new_columns_target') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS = """ -select * from {{ ref('incremental_sync_all_columns') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET = """ -select * from {{ ref('incremental_sync_all_columns_target') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET = """ -select * from {{ ref('incremental_ignore_target') }} where false -""" - -_TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS = """ -select * from {{ ref('incremental_append_new_columns') }} where false -""" diff --git a/tests/functional/incremental_schema_tests/test_incremental_schema.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py similarity index 76% rename from tests/functional/incremental_schema_tests/test_incremental_schema.py rename to tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py index 8203f497331..4fbefbe7651 100644 --- a/tests/functional/incremental_schema_tests/test_incremental_schema.py +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_on_schema_change.py @@ -5,8 +5,7 @@ run_dbt, ) -from tests.functional.incremental_schema_tests.fixtures import ( - _PROPERTIES__SCHEMA, +from dbt.tests.adapter.incremental.fixtures import ( _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY, _MODELS__INCREMENTAL_IGNORE, _MODELS__INCREMENTAL_SYNC_REMOVE_ONLY_TARGET, @@ -19,23 +18,10 @@ _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS, _MODELS__INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, - _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, - _TESTS__SELECT_FROM_A, - _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, - _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, - _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, - _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, - _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, ) -class TestIncrementalSchemaChange: - @pytest.fixture(scope="class") - def properties(self): - return { - "schema.yml": _PROPERTIES__SCHEMA, - } - +class BaseIncrementalOnSchemaChangeSetup: @pytest.fixture(scope="class") def models(self): return { @@ -53,18 +39,6 @@ def models(self): "incremental_append_new_columns_remove_one_target.sql": _MODELS__INCREMENTAL_APPEND_NEW_COLUMNS_REMOVE_ONE_TARGET, } - @pytest.fixture(scope="class") - def tests(self): - return { - "select_from_incremental.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE, - "select_from_a.sql": _TESTS__SELECT_FROM_A, - "select_from_incremental_append_new_columns_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS_TARGET, - 
"select_from_incremental_sync_all_columns.sql": _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS, - "select_from_incremental_sync_all_columns_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_SYNC_ALL_COLUMNS_TARGET, - "select_from_incremental_ignore_target.sql": _TESTS__SELECT_FROM_INCREMENTAL_IGNORE_TARGET, - "select_from_incremental_append_new_columns.sql": _TESTS__SELECT_FROM_INCREMENTAL_APPEND_NEW_COLUMNS, - } - def run_twice_and_assert(self, include, compare_source, compare_target, project): # dbt run (twice) @@ -103,6 +77,8 @@ def run_incremental_sync_remove_only(self, project): compare_target = "incremental_sync_remove_only_target" self.run_twice_and_assert(select, compare_source, compare_target, project) + +class BaseIncrementalOnSchemaChange(BaseIncrementalOnSchemaChangeSetup): def test_run_incremental_ignore(self, project): select = "model_a incremental_ignore incremental_ignore_target" compare_source = "incremental_ignore" @@ -122,3 +98,7 @@ def test_run_incremental_fail_on_schema_change(self, project): run_dbt(["run", "--models", select, "--full-refresh"]) results_two = run_dbt(["run", "--models", select], expect_pass=False) assert "Compilation Error" in results_two[1].message + + +class TestIncrementalOnSchemaChange(BaseIncrementalOnSchemaChange): + pass From 07a004b3013f159cfd2da2509dd71ce3fd4b86fa Mon Sep 17 00:00:00 2001 From: Jeremy Cohen Date: Thu, 19 Jan 2023 11:00:09 +0100 Subject: [PATCH 122/156] convert 062_defer_state_tests (#6616) * Fix --favor-state flag * Convert 062_defer_state_tests * Revert "Fix --favor-state flag" This reverts commit ccbdcbad98b26822629364e6fdbd2780db0c20d3. * Reformat * Revert "Revert "Fix --favor-state flag"" This reverts commit fa9d2a09d693b1870bd724a694fce2883748c987. --- core/dbt/main.py | 12 +- core/dbt/task/compile.py | 1 + .../changed_models/ephemeral_model.sql | 2 - .../changed_models/schema.yml | 9 - .../changed_models/table_model.sql | 5 - .../changed_models/view_model.sql | 1 - .../changed_models_bad/ephemeral_model.sql | 2 - .../changed_models_bad/schema.yml | 9 - .../changed_models_bad/table_model.sql | 5 - .../changed_models_bad/view_model.sql | 1 - .../changed_models_missing/schema.yml | 9 - .../changed_models_missing/table_model.sql | 2 - .../changed_models_missing/view_model.sql | 1 - .../macros/infinite_macros.sql | 13 - .../062_defer_state_tests/macros/macros.sql | 3 - .../models/ephemeral_model.sql | 2 - .../models/exposures.yml | 8 - .../062_defer_state_tests/models/schema.yml | 10 - .../models/table_model.sql | 5 - .../models/view_model.sql | 4 - .../previous_state/manifest.json | 6 - .../062_defer_state_tests/seeds/seed.csv | 3 - .../snapshots/my_snapshot.sql | 14 - .../062_defer_state_tests/test_defer_state.py | 354 ------------- .../test_modified_state.py | 211 -------- .../test_run_results_state.py | 434 --------------- tests/functional/defer_state/fixtures.py | 101 ++++ .../defer_state/test_defer_state.py | 273 ++++++++++ .../defer_state/test_modified_state.py | 263 ++++++++++ .../defer_state/test_run_results_state.py | 494 ++++++++++++++++++ 30 files changed, 1135 insertions(+), 1122 deletions(-) delete mode 100644 test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql delete mode 100644 test/integration/062_defer_state_tests/changed_models/schema.yml delete mode 100644 test/integration/062_defer_state_tests/changed_models/table_model.sql delete mode 100644 test/integration/062_defer_state_tests/changed_models/view_model.sql delete mode 100644 
test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql delete mode 100644 test/integration/062_defer_state_tests/changed_models_bad/schema.yml delete mode 100644 test/integration/062_defer_state_tests/changed_models_bad/table_model.sql delete mode 100644 test/integration/062_defer_state_tests/changed_models_bad/view_model.sql delete mode 100644 test/integration/062_defer_state_tests/changed_models_missing/schema.yml delete mode 100644 test/integration/062_defer_state_tests/changed_models_missing/table_model.sql delete mode 100644 test/integration/062_defer_state_tests/changed_models_missing/view_model.sql delete mode 100644 test/integration/062_defer_state_tests/macros/infinite_macros.sql delete mode 100644 test/integration/062_defer_state_tests/macros/macros.sql delete mode 100644 test/integration/062_defer_state_tests/models/ephemeral_model.sql delete mode 100644 test/integration/062_defer_state_tests/models/exposures.yml delete mode 100644 test/integration/062_defer_state_tests/models/schema.yml delete mode 100644 test/integration/062_defer_state_tests/models/table_model.sql delete mode 100644 test/integration/062_defer_state_tests/models/view_model.sql delete mode 100644 test/integration/062_defer_state_tests/previous_state/manifest.json delete mode 100644 test/integration/062_defer_state_tests/seeds/seed.csv delete mode 100644 test/integration/062_defer_state_tests/snapshots/my_snapshot.sql delete mode 100644 test/integration/062_defer_state_tests/test_defer_state.py delete mode 100644 test/integration/062_defer_state_tests/test_modified_state.py delete mode 100644 test/integration/062_defer_state_tests/test_run_results_state.py create mode 100644 tests/functional/defer_state/fixtures.py create mode 100644 tests/functional/defer_state/test_defer_state.py create mode 100644 tests/functional/defer_state/test_modified_state.py create mode 100644 tests/functional/defer_state/test_run_results_state.py diff --git a/core/dbt/main.py b/core/dbt/main.py index 8368ab9f723..a6c6f0b013d 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -486,7 +486,7 @@ def _build_snapshot_subparser(subparsers, base_subparser): return sub -def _add_defer_argument(*subparsers): +def _add_defer_arguments(*subparsers): for sub in subparsers: sub.add_optional_argument_inverse( "--defer", @@ -499,10 +499,6 @@ def _add_defer_argument(*subparsers): """, default=flags.DEFER_MODE, ) - - -def _add_favor_state_argument(*subparsers): - for sub in subparsers: sub.add_optional_argument_inverse( "--favor-state", enable_help=""" @@ -580,7 +576,7 @@ def _build_docs_generate_subparser(subparsers, base_subparser): Do not run "dbt compile" as part of docs generation """, ) - _add_defer_argument(generate_sub) + _add_defer_arguments(generate_sub) return generate_sub @@ -1192,9 +1188,7 @@ def parse_args(args, cls=DBTArgumentParser): # list_sub sets up its own arguments. 
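The `_add_defer_arguments` consolidation above means `--defer` and `--favor-state` are always registered together on each subcommand. A rough analogue with stock `argparse` (dbt uses its own parser subclass and inverse-flag helpers, so this is only a sketch):

```python
import argparse


def add_defer_arguments(*subparsers: argparse.ArgumentParser) -> None:
    # register both deferral-related flags on every subcommand at once
    for sub in subparsers:
        sub.add_argument("--defer", action="store_true",
                         help="resolve unbuilt refs from --state artifacts")
        sub.add_argument("--favor-state", action="store_true",
                         help="prefer the --state namespace for deferred nodes")


parser = argparse.ArgumentParser(prog="dbt")
subs = parser.add_subparsers(dest="command")
run_sub = subs.add_parser("run")
test_sub = subs.add_parser("test")
add_defer_arguments(run_sub, test_sub)

args = parser.parse_args(["run", "--defer", "--favor-state"])
assert args.defer and args.favor_state
```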
_add_selection_arguments(run_sub, compile_sub, generate_sub, test_sub, snapshot_sub, seed_sub) # --defer - _add_defer_argument(run_sub, test_sub, build_sub, snapshot_sub, compile_sub) - # --favor-state - _add_favor_state_argument(run_sub, test_sub, build_sub, snapshot_sub) + _add_defer_arguments(run_sub, test_sub, build_sub, snapshot_sub, compile_sub) # --full-refresh _add_table_mutability_arguments(run_sub, compile_sub, build_sub) diff --git a/core/dbt/task/compile.py b/core/dbt/task/compile.py index 995063491f6..7d2bc0482db 100644 --- a/core/dbt/task/compile.py +++ b/core/dbt/task/compile.py @@ -83,6 +83,7 @@ def defer_to_manifest(self, adapter, selected_uids: AbstractSet[str]): adapter=adapter, other=deferred_manifest, selected=selected_uids, + favor_state=bool(self.args.favor_state), ) # TODO: is it wrong to write the manifest here? I think it's right... self.write_manifest() diff --git a/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql b/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql deleted file mode 100644 index 2f976e3a9b5..00000000000 --- a/test/integration/062_defer_state_tests/changed_models/ephemeral_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='ephemeral') }} -select * from {{ ref('view_model') }} diff --git a/test/integration/062_defer_state_tests/changed_models/schema.yml b/test/integration/062_defer_state_tests/changed_models/schema.yml deleted file mode 100644 index 1ec506d3d19..00000000000 --- a/test/integration/062_defer_state_tests/changed_models/schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 -models: - - name: view_model - columns: - - name: id - tests: - - unique - - not_null - - name: name diff --git a/test/integration/062_defer_state_tests/changed_models/table_model.sql b/test/integration/062_defer_state_tests/changed_models/table_model.sql deleted file mode 100644 index 65909318bab..00000000000 --- a/test/integration/062_defer_state_tests/changed_models/table_model.sql +++ /dev/null @@ -1,5 +0,0 @@ -{{ config(materialized='table') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} \ No newline at end of file diff --git a/test/integration/062_defer_state_tests/changed_models/view_model.sql b/test/integration/062_defer_state_tests/changed_models/view_model.sql deleted file mode 100644 index bddbbb23cc2..00000000000 --- a/test/integration/062_defer_state_tests/changed_models/view_model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from no.such.table diff --git a/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql b/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql deleted file mode 100644 index 5155dfa475e..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_bad/ephemeral_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='ephemeral') }} -select * from no.such.table diff --git a/test/integration/062_defer_state_tests/changed_models_bad/schema.yml b/test/integration/062_defer_state_tests/changed_models_bad/schema.yml deleted file mode 100644 index 1ec506d3d19..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_bad/schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 -models: - - name: view_model - columns: - - name: id - tests: - - unique - - not_null - - name: name diff --git a/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql 
b/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql deleted file mode 100644 index 65909318bab..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_bad/table_model.sql +++ /dev/null @@ -1,5 +0,0 @@ -{{ config(materialized='table') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} \ No newline at end of file diff --git a/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql b/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql deleted file mode 100644 index bddbbb23cc2..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_bad/view_model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from no.such.table diff --git a/test/integration/062_defer_state_tests/changed_models_missing/schema.yml b/test/integration/062_defer_state_tests/changed_models_missing/schema.yml deleted file mode 100644 index 1ec506d3d19..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_missing/schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 -models: - - name: view_model - columns: - - name: id - tests: - - unique - - not_null - - name: name diff --git a/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql b/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql deleted file mode 100644 index 22b040d2c8b..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_missing/table_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='table') }} -select 1 as fun diff --git a/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql b/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql deleted file mode 100644 index 4b91aa0f2fa..00000000000 --- a/test/integration/062_defer_state_tests/changed_models_missing/view_model.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('seed') }} diff --git a/test/integration/062_defer_state_tests/macros/infinite_macros.sql b/test/integration/062_defer_state_tests/macros/infinite_macros.sql deleted file mode 100644 index 81d2083d3bb..00000000000 --- a/test/integration/062_defer_state_tests/macros/infinite_macros.sql +++ /dev/null @@ -1,13 +0,0 @@ -{# trigger infinite recursion if not handled #} - -{% macro my_infinitely_recursive_macro() %} - {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }} -{% endmacro %} - -{% macro default__my_infinitely_recursive_macro() %} - {% if unmet_condition %} - {{ my_infinitely_recursive_macro() }} - {% else %} - {{ return('') }} - {% endif %} -{% endmacro %} diff --git a/test/integration/062_defer_state_tests/macros/macros.sql b/test/integration/062_defer_state_tests/macros/macros.sql deleted file mode 100644 index 79519c1b60b..00000000000 --- a/test/integration/062_defer_state_tests/macros/macros.sql +++ /dev/null @@ -1,3 +0,0 @@ -{% macro my_macro() %} - {% do log('in a macro' ) %} -{% endmacro %} diff --git a/test/integration/062_defer_state_tests/models/ephemeral_model.sql b/test/integration/062_defer_state_tests/models/ephemeral_model.sql deleted file mode 100644 index 2f976e3a9b5..00000000000 --- a/test/integration/062_defer_state_tests/models/ephemeral_model.sql +++ /dev/null @@ -1,2 +0,0 @@ -{{ config(materialized='ephemeral') }} -select * from {{ ref('view_model') }} diff --git a/test/integration/062_defer_state_tests/models/exposures.yml b/test/integration/062_defer_state_tests/models/exposures.yml deleted 
file mode 100644 index 489dec3c3c4..00000000000 --- a/test/integration/062_defer_state_tests/models/exposures.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -exposures: - - name: my_exposure - type: application - depends_on: - - ref('view_model') - owner: - email: test@example.com diff --git a/test/integration/062_defer_state_tests/models/schema.yml b/test/integration/062_defer_state_tests/models/schema.yml deleted file mode 100644 index 342335148bf..00000000000 --- a/test/integration/062_defer_state_tests/models/schema.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 -models: - - name: view_model - columns: - - name: id - tests: - - unique: - severity: error - - not_null - - name: name diff --git a/test/integration/062_defer_state_tests/models/table_model.sql b/test/integration/062_defer_state_tests/models/table_model.sql deleted file mode 100644 index 65909318bab..00000000000 --- a/test/integration/062_defer_state_tests/models/table_model.sql +++ /dev/null @@ -1,5 +0,0 @@ -{{ config(materialized='table') }} -select * from {{ ref('ephemeral_model') }} - --- establish a macro dependency to trigger state:modified.macros --- depends on: {{ my_macro() }} \ No newline at end of file diff --git a/test/integration/062_defer_state_tests/models/view_model.sql b/test/integration/062_defer_state_tests/models/view_model.sql deleted file mode 100644 index 72cb07a5ef4..00000000000 --- a/test/integration/062_defer_state_tests/models/view_model.sql +++ /dev/null @@ -1,4 +0,0 @@ -select * from {{ ref('seed') }} - --- establish a macro dependency that trips infinite recursion if not handled --- depends on: {{ my_infinitely_recursive_macro() }} \ No newline at end of file diff --git a/test/integration/062_defer_state_tests/previous_state/manifest.json b/test/integration/062_defer_state_tests/previous_state/manifest.json deleted file mode 100644 index 6ab63f3f563..00000000000 --- a/test/integration/062_defer_state_tests/previous_state/manifest.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "metadata": { - "dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v3.json", - "dbt_version": "0.21.1" - } -} diff --git a/test/integration/062_defer_state_tests/seeds/seed.csv b/test/integration/062_defer_state_tests/seeds/seed.csv deleted file mode 100644 index 1a728c8ab74..00000000000 --- a/test/integration/062_defer_state_tests/seeds/seed.csv +++ /dev/null @@ -1,3 +0,0 @@ -id,name -1,Alice -2,Bob diff --git a/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql b/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql deleted file mode 100644 index 6a7d2b31bfa..00000000000 --- a/test/integration/062_defer_state_tests/snapshots/my_snapshot.sql +++ /dev/null @@ -1,14 +0,0 @@ -{% snapshot my_cool_snapshot %} - - {{ - config( - target_database=database, - target_schema=schema, - unique_key='id', - strategy='check', - check_cols=['id'], - ) - }} - select * from {{ ref('view_model') }} - -{% endsnapshot %} diff --git a/test/integration/062_defer_state_tests/test_defer_state.py b/test/integration/062_defer_state_tests/test_defer_state.py deleted file mode 100644 index 593dc034036..00000000000 --- a/test/integration/062_defer_state_tests/test_defer_state.py +++ /dev/null @@ -1,354 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import copy -import json -import os -import shutil - -import pytest -import dbt.exceptions - - -class TestDeferState(DBTIntegrationTest): - @property - def schema(self): - return "defer_state_062" - - @property - def models(self): - return "models" - - 
def setUp(self): - self.other_schema = None - super().setUp() - self._created_schemas.add(self.other_schema) - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'test': { - 'quote_columns': False, - } - } - } - - def get_profile(self, adapter_type): - if self.other_schema is None: - self.other_schema = self.unique_schema() + '_other' - profile = super().get_profile(adapter_type) - default_name = profile['test']['target'] - profile['test']['outputs']['otherschema'] = copy.deepcopy(profile['test']['outputs'][default_name]) - profile['test']['outputs']['otherschema']['schema'] = self.other_schema - return profile - - def copy_state(self): - assert not os.path.exists('state') - os.makedirs('state') - shutil.copyfile('target/manifest.json', 'state/manifest.json') - - def run_and_compile_defer(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['run']) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['test']) - assert len(results) == 2 - - # copy files - self.copy_state() - - # defer test, it succeeds - results, success = self.run_dbt_and_check(['compile', '--state', 'state', '--defer']) - self.assertEqual(len(results.results), 6) - self.assertEqual(results.results[0].node.name, "seed") - self.assertTrue(success) - - def run_and_snapshot_defer(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['run']) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['test']) - assert len(results) == 2 - - # snapshot succeeds without --defer - results = self.run_dbt(['snapshot']) - - # no state, snapshot fails - with pytest.raises(dbt.exceptions.DbtRuntimeError): - results = self.run_dbt(['snapshot', '--state', 'state', '--defer']) - - # copy files - self.copy_state() - - # defer test, it succeeds - results = self.run_dbt(['snapshot', '--state', 'state', '--defer']) - - # favor_state test, it succeeds - results = self.run_dbt(['snapshot', '--state', 'state', '--defer', '--favor-state']) - - def run_and_defer(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['run']) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['test']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - # test tests first, because run will change things - # no state, wrong schema, failure. 
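Stepping back from the deleted test body: the workflow it scripts is to snapshot `target/manifest.json` from a healthy run into a `state/` directory, then point later invocations at it. A sketch of that sequence as a standalone script (the subprocess call mirrors the test's arguments and assumes a `dbt` executable on PATH):

```python
import os
import shutil
import subprocess

# snapshot the manifest produced by a known-good run
os.makedirs("state", exist_ok=True)
shutil.copyfile("target/manifest.json", "state/manifest.json")

# later, against a different target, resolve unbuilt parents from the snapshot
subprocess.run(
    ["dbt", "test", "-m", "view_model+",
     "--state", "state", "--defer", "--target", "otherschema"],
    check=True,
)
```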
- self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False) - - # test generate docs - # no state, wrong schema, empty nodes - catalog = self.run_dbt(['docs','generate','--target', 'otherschema']) - assert not catalog.nodes - - # no state, run also fails - self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False) - - # defer test, it succeeds - results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--target', 'otherschema']) - - # defer docs generate with state, catalog refers schema from the happy times - catalog = self.run_dbt(['docs','generate', '-m', 'view_model+', '--state', 'state', '--defer','--target', 'otherschema']) - assert self.other_schema not in catalog.nodes["seed.test.seed"].metadata.schema - assert self.unique_schema() in catalog.nodes["seed.test.seed"].metadata.schema - - # with state it should work though - results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema']) - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - with open('target/manifest.json') as fp: - data = json.load(fp) - assert data['nodes']['seed.test.seed']['deferred'] - - assert len(results) == 1 - - def run_and_defer_favor_state(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['run']) - assert len(results) == 2 - assert not any(r.node.deferred for r in results) - results = self.run_dbt(['test']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - # test tests first, because run will change things - # no state, wrong schema, failure. 
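The `--favor-state` variants that follow pin down how deferral chooses a namespace. A toy model of that decision rule, inferred from the assertions in these tests (an assumption for illustration, not dbt's implementation):

```python
def resolve_relation(name, built_locally, state_relations, favor_state=False):
    in_state = name in state_relations
    if favor_state and in_state:
        return state_relations[name]      # always prefer the state namespace
    if not built_locally and in_state:
        return state_relations[name]      # plain --defer: only when missing locally
    return f"current_schema.{name}"       # otherwise use the current target


state = {"seed": "state_schema.seed"}
# seed already built locally: plain --defer does not defer it ...
assert resolve_relation("seed", True, state) == "current_schema.seed"
# ... but --favor-state still resolves it from the state namespace
assert resolve_relation("seed", True, state, favor_state=True) == "state_schema.seed"
```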
- self.run_dbt(['test', '--target', 'otherschema'], expect_pass=False) - - # no state, run also fails - self.run_dbt(['run', '--target', 'otherschema'], expect_pass=False) - - # defer test, it succeeds - results = self.run_dbt(['test', '-m', 'view_model+', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) - - # with state it should work though - results = self.run_dbt(['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - with open('target/manifest.json') as fp: - data = json.load(fp) - assert data['nodes']['seed.test.seed']['deferred'] - - assert len(results) == 1 - - def run_switchdirs_defer(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - self.use_default_project({'model-paths': ['changed_models']}) - # the sql here is just wrong, so it should fail - self.run_dbt( - ['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'], - expect_pass=False, - ) - # but this should work since we just use the old happy model - self.run_dbt( - ['run', '-m', 'table_model', '--state', 'state', '--defer', '--target', 'otherschema'], - expect_pass=True, - ) - - self.use_default_project({'model-paths': ['changed_models_bad']}) - # this should fail because the table model refs a broken ephemeral - # model, which it should see - self.run_dbt( - ['run', '-m', 'table_model', '--state', 'state', '--defer', '--target', 'otherschema'], - expect_pass=False, - ) - - def run_switchdirs_defer_favor_state(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - self.use_default_project({'model-paths': ['changed_models']}) - # the sql here is just wrong, so it should fail - self.run_dbt( - ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], - expect_pass=False, - ) - # but this should work since we just use the old happy model - self.run_dbt( - ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], - expect_pass=True, - ) - - self.use_default_project({'model-paths': ['changed_models_bad']}) - # this should fail because the table model refs a broken ephemeral - # model, which it should see - self.run_dbt( - ['run', '-m', 'table_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], - expect_pass=False, - ) - - def run_defer_iff_not_exists(self): - results = self.run_dbt(['seed', '--target', 'otherschema']) - assert len(results) == 1 - results = self.run_dbt(['run', '--target', 'otherschema']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run', '--state', 'state', '--defer']) - assert len(results) == 2 - - # because the seed now exists in our schema, we shouldn't defer it - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - def run_defer_iff_not_exists_favor_state(self): - results = 
self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema']) - assert len(results) == 2 - - # because the seed exists in other schema, we should defer it - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - def run_defer_deleted_upstream(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - self.use_default_project({'model-paths': ['changed_models_missing']}) - # ephemeral_model is now gone. previously this caused a - # keyerror (dbt#2875), now it should pass - self.run_dbt( - ['run', '-m', 'view_model', '--state', 'state', '--defer', '--target', 'otherschema'], - expect_pass=True, - ) - - # despite deferral, test should use models just created in our schema - results = self.run_dbt(['test', '--state', 'state', '--defer']) - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - def run_defer_deleted_upstream_favor_state(self): - results = self.run_dbt(['seed']) - assert len(results) == 1 - results = self.run_dbt(['run']) - assert len(results) == 2 - - # copy files over from the happy times when we had a good target - self.copy_state() - - self.use_default_project({'model-paths': ['changed_models_missing']}) - - self.run_dbt( - ['run', '-m', 'view_model', '--state', 'state', '--defer', '--favor-state', '--target', 'otherschema'], - expect_pass=True, - ) - - # despite deferral, test should use models just created in our schema - results = self.run_dbt(['test', '--state', 'state', '--defer', '--favor-state']) - assert self.other_schema not in results[0].node.compiled_code - assert self.unique_schema() in results[0].node.compiled_code - - @use_profile('postgres') - def test_postgres_state_changetarget(self): - self.run_and_defer() - - # make sure these commands don't work with --defer - with pytest.raises(SystemExit): - self.run_dbt(['seed', '--defer']) - - @use_profile('postgres') - def test_postgres_state_changetarget_favor_state(self): - self.run_and_defer_favor_state() - - # make sure these commands don't work with --defer - with pytest.raises(SystemExit): - self.run_dbt(['seed', '--defer']) - - @use_profile('postgres') - def test_postgres_state_changedir(self): - self.run_switchdirs_defer() - - @use_profile('postgres') - def test_postgres_state_changedir_favor_state(self): - self.run_switchdirs_defer_favor_state() - - @use_profile('postgres') - def test_postgres_state_defer_iffnotexists(self): - self.run_defer_iff_not_exists() - - @use_profile('postgres') - def test_postgres_state_defer_iffnotexists_favor_state(self): - self.run_defer_iff_not_exists_favor_state() - - @use_profile('postgres') - def test_postgres_state_defer_deleted_upstream(self): - self.run_defer_deleted_upstream() - - @use_profile('postgres') - def test_postgres_state_defer_deleted_upstream_favor_state(self): - self.run_defer_deleted_upstream_favor_state() - - @use_profile('postgres') - def test_postgres_state_snapshot_defer(self): - self.run_and_snapshot_defer() - - @use_profile('postgres') - def 
test_postgres_state_compile_defer(self): - self.run_and_compile_defer() diff --git a/test/integration/062_defer_state_tests/test_modified_state.py b/test/integration/062_defer_state_tests/test_modified_state.py deleted file mode 100644 index 085faf11d5b..00000000000 --- a/test/integration/062_defer_state_tests/test_modified_state.py +++ /dev/null @@ -1,211 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os -import random -import shutil -import string - -import pytest - -from dbt.exceptions import CompilationError, IncompatibleSchemaError - - -class TestModifiedState(DBTIntegrationTest): - @property - def schema(self): - return "modified_state_062" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'macro-paths': ['macros'], - 'seeds': { - 'test': { - 'quote_columns': True, - } - } - } - - def _symlink_test_folders(self): - # dbt's normal symlink behavior breaks this test. Copy the files - # so we can freely modify them. - for entry in os.listdir(self.test_original_source_path): - src = os.path.join(self.test_original_source_path, entry) - tst = os.path.join(self.test_root_dir, entry) - if entry in {'models', 'seeds', 'macros', 'previous_state'}: - shutil.copytree(src, tst) - elif os.path.isdir(entry) or entry.endswith('.sql'): - os.symlink(src, tst) - - def copy_state(self): - assert not os.path.exists('state') - os.makedirs('state') - shutil.copyfile('target/manifest.json', 'state/manifest.json') - - def setUp(self): - super().setUp() - self.run_dbt(['seed']) - self.run_dbt(['run']) - self.copy_state() - - @use_profile('postgres') - def test_postgres_changed_seed_contents_state(self): - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) - assert len(results) == 0 - with open('seeds/seed.csv') as fp: - fp.readline() - newline = fp.newlines - with open('seeds/seed.csv', 'a') as fp: - fp.write(f'3,carl{newline}') - - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'state:modified+', '--state', './state']) - assert len(results) == 7 - assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - shutil.rmtree('./state') - self.copy_state() - - with open('seeds/seed.csv', 'a') as fp: - # assume each line is ~2 bytes + len(name) - target_size = 1*1024*1024 - line_size = 64 - - num_lines = target_size // line_size - - maxlines = num_lines + 4 - - for idx in range(4, maxlines): - value = ''.join(random.choices(string.ascii_letters, k=62)) - fp.write(f'{idx},{value}{newline}') - - # now if we run again, we should get a warning - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - with pytest.raises(CompilationError) as exc: - self.run_dbt(['--warn-error', 'ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) - assert '>1MB' in str(exc.value) - - shutil.rmtree('./state') - self.copy_state() - - # once it's in path 
mode, we don't mark it as modified if it changes - with open('seeds/seed.csv', 'a') as fp: - fp.write(f'{random},test{newline}') - - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) - assert len(results) == 0 - - @use_profile('postgres') - def test_postgres_changed_seed_config(self): - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state'], expect_pass=True) - assert len(results) == 0 - - self.use_default_project({'seeds': {'test': {'quote_columns': False}}}) - - # quoting change -> seed changed - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - @use_profile('postgres') - def test_postgres_unrendered_config_same(self): - results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state'], expect_pass=True) - assert len(results) == 0 - - # although this is the default value, dbt will recognize it as a change - # for previously-unconfigured models, because it's been explicitly set - self.use_default_project({'models': {'test': {'materialized': 'view'}}}) - results = self.run_dbt(['ls', '--resource-type', 'model', '--select', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.view_model' - - @use_profile('postgres') - def test_postgres_changed_model_contents(self): - results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 0 - - with open('models/table_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/table_model.sql', 'w') as fp: - fp.write("{{ config(materialized='table') }}") - fp.write(newline) - fp.write("select * from {{ ref('seed') }}") - fp.write(newline) - - results = self.run_dbt(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0].node.name == 'table_model' - - @use_profile('postgres') - def test_postgres_new_macro(self): - with open('macros/macros.sql') as fp: - fp.readline() - newline = fp.newlines - - new_macro = '{% macro my_other_macro() %}{% endmacro %}' + newline - - # add a new macro to a new file - with open('macros/second_macro.sql', 'w') as fp: - fp.write(new_macro) - - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 0 - - os.remove('macros/second_macro.sql') - # add a new macro to the existing file - with open('macros/macros.sql', 'a') as fp: - fp.write(new_macro) - - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 0 - - @use_profile('postgres') - def test_postgres_changed_macro_contents(self): - with open('macros/macros.sql') as fp: - fp.readline() - newline = fp.newlines - - # modify an existing macro - with open('macros/macros.sql', 'w') as fp: - fp.write("{% macro my_macro() %}") - fp.write(newline) - fp.write(" {% do log('in a macro', info=True) %}") - fp.write(newline) - fp.write('{% endmacro %}') - fp.write(newline) - - # table_model calls this macro - results, stdout = self.run_dbt_and_capture(['run', '--models', 'state:modified', '--state', './state']) - assert len(results) == 1 - - @use_profile('postgres') - def test_postgres_changed_exposure(self): - with open('models/exposures.yml', 'a') as fp: - fp.write(' name: John 
Doe\n') - - results, stdout = self.run_dbt_and_capture(['run', '--models', '+state:modified', '--state', './state']) - assert len(results) == 1 - assert results[0].node.name == 'view_model' - - @use_profile('postgres') - def test_postgres_previous_version_manifest(self): - # This tests that a different schema version in the file throws an error - with self.assertRaises(IncompatibleSchemaError) as exc: - results = self.run_dbt(['ls', '-s', 'state:modified', '--state', './previous_state']) - self.assertEqual(exc.CODE, 10014) diff --git a/test/integration/062_defer_state_tests/test_run_results_state.py b/test/integration/062_defer_state_tests/test_run_results_state.py deleted file mode 100644 index 58215009ad7..00000000000 --- a/test/integration/062_defer_state_tests/test_run_results_state.py +++ /dev/null @@ -1,434 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os -import random -import shutil -import string - -import pytest - - -class TestRunResultsState(DBTIntegrationTest): - @property - def schema(self): - return "run_results_state_062" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'macro-paths': ['macros'], - 'seeds': { - 'test': { - 'quote_columns': True, - } - } - } - - def _symlink_test_folders(self): - # dbt's normal symlink behavior breaks this test. Copy the files - # so we can freely modify them. - for entry in os.listdir(self.test_original_source_path): - src = os.path.join(self.test_original_source_path, entry) - tst = os.path.join(self.test_root_dir, entry) - if entry in {'models', 'seeds', 'macros'}: - shutil.copytree(src, tst) - elif os.path.isdir(entry) or entry.endswith('.sql'): - os.symlink(src, tst) - - def copy_state(self): - assert not os.path.exists('state') - os.makedirs('state') - shutil.copyfile('target/manifest.json', 'state/manifest.json') - shutil.copyfile('target/run_results.json', 'state/run_results.json') - - def setUp(self): - super().setUp() - self.run_dbt(['build']) - self.copy_state() - - def rebuild_run_dbt(self, expect_pass=True): - shutil.rmtree('./state') - self.run_dbt(['build'], expect_pass=expect_pass) - self.copy_state() - - @use_profile('postgres') - def test_postgres_seed_run_results_state(self): - shutil.rmtree('./state') - self.run_dbt(['seed']) - self.copy_state() - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:success', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:success', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:success+', '--state', './state']) - assert len(results) == 7 - assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - with open('seeds/seed.csv') as fp: - fp.readline() - newline = fp.newlines - with open('seeds/seed.csv', 'a') as fp: - fp.write(f'\"\'\'3,carl{newline}') - shutil.rmtree('./state') - self.run_dbt(['seed'], expect_pass=False) - self.copy_state() - - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:error', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state']) - assert 
len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state']) - assert len(results) == 7 - assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - - with open('seeds/seed.csv') as fp: - fp.readline() - newline = fp.newlines - with open('seeds/seed.csv', 'a') as fp: - # assume each line is ~2 bytes + len(name) - target_size = 1*1024*1024 - line_size = 64 - - num_lines = target_size // line_size - - maxlines = num_lines + 4 - - for idx in range(4, maxlines): - value = ''.join(random.choices(string.ascii_letters, k=62)) - fp.write(f'{idx},{value}{newline}') - shutil.rmtree('./state') - self.run_dbt(['seed'], expect_pass=False) - self.copy_state() - - results = self.run_dbt(['ls', '--resource-type', 'seed', '--select', 'result:error', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.seed' - - results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state']) - assert len(results) == 7 - assert set(results) == {'test.seed', 'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - @use_profile('postgres') - def test_postgres_build_run_results_state(self): - results = self.run_dbt(['build', '--select', 'result:error', '--state', './state']) - assert len(results) == 0 - - with open('models/view_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - self.rebuild_run_dbt(expect_pass=False) - - results = self.run_dbt(['build', '--select', 'result:error', '--state', './state'], expect_pass=False) - assert len(results) == 3 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'view_model', 'not_null_view_model_id','unique_view_model_id'} - - results = self.run_dbt(['ls', '--select', 'result:error', '--state', './state']) - assert len(results) == 3 - assert set(results) == {'test.view_model', 'test.not_null_view_model_id', 'test.unique_view_model_id'} - - results = self.run_dbt(['build', '--select', 'result:error+', '--state', './state'], expect_pass=False) - assert len(results) == 4 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'table_model','view_model', 'not_null_view_model_id','unique_view_model_id'} - - results = self.run_dbt(['ls', '--select', 'result:error+', '--state', './state']) - assert len(results) == 6 # includes exposure - assert set(results) == {'test.table_model', 'test.view_model', 'test.ephemeral_model', 'test.not_null_view_model_id', 'test.unique_view_model_id', 'exposure:test.my_exposure'} - - # test failure on build tests - # fail the unique test - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select 1 as id union all select 1 as id") - fp.write(newline) - - self.rebuild_run_dbt(expect_pass=False) - - results = self.run_dbt(['build', '--select', 'result:fail', '--state', './state'], expect_pass=False) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - results = self.run_dbt(['ls', '--select', 'result:fail', '--state', 
'./state']) - assert len(results) == 1 - assert results[0] == 'test.unique_view_model_id' - - results = self.run_dbt(['build', '--select', 'result:fail+', '--state', './state'], expect_pass=False) - assert len(results) == 2 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'table_model', 'unique_view_model_id'} - - results = self.run_dbt(['ls', '--select', 'result:fail+', '--state', './state']) - assert len(results) == 1 - assert set(results) == {'test.unique_view_model_id'} - - # change the unique test severity from error to warn and reuse the same view_model.sql changes above - f = open('models/schema.yml', 'r') - filedata = f.read() - f.close() - newdata = filedata.replace('error','warn') - f = open('models/schema.yml', 'w') - f.write(newdata) - f.close() - - self.rebuild_run_dbt(expect_pass=True) - - results = self.run_dbt(['build', '--select', 'result:warn', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - results = self.run_dbt(['ls', '--select', 'result:warn', '--state', './state']) - assert len(results) == 1 - assert results[0] == 'test.unique_view_model_id' - - results = self.run_dbt(['build', '--select', 'result:warn+', '--state', './state'], expect_pass=True) - assert len(results) == 2 # includes table_model to be run - nodes = set([elem.node.name for elem in results]) - assert nodes == {'table_model', 'unique_view_model_id'} - - results = self.run_dbt(['ls', '--select', 'result:warn+', '--state', './state']) - assert len(results) == 1 - assert set(results) == {'test.unique_view_model_id'} - - @use_profile('postgres') - def test_postgres_run_run_results_state(self): - results = self.run_dbt(['run', '--select', 'result:success', '--state', './state'], expect_pass=True) - assert len(results) == 2 - assert results[0].node.name == 'view_model' - assert results[1].node.name == 'table_model' - - # clear state and rerun upstream view model to test + operator - shutil.rmtree('./state') - self.run_dbt(['run', '--select', 'view_model'], expect_pass=True) - self.copy_state() - results = self.run_dbt(['run', '--select', 'result:success+', '--state', './state'], expect_pass=True) - assert len(results) == 2 - assert results[0].node.name == 'view_model' - assert results[1].node.name == 'table_model' - - # check we are starting from a place with 0 errors - results = self.run_dbt(['run', '--select', 'result:error', '--state', './state']) - assert len(results) == 0 - - # force an error in the view model to test error and skipped states - with open('models/view_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - shutil.rmtree('./state') - self.run_dbt(['run'], expect_pass=False) - self.copy_state() - - # test single result selector on error - results = self.run_dbt(['run', '--select', 'result:error', '--state', './state'], expect_pass=False) - assert len(results) == 1 - assert results[0].node.name == 'view_model' - - # test + operator selection on error - results = self.run_dbt(['run', '--select', 'result:error+', '--state', './state'], expect_pass=False) - assert len(results) == 2 - assert results[0].node.name == 'view_model' - assert results[1].node.name == 'table_model' - - # single result selector on skipped. 
Expect this to pass becase underlying view already defined above - results = self.run_dbt(['run', '--select', 'result:skipped', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0].node.name == 'table_model' - - # add a downstream model that depends on table_model for skipped+ selector - with open('models/table_model_downstream.sql', 'w') as fp: - fp.write("select * from {{ref('table_model')}}") - - shutil.rmtree('./state') - self.run_dbt(['run'], expect_pass=False) - self.copy_state() - - results = self.run_dbt(['run', '--select', 'result:skipped+', '--state', './state'], expect_pass=True) - assert len(results) == 2 - assert results[0].node.name == 'table_model' - assert results[1].node.name == 'table_model_downstream' - - - @use_profile('postgres') - def test_postgres_test_run_results_state(self): - # run passed nodes - results = self.run_dbt(['test', '--select', 'result:pass', '--state', './state'], expect_pass=True) - assert len(results) == 2 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'unique_view_model_id', 'not_null_view_model_id'} - - # run passed nodes with + operator - results = self.run_dbt(['test', '--select', 'result:pass+', '--state', './state'], expect_pass=True) - assert len(results) == 2 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'unique_view_model_id', 'not_null_view_model_id'} - - # update view model to generate a failure case - os.remove('./models/view_model.sql') - with open('models/view_model.sql', 'w') as fp: - fp.write("select 1 as id union all select 1 as id") - - self.rebuild_run_dbt(expect_pass=False) - - # test with failure selector - results = self.run_dbt(['test', '--select', 'result:fail', '--state', './state'], expect_pass=False) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - # test with failure selector and + operator - results = self.run_dbt(['test', '--select', 'result:fail+', '--state', './state'], expect_pass=False) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - # change the unique test severity from error to warn and reuse the same view_model.sql changes above - with open('models/schema.yml', 'r+') as f: - filedata = f.read() - newdata = filedata.replace('error','warn') - f.seek(0) - f.write(newdata) - f.truncate() - - # rebuild - expect_pass = True because we changed the error to a warning this time around - self.rebuild_run_dbt(expect_pass=True) - - # test with warn selector - results = self.run_dbt(['test', '--select', 'result:warn', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - # test with warn selector and + operator - results = self.run_dbt(['test', '--select', 'result:warn+', '--state', './state'], expect_pass=True) - assert len(results) == 1 - assert results[0].node.name == 'unique_view_model_id' - - - @use_profile('postgres') - def test_postgres_concurrent_selectors_run_run_results_state(self): - results = self.run_dbt(['run', '--select', 'state:modified+', 'result:error+', '--state', './state']) - assert len(results) == 0 - - # force an error on a dbt model - with open('models/view_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - shutil.rmtree('./state') - self.run_dbt(['run'], expect_pass=False) - self.copy_state() - - # modify another dbt model - 
with open('models/table_model_modified_example.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - results = self.run_dbt(['run', '--select', 'state:modified+', 'result:error+', '--state', './state'], expect_pass=False) - assert len(results) == 3 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'view_model', 'table_model_modified_example', 'table_model'} - - - @use_profile('postgres') - def test_postgres_concurrent_selectors_test_run_results_state(self): - # create failure test case for result:fail selector - os.remove('./models/view_model.sql') - with open('./models/view_model.sql', 'w') as f: - f.write('select 1 as id union all select 1 as id union all select null as id') - - # run dbt build again to trigger test errors - self.rebuild_run_dbt(expect_pass=False) - - # get the failures from - results = self.run_dbt(['test', '--select', 'result:fail', '--exclude', 'not_null_view_model_id', '--state', './state'], expect_pass=False) - assert len(results) == 1 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'unique_view_model_id'} - - - @use_profile('postgres') - def test_postgres_concurrent_selectors_build_run_results_state(self): - results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', '--state', './state']) - assert len(results) == 0 - - # force an error on a dbt model - with open('models/view_model.sql') as fp: - fp.readline() - newline = fp.newlines - - with open('models/view_model.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - self.rebuild_run_dbt(expect_pass=False) - - # modify another dbt model - with open('models/table_model_modified_example.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_error") - fp.write(newline) - - results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', '--state', './state'], expect_pass=False) - assert len(results) == 5 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'table_model_modified_example', 'view_model', 'table_model', 'not_null_view_model_id', 'unique_view_model_id'} - - # create failure test case for result:fail selector - os.remove('./models/view_model.sql') - with open('./models/view_model.sql', 'w') as f: - f.write('select 1 as id union all select 1 as id') - - # create error model case for result:error selector - with open('./models/error_model.sql', 'w') as f: - f.write('select 1 as id from not_exists') - - # create something downstream from the error model to rerun - with open('./models/downstream_of_error_model.sql', 'w') as f: - f.write('select * from {{ ref("error_model") }} )') - - # regenerate build state - self.rebuild_run_dbt(expect_pass=False) - - # modify model again to trigger the state:modified selector - with open('models/table_model_modified_example.sql', 'w') as fp: - fp.write(newline) - fp.write("select * from forced_another_error") - fp.write(newline) - - results = self.run_dbt(['build', '--select', 'state:modified+', 'result:error+', 'result:fail+', '--state', './state'], expect_pass=False) - assert len(results) == 5 - nodes = set([elem.node.name for elem in results]) - assert nodes == {'error_model', 'downstream_of_error_model', 'table_model_modified_example', 'table_model', 'unique_view_model_id'} diff --git a/tests/functional/defer_state/fixtures.py b/tests/functional/defer_state/fixtures.py new file mode 100644 index 00000000000..17f46f842d9 --- /dev/null +++ 
b/tests/functional/defer_state/fixtures.py @@ -0,0 +1,101 @@ +seed_csv = """id,name +1,Alice +2,Bob +""" + +table_model_sql = """ +{{ config(materialized='table') }} +select * from {{ ref('ephemeral_model') }} + +-- establish a macro dependency to trigger state:modified.macros +-- depends on: {{ my_macro() }} +""" + +changed_table_model_sql = """ +{{ config(materialized='table') }} +select 1 as fun +""" + +view_model_sql = """ +select * from {{ ref('seed') }} + +-- establish a macro dependency that trips infinite recursion if not handled +-- depends on: {{ my_infinitely_recursive_macro() }} +""" + +changed_view_model_sql = """ +select * from no.such.table +""" + +ephemeral_model_sql = """ +{{ config(materialized='ephemeral') }} +select * from {{ ref('view_model') }} +""" + +changed_ephemeral_model_sql = """ +{{ config(materialized='ephemeral') }} +select * from no.such.table +""" + +schema_yml = """ +version: 2 +models: + - name: view_model + columns: + - name: id + tests: + - unique: + severity: error + - not_null + - name: name +""" + +exposures_yml = """ +version: 2 +exposures: + - name: my_exposure + type: application + depends_on: + - ref('view_model') + owner: + email: test@example.com +""" + +macros_sql = """ +{% macro my_macro() %} + {% do log('in a macro' ) %} +{% endmacro %} +""" + +infinite_macros_sql = """ +{# trigger infinite recursion if not handled #} + +{% macro my_infinitely_recursive_macro() %} + {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }} +{% endmacro %} + +{% macro default__my_infinitely_recursive_macro() %} + {% if unmet_condition %} + {{ my_infinitely_recursive_macro() }} + {% else %} + {{ return('') }} + {% endif %} +{% endmacro %} +""" + +snapshot_sql = """ +{% snapshot my_cool_snapshot %} + + {{ + config( + target_database=database, + target_schema=schema, + unique_key='id', + strategy='check', + check_cols=['id'], + ) + }} + select * from {{ ref('view_model') }} + +{% endsnapshot %} +""" diff --git a/tests/functional/defer_state/test_defer_state.py b/tests/functional/defer_state/test_defer_state.py new file mode 100644 index 00000000000..134cae1c626 --- /dev/null +++ b/tests/functional/defer_state/test_defer_state.py @@ -0,0 +1,273 @@ +import json +import os +import shutil +from copy import deepcopy + +import pytest + +from dbt.tests.util import run_dbt, write_file, rm_file + +from dbt.exceptions import DbtRuntimeError + +from tests.functional.defer_state.fixtures import ( + seed_csv, + table_model_sql, + changed_table_model_sql, + view_model_sql, + changed_view_model_sql, + ephemeral_model_sql, + changed_ephemeral_model_sql, + schema_yml, + exposures_yml, + macros_sql, + infinite_macros_sql, + snapshot_sql, +) + + +class BaseDeferState: + @pytest.fixture(scope="class") + def models(self): + return { + "table_model.sql": table_model_sql, + "view_model.sql": view_model_sql, + "ephemeral_model.sql": ephemeral_model_sql, + "schema.yml": schema_yml, + "exposures.yml": exposures_yml, + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + "infinite_macros.sql": infinite_macros_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "seed.csv": seed_csv, + } + + @pytest.fixture(scope="class") + def snapshots(self): + return { + "snapshot.sql": snapshot_sql, + } + + @pytest.fixture(scope="class") + def other_schema(self, unique_schema): + return unique_schema + "_other" + + @property + def project_config_update(self): + return { + "seeds": { + "test": { + "quote_columns": False, + } + } 
+        }
+
+    @pytest.fixture(scope="class")
+    def profiles_config_update(self, dbt_profile_target, unique_schema, other_schema):
+        outputs = {"default": dbt_profile_target, "otherschema": deepcopy(dbt_profile_target)}
+        outputs["default"]["schema"] = unique_schema
+        outputs["otherschema"]["schema"] = other_schema
+        return {"test": {"outputs": outputs, "target": "default"}}
+
+    def copy_state(self):
+        if not os.path.exists("state"):
+            os.makedirs("state")
+        shutil.copyfile("target/manifest.json", "state/manifest.json")
+
+    def run_and_save_state(self):
+        results = run_dbt(["seed"])
+        assert len(results) == 1
+        assert not any(r.node.deferred for r in results)
+        results = run_dbt(["run"])
+        assert len(results) == 2
+        assert not any(r.node.deferred for r in results)
+        results = run_dbt(["test"])
+        assert len(results) == 2
+
+        # copy files
+        self.copy_state()
+
+
+class TestDeferStateUnsupportedCommands(BaseDeferState):
+    def test_unsupported_commands(self, project):
+        # make sure these commands don't work with --defer
+        with pytest.raises(SystemExit):
+            run_dbt(["seed", "--defer"])
+
+    def test_no_state(self, project):
+        # no "state" files present, snapshot fails
+        with pytest.raises(DbtRuntimeError):
+            run_dbt(["snapshot", "--state", "state", "--defer"])
+
+
+class TestRunCompileState(BaseDeferState):
+    def test_run_and_compile_defer(self, project):
+        self.run_and_save_state()
+
+        # defer test, it succeeds
+        results = run_dbt(["compile", "--state", "state", "--defer"])
+        assert len(results.results) == 6
+        assert results.results[0].node.name == "seed"
+
+
+class TestSnapshotState(BaseDeferState):
+    def test_snapshot_state_defer(self, project):
+        self.run_and_save_state()
+        # snapshot succeeds without --defer
+        run_dbt(["snapshot"])
+        # copy files
+        self.copy_state()
+        # defer test, it succeeds
+        run_dbt(["snapshot", "--state", "state", "--defer"])
+        # favor_state test, it succeeds
+        run_dbt(["snapshot", "--state", "state", "--defer", "--favor-state"])
+
+
+class TestRunDeferState(BaseDeferState):
+    def test_run_and_defer(self, project, unique_schema, other_schema):
+        project.create_test_schema(other_schema)
+        self.run_and_save_state()
+
+        # test tests first, because run will change things
+        # no state, wrong schema, failure.
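+        # (setup recap: run_and_save_state() built everything in the default
+        # schema and "otherschema" is still empty, so without --state/--defer
+        # these first commands have nothing to resolve refs against)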
+ run_dbt(["test", "--target", "otherschema"], expect_pass=False) + + # test generate docs + # no state, wrong schema, empty nodes + catalog = run_dbt(["docs", "generate", "--target", "otherschema"]) + assert not catalog.nodes + + # no state, run also fails + run_dbt(["run", "--target", "otherschema"], expect_pass=False) + + # defer test, it succeeds + results = run_dbt( + ["test", "-m", "view_model+", "--state", "state", "--defer", "--target", "otherschema"] + ) + + # defer docs generate with state, catalog refers schema from the happy times + catalog = run_dbt( + [ + "docs", + "generate", + "-m", + "view_model+", + "--state", + "state", + "--defer", + "--target", + "otherschema", + ] + ) + assert other_schema not in catalog.nodes["seed.test.seed"].metadata.schema + assert unique_schema in catalog.nodes["seed.test.seed"].metadata.schema + + # with state it should work though + results = run_dbt( + ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"] + ) + assert other_schema not in results[0].node.compiled_code + assert unique_schema in results[0].node.compiled_code + + with open("target/manifest.json") as fp: + data = json.load(fp) + assert data["nodes"]["seed.test.seed"]["deferred"] + + assert len(results) == 1 + + +class TestRunDeferStateChangedModel(BaseDeferState): + def test_run_defer_state_changed_model(self, project): + self.run_and_save_state() + + # change "view_model" + write_file(changed_view_model_sql, "models", "view_model.sql") + + # the sql here is just wrong, so it should fail + run_dbt( + ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"], + expect_pass=False, + ) + # but this should work since we just use the old happy model + run_dbt( + ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"], + expect_pass=True, + ) + + # change "ephemeral_model" + write_file(changed_ephemeral_model_sql, "models", "ephemeral_model.sql") + # this should fail because the table model refs a broken ephemeral + # model, which it should see + run_dbt( + ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"], + expect_pass=False, + ) + + +class TestRunDeferStateIFFNotExists(BaseDeferState): + def test_run_defer_iff_not_exists(self, project, unique_schema, other_schema): + project.create_test_schema(other_schema) + self.run_and_save_state() + + results = run_dbt(["seed", "--target", "otherschema"]) + assert len(results) == 1 + results = run_dbt(["run", "--state", "state", "--defer", "--target", "otherschema"]) + assert len(results) == 2 + + # because the seed now exists in our "other" schema, we should prefer it over the one + # available from state + assert other_schema in results[0].node.compiled_code + + # this time with --favor-state: even though the seed now exists in our "other" schema, + # we should still favor the one available from state + results = run_dbt( + ["run", "--state", "state", "--defer", "--favor-state", "--target", "otherschema"] + ) + assert len(results) == 2 + assert other_schema not in results[0].node.compiled_code + + +class TestDeferStateDeletedUpstream(BaseDeferState): + def test_run_defer_deleted_upstream(self, project, unique_schema, other_schema): + project.create_test_schema(other_schema) + self.run_and_save_state() + + # remove "ephemeral_model" + change "table_model" + rm_file("models", "ephemeral_model.sql") + write_file(changed_table_model_sql, "models", "table_model.sql") + + # ephemeral_model is now gone. 
previously this caused a + # keyerror (dbt#2875), now it should pass + run_dbt( + ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"], + expect_pass=True, + ) + + # despite deferral, we should use models just created in our schema + results = run_dbt(["test", "--state", "state", "--defer", "--target", "otherschema"]) + assert other_schema in results[0].node.compiled_code + + # this time with --favor-state: prefer the models in the "other" schema, even though they exist in ours + run_dbt( + [ + "run", + "-m", + "view_model", + "--state", + "state", + "--defer", + "--favor-state", + "--target", + "otherschema", + ], + expect_pass=True, + ) + results = run_dbt(["test", "--state", "state", "--defer", "--favor-state"]) + assert other_schema not in results[0].node.compiled_code diff --git a/tests/functional/defer_state/test_modified_state.py b/tests/functional/defer_state/test_modified_state.py new file mode 100644 index 00000000000..80e3d455da1 --- /dev/null +++ b/tests/functional/defer_state/test_modified_state.py @@ -0,0 +1,263 @@ +import os +import random +import shutil +import string + +import pytest + +from dbt.tests.util import run_dbt, update_config_file, write_file + +from dbt.exceptions import CompilationError + +from tests.functional.defer_state.fixtures import ( + seed_csv, + table_model_sql, + view_model_sql, + ephemeral_model_sql, + schema_yml, + exposures_yml, + macros_sql, + infinite_macros_sql, +) + + +class BaseModifiedState: + @pytest.fixture(scope="class") + def models(self): + return { + "table_model.sql": table_model_sql, + "view_model.sql": view_model_sql, + "ephemeral_model.sql": ephemeral_model_sql, + "schema.yml": schema_yml, + "exposures.yml": exposures_yml, + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + "infinite_macros.sql": infinite_macros_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "seed.csv": seed_csv, + } + + @property + def project_config_update(self): + return { + "seeds": { + "test": { + "quote_columns": False, + } + } + } + + def copy_state(self): + if not os.path.exists("state"): + os.makedirs("state") + shutil.copyfile("target/manifest.json", "state/manifest.json") + + def run_and_save_state(self): + run_dbt(["seed"]) + run_dbt(["run"]) + self.copy_state() + + +class TestChangedSeedContents(BaseModifiedState): + def test_changed_seed_contents_state(self, project): + self.run_and_save_state() + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], + expect_pass=True, + ) + assert len(results) == 0 + + # add a new row to the seed + changed_seed_contents = seed_csv + "\n" + "3,carl" + write_file(changed_seed_contents, "seeds", "seed.csv") + + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] + ) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "state:modified", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "state:modified+", "--state", "./state"]) + assert len(results) == 7 + assert set(results) == { + "test.seed", + "test.table_model", + "test.view_model", + "test.ephemeral_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + "exposure:test.my_exposure", + } + + shutil.rmtree("./state") + self.copy_state() + + # make a very big seed + # assume each line is ~2 bytes + len(name) + 
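# (the size math below is a sketch of dbt's seed-hashing rule: seed
+        # contents are only checksummed up to a 1MB cap, so a seed just past
+        # the cap pushes state:modified into path-based comparison -- see the
+        # ">1MB" warning asserted further down)
+        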
target_size = 1 * 1024 * 1024
+        line_size = 64
+        num_lines = target_size // line_size
+        maxlines = num_lines + 4
+        seed_lines = [seed_csv]
+        for idx in range(4, maxlines):
+            value = "".join(random.choices(string.ascii_letters, k=62))
+            seed_lines.append(f"{idx},{value}")
+        seed_contents = "\n".join(seed_lines)
+        write_file(seed_contents, "seeds", "seed.csv")
+
+        # now if we run again, we should get a warning
+        results = run_dbt(
+            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"]
+        )
+        assert len(results) == 1
+        assert results[0] == "test.seed"
+
+        with pytest.raises(CompilationError) as exc:
+            run_dbt(
+                [
+                    "--warn-error",
+                    "ls",
+                    "--resource-type",
+                    "seed",
+                    "--select",
+                    "state:modified",
+                    "--state",
+                    "./state",
+                ]
+            )
+        assert ">1MB" in str(exc.value)
+
+        shutil.rmtree("./state")
+        self.copy_state()
+
+        # once it's in path mode, we don't mark it as modified if it changes
+        write_file(seed_contents + "\n1,test", "seeds", "seed.csv")
+
+        results = run_dbt(
+            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"],
+            expect_pass=True,
+        )
+        assert len(results) == 0
+
+
+class TestChangedSeedConfig(BaseModifiedState):
+    def test_changed_seed_config(self, project):
+        self.run_and_save_state()
+        results = run_dbt(
+            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"],
+            expect_pass=True,
+        )
+        assert len(results) == 0
+
+        update_config_file({"seeds": {"test": {"quote_columns": False}}}, "dbt_project.yml")
+
+        # quoting change -> seed changed
+        results = run_dbt(
+            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"]
+        )
+        assert len(results) == 1
+        assert results[0] == "test.seed"
+
+
+class TestUnrenderedConfigSame(BaseModifiedState):
+    def test_unrendered_config_same(self, project):
+        self.run_and_save_state()
+        results = run_dbt(
+            ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"],
+            expect_pass=True,
+        )
+        assert len(results) == 0
+
+        # although this is the default value, dbt will recognize it as a change
+        # for previously-unconfigured models, because it's been explicitly set
+        update_config_file({"models": {"test": {"materialized": "view"}}}, "dbt_project.yml")
+        results = run_dbt(
+            ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"]
+        )
+        assert len(results) == 1
+        assert results[0] == "test.view_model"
+
+
+class TestChangedModelContents(BaseModifiedState):
+    def test_changed_model_contents(self, project):
+        self.run_and_save_state()
+        results = run_dbt(["run", "--models", "state:modified", "--state", "./state"])
+        assert len(results) == 0
+
+        table_model_update = """
+        {{ config(materialized="table") }}
+
+        select * from {{ ref("seed") }}
+        """
+
+        write_file(table_model_update, "models", "table_model.sql")
+
+        results = run_dbt(["run", "--models", "state:modified", "--state", "./state"])
+        assert len(results) == 1
+        assert results[0].node.name == "table_model"
+
+
+class TestNewMacro(BaseModifiedState):
+    def test_new_macro(self, project):
+        self.run_and_save_state()
+
+        new_macro = """
+        {% macro my_other_macro() %}
+        {% endmacro %}
+        """
+
+        # add a new macro to a new file
+        write_file(new_macro, "macros", "second_macro.sql")
+
+        results = run_dbt(["run", "--models", "state:modified", "--state", "./state"])
+        assert len(results) == 0
+
+        os.remove("macros/second_macro.sql")
+        # add a new macro to the existing file
+        with open("macros/macros.sql", "a") as fp:
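+            # (zero results is expected here too: appending my_other_macro
+            # changes macros.sql, but no model references the new macro, so
+            # state:modified still selects nothing)
+            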
fp.write(new_macro) + + results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) + assert len(results) == 0 + + +class TestChangedMacroContents(BaseModifiedState): + def test_changed_macro_contents(self, project): + self.run_and_save_state() + + # modify an existing macro + updated_macro = """ + {% macro my_macro() %} + {% do log("in a macro", info=True) %} + {% endmacro %} + """ + write_file(updated_macro, "macros", "macros.sql") + + # table_model calls this macro + results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) + assert len(results) == 1 + + +class TestChangedExposure(BaseModifiedState): + def test_changed_exposure(self, project): + self.run_and_save_state() + + # add an "owner.name" to existing exposure + updated_exposure = exposures_yml + "\n name: John Doe\n" + write_file(updated_exposure, "models", "exposures.yml") + + results = run_dbt(["run", "--models", "+state:modified", "--state", "./state"]) + assert len(results) == 1 + assert results[0].node.name == "view_model" diff --git a/tests/functional/defer_state/test_run_results_state.py b/tests/functional/defer_state/test_run_results_state.py new file mode 100644 index 00000000000..aa1dc549272 --- /dev/null +++ b/tests/functional/defer_state/test_run_results_state.py @@ -0,0 +1,494 @@ +import os +import shutil + +import pytest + +from dbt.tests.util import run_dbt, write_file + +from tests.functional.defer_state.fixtures import ( + seed_csv, + table_model_sql, + view_model_sql, + ephemeral_model_sql, + schema_yml, + exposures_yml, + macros_sql, + infinite_macros_sql, +) + + +class BaseRunResultsState: + @pytest.fixture(scope="class") + def models(self): + return { + "table_model.sql": table_model_sql, + "view_model.sql": view_model_sql, + "ephemeral_model.sql": ephemeral_model_sql, + "schema.yml": schema_yml, + "exposures.yml": exposures_yml, + } + + @pytest.fixture(scope="class") + def macros(self): + return { + "macros.sql": macros_sql, + "infinite_macros.sql": infinite_macros_sql, + } + + @pytest.fixture(scope="class") + def seeds(self): + return { + "seed.csv": seed_csv, + } + + @property + def project_config_update(self): + return { + "seeds": { + "test": { + "quote_columns": False, + } + } + } + + def clear_state(self): + shutil.rmtree("./state") + + def copy_state(self): + if not os.path.exists("state"): + os.makedirs("state") + shutil.copyfile("target/manifest.json", "state/manifest.json") + shutil.copyfile("target/run_results.json", "state/run_results.json") + + def run_and_save_state(self): + run_dbt(["build"]) + self.copy_state() + + def rebuild_run_dbt(self, expect_pass=True): + self.clear_state() + run_dbt(["build"], expect_pass=expect_pass) + self.copy_state() + + def update_view_model_bad_sql(self): + # update view model to generate a failure case + not_unique_sql = "select * from forced_error" + write_file(not_unique_sql, "models", "view_model.sql") + + def update_view_model_failing_tests(self, with_dupes=True, with_nulls=False): + # test failure on build tests + # fail the unique test + select_1 = "select 1 as id" + select_stmts = [select_1] + if with_dupes: + select_stmts.append(select_1) + if with_nulls: + select_stmts.append("select null as id") + failing_tests_sql = " union all ".join(select_stmts) + write_file(failing_tests_sql, "models", "view_model.sql") + + def update_unique_test_severity_warn(self): + # change the unique test severity from error to warn and reuse the same view_model.sql changes above + new_config = schema_yml.replace("error", "warn") + 
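# (the schema_yml fixture pins `severity: error` on the unique test, so
+        # after this flip the same duplicate-id rows surface as result:warn
+        # rather than result:fail)
+        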
write_file(new_config, "models", "schema.yml") + + +class TestSeedRunResultsState(BaseRunResultsState): + def test_seed_run_results_state(self, project): + self.run_and_save_state() + self.clear_state() + run_dbt(["seed"]) + self.copy_state() + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "result:success", "--state", "./state"], + expect_pass=True, + ) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "result:success", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "result:success+", "--state", "./state"]) + assert len(results) == 7 + assert set(results) == { + "test.seed", + "test.table_model", + "test.view_model", + "test.ephemeral_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + "exposure:test.my_exposure", + } + + # add a new faulty row to the seed + changed_seed_contents = seed_csv + "\n" + "\\\3,carl" + write_file(changed_seed_contents, "seeds", "seed.csv") + + self.clear_state() + run_dbt(["seed"], expect_pass=False) + self.copy_state() + + results = run_dbt( + ["ls", "--resource-type", "seed", "--select", "result:error", "--state", "./state"], + expect_pass=True, + ) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "result:error", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.seed" + + results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"]) + assert len(results) == 7 + assert set(results) == { + "test.seed", + "test.table_model", + "test.view_model", + "test.ephemeral_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + "exposure:test.my_exposure", + } + + +class TestBuildRunResultsState(BaseRunResultsState): + def test_build_run_results_state(self, project): + self.run_and_save_state() + results = run_dbt(["build", "--select", "result:error", "--state", "./state"]) + assert len(results) == 0 + + self.update_view_model_bad_sql() + self.rebuild_run_dbt(expect_pass=False) + + results = run_dbt( + ["build", "--select", "result:error", "--state", "./state"], expect_pass=False + ) + assert len(results) == 3 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"view_model", "not_null_view_model_id", "unique_view_model_id"} + + results = run_dbt(["ls", "--select", "result:error", "--state", "./state"]) + assert len(results) == 3 + assert set(results) == { + "test.view_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + } + + results = run_dbt( + ["build", "--select", "result:error+", "--state", "./state"], expect_pass=False + ) + assert len(results) == 4 + nodes = set([elem.node.name for elem in results]) + assert nodes == { + "table_model", + "view_model", + "not_null_view_model_id", + "unique_view_model_id", + } + + results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"]) + assert len(results) == 6 # includes exposure + assert set(results) == { + "test.table_model", + "test.view_model", + "test.ephemeral_model", + "test.not_null_view_model_id", + "test.unique_view_model_id", + "exposure:test.my_exposure", + } + + self.update_view_model_failing_tests() + self.rebuild_run_dbt(expect_pass=False) + + results = run_dbt( + ["build", "--select", "result:fail", "--state", "./state"], expect_pass=False + ) + assert len(results) == 1 + assert results[0].node.name == "unique_view_model_id" + + results = run_dbt(["ls", "--select", 
"result:fail", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.unique_view_model_id" + + results = run_dbt( + ["build", "--select", "result:fail+", "--state", "./state"], expect_pass=False + ) + assert len(results) == 2 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"table_model", "unique_view_model_id"} + + results = run_dbt(["ls", "--select", "result:fail+", "--state", "./state"]) + assert len(results) == 1 + assert set(results) == {"test.unique_view_model_id"} + + self.update_unique_test_severity_warn() + self.rebuild_run_dbt(expect_pass=True) + + results = run_dbt( + ["build", "--select", "result:warn", "--state", "./state"], expect_pass=True + ) + assert len(results) == 1 + assert results[0].node.name == "unique_view_model_id" + + results = run_dbt(["ls", "--select", "result:warn", "--state", "./state"]) + assert len(results) == 1 + assert results[0] == "test.unique_view_model_id" + + results = run_dbt( + ["build", "--select", "result:warn+", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 # includes table_model to be run + nodes = set([elem.node.name for elem in results]) + assert nodes == {"table_model", "unique_view_model_id"} + + results = run_dbt(["ls", "--select", "result:warn+", "--state", "./state"]) + assert len(results) == 1 + assert set(results) == {"test.unique_view_model_id"} + + +class TestRunRunResultsState(BaseRunResultsState): + def test_run_run_results_state(self, project): + self.run_and_save_state() + results = run_dbt( + ["run", "--select", "result:success", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 + assert results[0].node.name == "view_model" + assert results[1].node.name == "table_model" + + # clear state and rerun upstream view model to test + operator + self.clear_state() + run_dbt(["run", "--select", "view_model"], expect_pass=True) + self.copy_state() + results = run_dbt( + ["run", "--select", "result:success+", "--state", "./state"], expect_pass=True + ) + assert len(results) == 2 + assert results[0].node.name == "view_model" + assert results[1].node.name == "table_model" + + # check we are starting from a place with 0 errors + results = run_dbt(["run", "--select", "result:error", "--state", "./state"]) + assert len(results) == 0 + + self.update_view_model_bad_sql() + self.clear_state() + run_dbt(["run"], expect_pass=False) + self.copy_state() + + # test single result selector on error + results = run_dbt( + ["run", "--select", "result:error", "--state", "./state"], expect_pass=False + ) + assert len(results) == 1 + assert results[0].node.name == "view_model" + + # test + operator selection on error + results = run_dbt( + ["run", "--select", "result:error+", "--state", "./state"], expect_pass=False + ) + assert len(results) == 2 + assert results[0].node.name == "view_model" + assert results[1].node.name == "table_model" + + # single result selector on skipped. 
Expect this to pass because the underlying view is already defined above
+        results = run_dbt(
+            ["run", "--select", "result:skipped", "--state", "./state"], expect_pass=True
+        )
+        assert len(results) == 1
+        assert results[0].node.name == "table_model"
+
+        # add a downstream model that depends on table_model for skipped+ selector
+        downstream_model_sql = "select * from {{ref('table_model')}}"
+        write_file(downstream_model_sql, "models", "table_model_downstream.sql")
+
+        self.clear_state()
+        run_dbt(["run"], expect_pass=False)
+        self.copy_state()
+
+        results = run_dbt(
+            ["run", "--select", "result:skipped+", "--state", "./state"], expect_pass=True
+        )
+        assert len(results) == 2
+        assert results[0].node.name == "table_model"
+        assert results[1].node.name == "table_model_downstream"
+
+
+class TestTestRunResultsState(BaseRunResultsState):
+    def test_test_run_results_state(self, project):
+        self.run_and_save_state()
+        # run passed nodes
+        results = run_dbt(
+            ["test", "--select", "result:pass", "--state", "./state"], expect_pass=True
+        )
+        assert len(results) == 2
+        nodes = set([elem.node.name for elem in results])
+        assert nodes == {"unique_view_model_id", "not_null_view_model_id"}
+
+        # run passed nodes with + operator
+        results = run_dbt(
+            ["test", "--select", "result:pass+", "--state", "./state"], expect_pass=True
+        )
+        assert len(results) == 2
+        nodes = set([elem.node.name for elem in results])
+        assert nodes == {"unique_view_model_id", "not_null_view_model_id"}
+
+        self.update_view_model_failing_tests()
+        self.rebuild_run_dbt(expect_pass=False)
+
+        # test with failure selector
+        results = run_dbt(
+            ["test", "--select", "result:fail", "--state", "./state"], expect_pass=False
+        )
+        assert len(results) == 1
+        assert results[0].node.name == "unique_view_model_id"
+
+        # test with failure selector and + operator
+        results = run_dbt(
+            ["test", "--select", "result:fail+", "--state", "./state"], expect_pass=False
+        )
+        assert len(results) == 1
+        assert results[0].node.name == "unique_view_model_id"
+
+        self.update_unique_test_severity_warn()
+        # rebuild - expect_pass = True because we changed the error to a warning this time around
+        self.rebuild_run_dbt(expect_pass=True)
+
+        # test with warn selector
+        results = run_dbt(
+            ["test", "--select", "result:warn", "--state", "./state"], expect_pass=True
+        )
+        assert len(results) == 1
+        assert results[0].node.name == "unique_view_model_id"
+
+        # test with warn selector and + operator
+        results = run_dbt(
+            ["test", "--select", "result:warn+", "--state", "./state"], expect_pass=True
+        )
+        assert len(results) == 1
+        assert results[0].node.name == "unique_view_model_id"
+
+
+class TestConcurrentSelectionRunResultsState(BaseRunResultsState):
+    def test_concurrent_selection_run_run_results_state(self, project):
+        self.run_and_save_state()
+        results = run_dbt(
+            ["run", "--select", "state:modified+", "result:error+", "--state", "./state"]
+        )
+        assert len(results) == 0
+
+        self.update_view_model_bad_sql()
+        self.clear_state()
+        run_dbt(["run"], expect_pass=False)
+        self.copy_state()
+
+        # add a new failing dbt model
+        bad_sql = "select * from forced_error"
+        write_file(bad_sql, "models", "table_model_modified_example.sql")
+
+        results = run_dbt(
+            ["run", "--select", "state:modified+", "result:error+", "--state", "./state"],
+            expect_pass=False,
+        )
+        assert len(results) == 3
+        nodes = set([elem.node.name for elem in results])
+        assert nodes == {"view_model", "table_model_modified_example", "table_model"}
+
+
+class 
TestConcurrentSelectionTestRunResultsState(BaseRunResultsState): + def test_concurrent_selection_test_run_results_state(self, project): + self.run_and_save_state() + # create failure test case for result:fail selector + self.update_view_model_failing_tests(with_nulls=True) + + # run dbt build again to trigger test errors + self.rebuild_run_dbt(expect_pass=False) + + # get the failures from + results = run_dbt( + [ + "test", + "--select", + "result:fail", + "--exclude", + "not_null_view_model_id", + "--state", + "./state", + ], + expect_pass=False, + ) + assert len(results) == 1 + nodes = set([elem.node.name for elem in results]) + assert nodes == {"unique_view_model_id"} + + +class TestConcurrentSelectionBuildRunResultsState(BaseRunResultsState): + def test_concurrent_selectors_build_run_results_state(self, project): + self.run_and_save_state() + results = run_dbt( + ["build", "--select", "state:modified+", "result:error+", "--state", "./state"] + ) + assert len(results) == 0 + + self.update_view_model_bad_sql() + self.rebuild_run_dbt(expect_pass=False) + + # add a new failing dbt model + bad_sql = "select * from forced_error" + write_file(bad_sql, "models", "table_model_modified_example.sql") + + results = run_dbt( + ["build", "--select", "state:modified+", "result:error+", "--state", "./state"], + expect_pass=False, + ) + assert len(results) == 5 + nodes = set([elem.node.name for elem in results]) + assert nodes == { + "table_model_modified_example", + "view_model", + "table_model", + "not_null_view_model_id", + "unique_view_model_id", + } + + self.update_view_model_failing_tests() + + # create error model case for result:error selector + more_bad_sql = "select 1 as id from not_exists" + write_file(more_bad_sql, "models", "error_model.sql") + + # create something downstream from the error model to rerun + downstream_model_sql = "select * from {{ ref('error_model') }} )" + write_file(downstream_model_sql, "models", "downstream_of_error_model.sql") + + # regenerate build state + self.rebuild_run_dbt(expect_pass=False) + + # modify model again to trigger the state:modified selector + bad_again_sql = "select * from forced_anothererror" + write_file(bad_again_sql, "models", "table_model_modified_example.sql") + + results = run_dbt( + [ + "build", + "--select", + "state:modified+", + "result:error+", + "result:fail+", + "--state", + "./state", + ], + expect_pass=False, + ) + assert len(results) == 5 + nodes = set([elem.node.name for elem in results]) + assert nodes == { + "error_model", + "downstream_of_error_model", + "table_model_modified_example", + "table_model", + "unique_view_model_id", + } From f841a7ca76b44429eb94174bc5d0c2fecbf2763b Mon Sep 17 00:00:00 2001 From: dave-connors-3 <73915542+dave-connors-3@users.noreply.github.com> Date: Thu, 19 Jan 2023 08:20:19 -0600 Subject: [PATCH 123/156] add backwards compatibility and default argument for `incremental_predicates` (#6628) * add backwards compatibility and default argument * changie <3 * Update .changes/unreleased/Fixes-20230117-101342.yaml Co-authored-by: Jeremy Cohen --- .changes/unreleased/Fixes-20230117-101342.yaml | 6 ++++++ .../macros/materializations/models/incremental/merge.sql | 6 ++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Fixes-20230117-101342.yaml diff --git a/.changes/unreleased/Fixes-20230117-101342.yaml b/.changes/unreleased/Fixes-20230117-101342.yaml new file mode 100644 index 00000000000..9a879e60a89 --- /dev/null +++ b/.changes/unreleased/Fixes-20230117-101342.yaml 
@@ -0,0 +1,6 @@ +kind: Fixes +body: Provide backward compatibility for `get_merge_sql` arguments +time: 2023-01-17T10:13:42.118336-06:00 +custom: + Author: dave-connors-3 + Issue: "6625" diff --git a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql index 5033178be49..ca972c9f258 100644 --- a/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql +++ b/core/dbt/include/global_project/macros/materializations/models/incremental/merge.sql @@ -1,8 +1,10 @@ -{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} +{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%} + -- back compat for old kwarg name + {% set incremental_predicates = kwargs.get('predicates', incremental_predicates) %} {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }} {%- endmacro %} -{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%} +{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%} {%- set predicates = [] if incremental_predicates is none else [] + incremental_predicates -%} {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%} {%- set merge_update_columns = config.get('merge_update_columns') -%} From 6954c4df1b4da70570823f904137abc1ceaea492 Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Thu, 19 Jan 2023 11:11:17 -0500 Subject: [PATCH 124/156] CT-1786: Port docs tests to pytest (#6608) * CT-1786: Port docs tets to pytest * Add generated CLI API docs * CT-1786: Comply with the new style requirements Co-authored-by: Github Build Bot --- .../Under the Hood-20230113-150700.yaml | 6 + .../docs/build/doctrees/environment.pickle | Bin 183655 -> 183904 bytes core/dbt/docs/build/doctrees/index.doctree | Bin 87794 -> 87796 bytes .../dbt/docs/build/html/_static/alabaster.css | 4 +- core/dbt/docs/build/html/_static/basic.css | 2 +- core/dbt/docs/build/html/_static/doctools.js | 2 +- .../docs/build/html/_static/language_data.js | 2 +- .../docs/build/html/_static/searchtools.js | 2 +- core/dbt/docs/build/html/genindex.html | 4 +- core/dbt/docs/build/html/index.html | 6 +- core/dbt/docs/build/html/search.html | 4 +- core/dbt/docs/build/html/searchindex.js | 2 +- .../035_docs_blocks_tests/test_docs_blocks.py | 184 ------------------ .../docs/test_duplicate_docs_block.py | 35 ++++ .../functional/docs/test_good_docs_blocks.py | 171 ++++++++++++++++ tests/functional/docs/test_invalid_doc_ref.py | 47 +++++ .../docs/test_missing_docs_blocks.py | 43 ++++ 17 files changed, 317 insertions(+), 197 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230113-150700.yaml delete mode 100644 test/integration/035_docs_blocks_tests/test_docs_blocks.py create mode 100644 tests/functional/docs/test_duplicate_docs_block.py create mode 100644 tests/functional/docs/test_good_docs_blocks.py create mode 100644 tests/functional/docs/test_invalid_doc_ref.py create mode 100644 tests/functional/docs/test_missing_docs_blocks.py diff --git a/.changes/unreleased/Under the Hood-20230113-150700.yaml b/.changes/unreleased/Under the Hood-20230113-150700.yaml new file mode 100644 index 00000000000..178603104e9 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230113-150700.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Port 
docs tests to pytest
+time: 2023-01-13T15:07:00.477038-05:00
+custom:
+  Author: peterallenwebb
+  Issue: "6573"
diff --git a/core/dbt/docs/build/doctrees/environment.pickle b/core/dbt/docs/build/doctrees/environment.pickle
index 1e44093fd51bdf1e1dad0604bc71acfe68ce02d8..cbdd7fd930c4898a71f8eee3590371524903485d 100644
GIT binary patch
[binary patch data omitted]
z8QWc_)o@y1mNCb(LG@+c9o@#0*D{+hM60+CYZ#G3b_36W!5y?iGhH8sSW(UdyXTPW z;P3$Z3+Or=EEj^pVk8iFz9`wfZtn*%W-7E2;{n16SAy5O3M#9>N z?mZudg0l1r(&^DmBa{Y6@LKGo*GB0$rNIUv${39@y4e36;a5*aiO@^8%WTjA*R!7S zlsHSYEWYJJDh^8cH5;RA8rWn-^BkOGpZub0uDr6ed&gCmm45TGt9D#*<;6QLz4WD* z?cMu=9)I!%aVwvn!L-|I_JZ|S?$~|#H9Ibc`>wq5;)b)W?aoE_jN(lFFnG!w_Ey2_ zP89~bkb=sDqOalEQNz#1PPOZT(Wp<@hLi zBnY^GHd!+YJg}Toc@8#0{I@2|sG!Ykbek=pI*AUiISkD(44QfR$@~t#-D%hd?FO_g zZ19NFy50PW_56xV6P`4i=*^e4+99lU{6?0CodS%Dafra~8|Qg2on{jyY?D~}i99{g zoy3C{mOsJsSx6w9U7;(io@uuaKq8DLAPCcFyKeiC2a;#^jD+{Fu~_|05b5ISAwJ&W zCmfF56tYc611u+m4)t6C-Lal7yaSpc81PjNh#3ff(5cu!@tkNVmQ+dRA~t)6)(rwE zjtli6o-r;g;lLI>fj#?Vk?z{pL;S-p*KCUo#Uo}Xl9%wAG0be(-H9yUY1+^uM0KCY z!8jfc?RTxn++?*XFs8wdftfo=JSof(gapf8b!MdT)5LWy_z^V9P3X8I1eagzevZux zxX~f-kZ@=L6%KxtjJriHRzgBfwRSCy{b&qt6Y31rcM7I9tpm_4bGc(E27YTpUm9Mr z&I1={*u2Y)#ys^Biih=36X4BGYqpIc5;o9_{9WLJiO8w-AHW^K8GBlG2}*015(C&$ zd$tru5y!4_yo%j`P7>(%L_07UJx1d8YoolZ7ezU8r;V`-VuICCq$5<+hR8$A)UNUQ;Tgdee6so{VzSar_xr+??HbALN*y~o%kE&C7>1KG>F zEr>nvmVF(|!zfQYzChEgXFz1kbT|Vi;|_Mk@L`CLLM+4gwTV2Adn0SVX}Hn5o=!Wog% z!ogXv8p9C~4uwhdSQwmzb^|8)5ZLf*F8FDg1puMFVvsggIvqZK&BkTe1rzK7X>rzL zRzZOv7sf!{Ie23G08H>Awc~9(gJ}Z8@KN5$5YBZvWeD!IEDY+FXROKr2q=0rxPsw1 zvojB&?N%8(mu~BN=*VzVTrKMj&fK4U|sxY zbRmoycGtHoTe1ZE+Vvv4Y9GXy4h}Vf6?SVHyeGcwBym~zxG3llEIm(LHZCsXQ>fTL z;yDLqKvEEL#F#%W1+aPEv@o((7=cS!oJwZKwVN1Dr1=}gy*wTlwWvp86DNyHnobKw zD{iygfT0L<0)mzVYaaCIT(h-3v5^Eu9&&~NNSwJwJRzK%jzz^Ry%=gt6!lbbB@U@k z8Q@(Q3jv1{XcFwlm+|o6eqrlGVT%pKl`OM-`Xu)j!U^Ej; z=A|bW-R?{<#)thdqyis@PJU^yj->F05K0xSgkd6twqb;DVlW=Ati|2fs;k3gv1o0r zwz@WiD+kbBw<|nUe+P`$!li~ccV}uZx(WVX8ye#MmA9?Z+qEn>DVze@Dbj| zbfy!AS|Bf4#Eh-M8s)Cl!TQuKOmArGx9hn zaa{~+YQ!-?GwhUfPWrS=^Y6sNjgl%A-)YuIbi`|^c<`PUNFps zW5vzEIGa#Gj~7PtgJsZs24gHt2Ti}lmu;4#?qJSlLnIq|T3oAu=qK=dB-l`as}4YT z7drkj^uxG-0KMjI!Dt887*Ld~2@4=5!RNe?>}}%|;GAhYtTN)2iIKtXVAP$hB<_7i zbT5ROESSAjV0^jFvHe+Q0Q9mQoeVAEeZEG*ku)6KdEFVB zIuQ-|Ji9T=LkCA7DrmtP6q^CT05>?1d8-5A92e)XFy+Jcq6)=dmyDxF%XT=HsGU&r zj|5{;lmlw73K7jc!6>_-97XH9!D<-YLm zh1ED@&3nB&;L;`x=8*94(m#MpgXN)pcF#)2R}7C4iw&^1t%Z#}v@GlbT=nI90ES%a zP#9K&_F;9U25b2+qJkk-{Ugl!K4@qW{nMfb@_Vnn$1Cl)`ifMP^p+ksxRNi)_39tR z+lS&IHq5a@p94jPV6a_bi5CL^}oP>K8OE&0qt5vTE3t{tpHbr&>$&Gm_~~5 z3$!0Afjv`C5q=&OAvVX|ZwS1}nscpAjk-uMwhzYVPzNyhv0;UeAW!9y60ABwYYA4M zIi^u80dT$&5)wiRc3v@Cg2oOu(;&kozg>bMP6suRV(sjcQ+zxL1X7 zD@tRWN~oCDOND5{&OyWIqe9?msf=<-IzggIq=9iFb2=}D)_|%t^ z(%fXWG#N>*;gS?ONsID!0HjxxqqAKY(Zk>ix}O@=CCwtl+^v@wACd*>?;?Tl@jbh*cDZMWe2BMXZnB_g=M3bQtC3T(lqx%7qUO$?I zNd;SlftY`|(U^LZ>;l5m1A2K9KJ_IuT_l7w?oHh(-fb;tK!`!;qDOpdpf;E(7&_k?7&h%xU`y0|Y$ff7i-Upi{sXhTXZ-La?uQb;NPFYsfK0D9u7<4#JQ7bt zE0PH_PwHhxc-}A3aIxnNb3bA$00e6?gPxX$G9o;yky~bs?DMm*6-JTn=$yc2I>Iy7ZELl`on@D-j4HN;iE)fQ3T?Rw}#*#R|Uy{;yLo!Zy4=PY5Xa! 
zCFQuvY&kM29OWtum66uqen6vFgLpg**D$CE<5a?ht(Oa-!WlGdLWQ1Q#6pFLSfaw; ze0j~O)Nr#7a*9J8TtZ8>#!;i~wD80qZWO>HNZaONZzrvS!Ay~QyV)!Y84ZWgo=90~ z5#I@L^on>ie3XZ~vnY);i_mh9URs2l--?Ed<(wRGkR~;mLD0TWFIpjGL@SZ&?*ULuWdF;Go#>q$y zVdg=-%m^R*3L4h!V~^-XD|`&mN_@=i!^cv+MCfLh^3{GE!xuXhM!FoBhx3JUFV)E` z1^}XuADLx5<2e(2c$HLg+Hsx$NO~OycHCf?3XU6)rd}iyN|vqwW$T-B3YYm6a$W2) zbU;tg(ky~_jb6mUZ4j}T(||{>a+l*CUrHQLB0OBEmj|KP^U-jzVp9ubM6#(of_bN2 z%tEt>S)$oz?gb`GR>`w)-#6~)$95ZM2Ow44WfhH&aKA2W2T#d$;kT30zQk;4GfExi z%`8(-TBR0X(W}xGY=;MxX_!V>aP+bubUKTMi*=eD*AtN@GYHy4deI7nB3g+;Ux90A zxOoN|QYOwh2Sf5=d=70ZYL-Xr*&Lwmu{B`W42lgfJKzVA%R`P|gDYWZ5_YqxL5Il) z5e9ED%V0+J;Z_^2#dWjK^@cU@E*b!2V zG69{+BbYy+7qf5K@#^I*3FhT&lq3m2l`oTOLUj9lZS6%P$=iCS!Iy>Xv2tpJ7l zNqIkEw!9hft`D1Rs;RV!9{^N(RlJg))urS`iG-1_>19Ny_OH-zv1;k)mxz{T5yX$` zMJzOnh$WgmhtDTDqfzuRj@Iz@JPaC8z87*MDdA7dmN28zFgzY=CaumV0gYaDj_@Z* z);CEG;bg^1kduiC+%hy=tkdLZf~Yi^LC~(#i&kh9(MmKrPZt=cIKlc>Irr^&>E)O0 zd+LSGn<)-hn@k|lw$_!N+Z{A4qqPU86;HW0@F{2@47 zV@I1cVZ_206eTNB9u8x|PJzWqQloXVk0LByW0u8?3xw-bktWmfw*ih``7eh<2r2FJ zBtk<|FAYNYbu?To{IHw2cE?p$QspNz2-+L;q7~9dv=Zrma4)>25VpcZ=Q)_-ab6lt zHcR_(>IpA^VTZj+@f%wccd$L2JVZDT`_IxVv0;pz+JRU`w^4;nXb{9fdf-?I9vy&< zYjfL5Q#Ul{%MqfBO6(7Kx#&$(l*Er5i`b#(%Rvu47Tr#G#B5z>95f6gF|SE`=RJT$ zuXm1xM`z3I1uV4YQhCG!-mRAv;jiyR!^QrZnlY2kDwRhte@HK8;kAfa;SBb4Zzc2Z923ywS`t#O*{}} zJn$pINhA*i%Uh_2e8w!V84p>adD{22*v#LM;>;GId-PJ^Bq*;WPjex^QU}8NQE|zmL{y^|1GYHzV z^`aG0Mzj(sA2VBv2tCIPDpEHWn5~t5P%4gn;+juS24 z5bC2uLdR?L(jnaCMl@XPE_4`0u+l7o_;$UBh07pfiOVcxX@-@Ny;R4)rcnIA^O#mYi4Ym55;C^vB}4e-Gth9csOgZ6fTdXk@w4?J7LrE95=qOC zo(}#(r_0QiBO}8#kqq-*>2wVM(kn#ziX2+OwbH3z8zkxW>*Q!gvR zgWiOOi#;eA941(k83gUS^r98MgJ>nbbNr1R_!?>cwxJK23eP_pK0Fhqb ziC)h^)EFfaC4Nya8^UiskA{o=hK_g$R+>c+e?u=~;X8;};yd!e(BSXv_^#Pkv7n@(AWtLtt9p zMrxr)#4OR{)V@8J?zyN`+TAYI;EM`y_T#W0y;TWczH|^yf8Z;GDdm0NMas6pY}qmz zos2Y^P)>TJaW-JmtJ8Q}1*!8ml`t}`ml2`RbI@?HLTRH&z|t&&_<4E}3wb=54pQ769p$G2K%^h@@GB7)viPLeQ^5!^MJ5j*ki6WClUo z){9n%8PQ6_e9UZYAoLtFs7T%1WVUWHE*4KrqjLe$H_Y4)sPy_7*;Ya7h2#)!?$*nV z@Upj{VclN#UcG3Ammyk-mo3JA2U2m10f6Y^BW4-T_zrvxiwEe#^9WMcxgS93bsh0C z8A88ECZs%|mlWYNUqZvhPD2M&1TD=Xh#%IASU3(MmN<^$`(a3N6m5`l|G;dyGxCjN zD!Ff&c^nYw)o#D0WV;(F!$NK(CA`XP2{ZDH!;CpQ<@N&}z4|2YI3T2v9KwpNmldJZ88lq1)MQ&s z;3hK&T2C)pp;AOEQR(rwAH=VnC-rc%*?P$MQanm8djG+l07|bz#cw(w>WotfCHLs1 zL^#r0(QvUNCEjyDkS5Xz*7xbfD%=RMO5DioO9@Dk%|42-_@`!B%s4?DNzHo$!WRLM zUI*xJ@AZRvi4fBN3K}k!e%Mtk;0*|m=tV2Uk7y<0Kfd>R@%CQ-$ZTC_Ty!*wCP{Z+ zKLKd;`e*7r2c#uT<&maq=?FNMzR9le*k2*x#U7hlLZ+Dci+rLl*AmQY^kNo%ihhv5(lUhaX|*@9#*jZchWOew^iK>ZpQv(LvRo>88FFo6aNxJ6|sm z!YKwnrZ8M#ADYU3FC&>j3bWHxVJ=3)#U4(>48*}{7D4BVvhvKObM9D?UOA zpSt2-zreR3!~6H)rMONDzMKIsJ;AT1@^5H}m*mn-B?HEke%!4GkB|IT=?Yc#|0f?I-l26;ejD5-A@u z8#)L*#|$b`H-B!nZZZxwNgPc6R*#1PlU^?)TS-W*kQ~Cyqk5SUKK3_gShtV;L@!$5 zV~AGbV~erJgH+sN03iDKSF?;~JZB>IoZPp1jEw4boM>T^P#+}{I@ZCnjYI?1qTyn9 zp%Vgvm1YsdXX-^PTm}(KTxKDQj-(6=xsjA`yV(+EBs$6^nzL5Zl8ZK7m!XwMq=tV13iD)INJRVtIyvTCJY+YykbC~<5G_LFc7QNn?iXD?CEtN-F ztsC^RBK-9*8ZP!%)wuEzy_kj9B4&x#{<_4Kf6pwh84p<(dPreZ`F#MYA6Ge=iYkxo zY&VKOT2opz!@pkV5SHQ-!jWW#s^^(+3|8;totKC(~tA@A4jHfHo_v!BCI^7 zmlffZ|A>Z*)trp95xmI^g7)9_q7_O;v=SvBGr=xG&oP6F)Xmr!RJgu%^mN^1{0jmJ z!oTDZxYGfWUMC}QHc}}hhY)kNUSfocorQ*VyVwPK(TZwBv=SFvi~ty^xWxcK^s&P% z;~B?+MNaNGxdFJB03yA96UEvH^HCyU<66CJ2yb~A8ZP!0I))-xX%;~|rx&sC7(^`b zn1#gANEsG#BPn6mYzZ?89bpR1iNDx_F2G53^)-#!6Q`f<*q`7wodB|oNsd8B3ftlpvvr_75fCNl`yGxVYrN=CF2B_A_k2SU#=gNoG6wAs4J_}4_zzvRJ?=L4jEy5|0!jD$Q$ zrH~v#%w>9s5iWKK8rJP%d-b9fE{13&F18r42vTv20f6YkGRt_zan?qTlN$`V9uVpE zn@R70(17MZ?9yTzK5>r+U!}Q6gH2D38bO7B6o1q}jU8xaSaaPifq4Y)ya8 zc{Dd}*KoYt@E6M?Ez=}C&&W(WFK*ZWtS0_G5NU(sR8ovhrixKQ!^Le2?b!%enne(w 
zrx&qkUl6gReUZN?fAFscz0hnqGQKm48on{_lRjqqOqJ3MR)hI3MpBQ2E1n9^SzQ^v;30f+9j)6 zw<>naFZpelWni_zcNSgSYj@p>?QJdD2klnLnS~qddHAR8*&?@8vtW)gW5X5p;ZDP; zI6l0%KeCdVt}GG;qA$-Z?->u=$UIP@`t&U5bpTASGe#dtA&qR5NJx33UQ&e9-G+vX zosPEp1S`!Vi0{#hShx=&mbj0+Lm2$aDep5|j*J{fm>hFHn{qFJ(JaAs*Y9*wBdnES zBB{{(^b#Q?_-Qm;EWz_rt@u>kYqzl3Z&^(nf8x6&@F#EA+HSfVxFlr&T7EPU^#W)n z`W;P0zenb>y~Bfg*%0zZHYD;sg@1?zzQa=Kx-Rww@%LD0hgWnVsn18umLNyip|a9# z>yH79USW^$CzTJ_^Qm=zl0zu@nO;hS#QzBm7fYNrrTwP0M?LPRXl;9%PoNEHlb ziqzXqvn*tE%qNa{Z(`XCX!L5BZf_tg(k#NtEA_G>)cjj$xLD1}4GsivGJ~LX^r96? zMzj(oA2WLl2tCIPDpEIx%+^iDzb05e5&D;W=fkZ4Nw1TU{SBm2NDd+94!y((7Yop^ zZWnupUbMo+5Us?;7GvW9skp@eK=knevy5jPhYzs8adLM)d{vzP$>g3&?z3GuwP z*LB$h*lSrGc&j2y_iVe0`Bi(?>NZ#s1QpX$;(Johhbb{f5Kn#?is-KhN%S{E6a9gR zqTeG_(cclW_-{ZL{T?EW{sffpYbp-=HNABtS`4fsNsCc5fhL|^bRkjMqh?Ex@xV#e zby1f{PjY?=nDqLf_^uXVNhA|aexa8W;dwtt!^NJbHq~A+0ftRXwGpvIhl-!dBIGOD zAT_nlY`HV)9b@W^B5%F4a5n)Qy~6GHTU+PqB|=DcGa4?IY;wv%YB-res_nUY(TZjh z(MsfM@)KC3#3mm^7`)spgBjrubK(1zZ)a5ha=@Zj_0eWK!fljDcqr@TLFoQEG+eBE zwWhkM7qQSjB9>@>Asguk-wU~s)X5EIOPJ9nUzN()f%irLqgR~$?ZCTBF9|}JZ$`t# z!VKHp1$-0b-FndqQ6gH2D39;JTf7~3A2M6l8TTAwU59iB-hBY2ALl%p+JPrN;4`=$%T$Z&65X$q*FW?MzJmG-nym>t*=XX8#g z`05Cp-|?N+T=*FhJJN5-?Y7|ZgSP8BRdy{Hf-jkruiSHa`RX0}UOa_c?;Jn=iX)-o z28sZJY!L>CbPb3Cj3se^!4wk0{n=)@&p6IFcbr(w>52S>fTy31sejLj7t~1&#Hoah z=j&xd_{~LVxY%#hCN;bCA{M@bh$X%=*xER$g27CYdb`#v3mFxM&41oHy903aikL2@ z6B=n2q2+qLvSadw*n7~hZXY|U7p?FyL@V*J#aQPi6}K1wh(7K& z%Xr3fCSuRYU1|RsK+@|tQG}RKA0-kxzNMEA;VyrThKt=rZG`nhy@-X&AYzHjEF}6% z_+H43q)r|;Tf&S)N4Z3E;>Ii1=~m|mf0AVSOmYY(V}QcQ40;$17wa@RLLxPs%phn_ z)r(eW6wyjFI#1`XQ=DLZljwarUV8au`<{9peKW-YQ{Myui6k&0{Sr#)c5KhG-O{V4F1UP!--`^@^S1%1h_%0eQ7Jk@G zEMOJ#b$ZbX=_6W+^pCGXF5W8Soo4Ggz%1@WRc-hDvvZ<@7K$U z@Yg>=!^Qrpx}0~fUd+O45wpZ=e_g(x^+mJ1W;|qF=plvQ&w22;OyL$2f7W%#(3 zGYdPl=^!bcNu1?7dI=KF@@+J1##w%>7qf5{#4K@^ewVRH3)SyUq&A)~Te6H?*K@h1 z)RP`~4Xy7_uSa3`!jYXub9OJ`K|CRK;RUoNw|RT;pW&93@{wrO~O{{ z@t8y7AkLY(t>~>brB)kO>Qgf#Efz^VKF=&C8Fw54Ke>@>EbW840E*r|BYrU%X}022 zLd0IZM2KGEYBXFdXthwUr5CY~Ga{DAS^jpK!CzO_Hd~I21V>PUd2cIw4S>-r!l7B% zI98su(qooHIuYWndg%~C{9QC$EJW3@{oQ&o3n?OIi4;qF{dOk`HMLtM_;1#6J->up z!%nGGv-#AQlNxxh+0tYLIfjB1DoMACM*)srNsi8T8;!DS&qBwO+M-9Z2rZw|ON$WZ z$I-AEVLqT2vk)d?mI$+pT|NtxI(8L0{u#e?$bpyPRb9|4yvWP7oABNvs}TlAOYo`S z5?03iG`!EyYE-%n930>UF|(PO&~wb7B6agVvvrekFE+rB_KcL__3xQ( z;$A?c*T={@aF${G}&?VH5K+L@aR{#TOtE z@)d26np$JF+!^Vzd1>xDl+FM&dUf0HJCvsM@*ouZbTnM7*yLD=RB$qb)Y^r5(TXM> z(Mr^6^3_J9#3mm^7~E-=!Hn+N5-D3H&Wv&P0u;TXkB+?%YNJF#!z=aDAY}hrXt-GR zY7Moc7qO5&B9=&hAxByW-wU~s)X5>UCCo^Zg=zTy{fstm1uS~CNzS_v!blEbi~r6Vp&cD-r(jQ!tBn3TB&Ba=T-@R`vt8c^44}xlS)B!a)WzJ7v3M=pck$1ZA2^a?N9FQ!kho#{eSLU7dI8c8vywMgQit+ z+uKU}@O^+)hkYK|a_9Lz#2LHPgckP0Jj4be(`Z87$=kcwLj07M^Wn`J!XK__QC zD0h3!g#b-&cNHzpk}8N22`$gpON(%ui_mbf+o+AacIibdTn7w7-b^^?KP53T>g`VudbzhW{G9XoWfvtwf#2BkqeAalhSc zU1$6>HIJ9Z-roV3`tj1EsgUAW98$!us?y*~Dvz{P@6pSF@Xo;li19ldNUI#Dl42Y+ zRg4d#;o?R{tsA^wFJjTqAYw^FBR@qp_?sQRYPK91CmKb4=f2wEQ9#p=`}7|Mqle{* z6loUG;}7++BHZfl&~ULHRTr)v*Na&w5;02@xkzW&Nfj;<2BMV{PKGKqvB=MP&v?)T z^C0qih;%f09YE6SM&iUg;a?;ZN>0^FiEy8j&~UN)B;#uYYchkN-J%z*a2!M{ah&4^ z>BT|%XPf0d<2+-`d2&zEUk-5eI!<(Go=7oDBr@EimkQxDSEAu!r%@XN?bnM~I1VC~ zIF5V_H26vS17^#S5#b0EVa`$d8vu-63C4%!i3a0TBEr|{r9z1CwP@Ic2=CO3Scni2 zOGG%>@*Syy!Ay~QdzV=jG7{#)_`C(c4*?pz8m6m!ghiS~Sh-g(D?-g5MZ?8vP7XH+ z-ed+r`$fHIg_04iM9Ig@QX8S?m_bGA<{M_~CgWdxkY9M1{`&w)ualA0JyIzohY<5) zy~GF?`#)${w~IZY7p-tHL@RNz#aMPD6}K1wh(1=G0@Y|@cFY;iIL=x&wkkSJzY!4W z^&4?)o^UUc2_t9dWkh()X=u3EW7Nj^(|QpLuR+8TuTeY+PsmraL2BxGX3L$?E}xv{ z9;e?0aP$gyIqVHRL>C))5}{$QUK)g8uSUbgf>jNMT6!@H$s%TnWS_Ygn1l_sp4IHY 
z8^hU`m+VrbZB=2LZq@RwXs@nsyG^HMH7M%~zn#=j+iYnwLLFj_EE7*U^!OTpqF1E- zzVqQ$y(9>k{w^9WmT7WKPnw}*2C1^U^`aF5MYIxuntT-lDY3~15eDCDmcfkZhq>tS z(*T)+^+y4VUe!mtg$TD%BH`gvdU+7K|2P^h*1g(z;sL#gh4vA#MEeWb2}Jl_$c>~< z9yVLTj5c{hF=yw{4*`r`agujC5XMLjq2#A}DG@UL2Q*wP(_|}5;3hK&+9&m*6%s|X z5{Vvv>x2BQ4G?7lQo~IY9@FB!1@gcMCVjx8}`zXTV3(c~a@cl<6h`7IiHq%0`Tr9%jD z77d#a;vv0=g%A<3M2Lg!-Xv8pm?=_kx0q!iBV*B==4}SO8xZN$G2QV=n50>RnRn=A zMkxCC(QvV%lj{rwZ!&|R{eWJyLd}R)qUK{}+a;mrm_bGA<`ZV?CgWop;;y1_L+Aql zORuAmZJ(rKNDd+AYkJ8MZuVDbSht%!su!(rGej$Kv&GntNh)qJ01$or#4O_(=UJb0 zp4@l9JPD}u`i|KBN%$AZgq0PigSbu1KbN85Vy{t~f3DMuSa=R1mUxchhE77hq7710 zXPPZ{M!yrWesed5o(GWhinrfAchA*JhG^`60}U6;HaT=6wVcc#Rd=~wv_i6oRw7xG zTj5BFO+JV)c#T;GGvXIPL;1&+Y`~*e`Oy|T!f%vFxM=F-LTJB^hKseY)@0wH7qQSk zB9`cXA=~E&-wU~s)X5RECCunk1aWfq-rWO$^a?cI7ep9~QwbgK)=P(w=sVGHu|yNQ zf(X(?I>GuOy;y}f5vxR;X1DN=BAa~_VevC&S;~m7GgpAb__kFmkFW$ zucP5&?S~_f1?(95zFxFK{fJhg{^L7_7H`MUV`l3*ZYNaYdW>;I#H_rKiT?*@*uO%>rR zG+f*;s_xCYKrd#|JR)XE^QdLJ1sMZqu}Gx2!z?G6W@H#WC1>--O8|@BPCnkIL0F7a z2@}`qWkPiIFGIt{idLJ6&FMuf^o)oldN#FrqiePt84Zpy4d!j!xCPMYRpHRA$f{4`>J9{8@) z))M>?-3Nc~ryqLys@`%6Wn(!d%I@xEAL1%mjYbJ&0iXM-Sw4KX3qHM7ty>lN0vLYN zi&q=G7r?bWHgfjhTU-b2R>_&g&w0(mKW)zzxuu$Q(1!1K!4>x5PQ$4V#SDGcGj7TqyUwsAmEky^a&@ekD?j5{V4A=%qq9 z%`?z&vD2uH?w_p}v2YwjEO8w9=zj1mv6q=GM@EDrOoTbxVXpx&dL!m`7@D*s-ga{AlMJz;!h$SK%Y!54`g27CYdYd=PLPo-Tf||D#_6VTSt6{p|m9R*& z2rGB$Wksm@O=!4S&B@^g!JEt=Xy2t5txz(el_>d`*>po;PrE8$)w6GooU%ZTupe?h~=9-}tSUsVFbCML*;SmHH`+gAzs ziZ)11Z7^HzjCT3tln1=pZsz5CX9FC)!tHna>$F}Xgk;Y_!^M(K4wpy`Co@R3Jx?!M z(a0lOiCj(YN+l&W`5?mJ6=oUC2%j&x%D28=3t053e%$lht_`bQY#GL*QTTKw;l%}a*0WnLw;XGLOf`tI=hB36?ZNT!tX5`vw&6V8tA#Y3cieP+Q z@Qmf9HqUP^>a zzl4U1WttpH5V*+qy{V4_BE8-epLrnC zj8lm&pVZ5T@SwCdNB*9Ma&YX{dIY7>dj_( z%{a&;caXyOrrrsd`tX#D?UCw?KkdPuRotwa4g$RGsf>OZP9CKOMD_1s+Be~!UCX;O zndhB`&#OSZW!jszI?i;pUFkMq`=uoV5X4tcc_p~Il)m~q@EF*X`Y8L+t$!DO)}B?r z4}R3YfdA03S_0a#?u=D`2rjv)x4Hf>`~)lC;y)hYKZxmICEpL)VYs-TgYin& zbpd}lykK`Q8l^z|$nNc~zYZ^(K;*vD@V3pm?Urx1s=Z+4%dAG%_Ih{qw%pA2mtmu@ zI~ba?TefRKOsWTeK*$N=Qt<66_+x}WqYQ=*z@Nk6pIx^Be~tO=N_obr9Kc+!7pwqK z?tCwpoNc%LR@=7|nY)$-@I|wV-|9BYhiBVvvj-^g=D^Yp6O7bmg43K%iq$vas! 
z!xr^}j^}WVttnR>&zfo2Q1`7_2diXNBxk1gU=+Z60ERm$)O>hldDdw_VZe9Z3b=0z zMqKL<)DYULqNFC!1)#=Tb0D0C({%jaRcoi}x5LG|>u<#J23Xz=9lOI!l|jm7--Zvh z;isH})sQ}Y_A{SV?bYARZg~^latGcr1c6x4W7V(qcCD`YM}oE9JTxMQ%Qa`N)`0(m z5|%gGt+`(P_u=~6>c5A#z720Zy;8G4Uce>SBCx|?HZAu+x6?aPe+yi71S+9jaayxL zW#}OFyD+_eD;l~D|2e|`egph_TfK+B*Iux@I}@ztUI3zm3W&Ep^9eX|6-;>D8DP_A zhb(~ESr5EFm?Wt!s|mzHGFY39|5%A)sj~D{n8<&xzX$uHcfmhiFoelIY~;s_SjG{L zwQukjUQx#u+Y83xMgc4`dt@5&6zfw#R=OPcIo<5 zVtn-wxojwUD!XZt*oXGQ^-)C|(ze2!CBZN_P^H~Pzv$Jr2CH_4zuyIgzoqxmUNGLV zT+lyo>A_(J{*c&dNb*RW%qngq+@o2Bjkj25!&P+RQ2(M^fk6-%x-Fzn340N2^g$27 zFsc<)fer(Bg&_a4tTV z4H-7WZwq@?XyO^MiA~Hz{dD*dcIvcJ&dSNyT%XFxsc+58U=)wXXT4{-PNNzgv*G2r z67KX&a2JejN)8({L|y?89+1cvF%!8&Zm#DssY+ycNSjNN^VcxQAFHkK%x;E1XK?;z zRZW};H}c|FmYK&d~mKRn7$MUsW|> zoc8>awOsK3&lve9hvdg2eZCtG!zr06cwCh~Q3e03YQlIGq)%3I;!oXVs$pLf5+5Ip z%g}uiJbS=l$Ocst#_6Uf@;KQqFh=%RNOpJxEJOA4RCyB0evYaM<5Z^(o^h@%V_Zig zmguZfhU{0VG9}1 zwx|i?{7VkbaQfe8jQ+{6aRK_{GY}c}|Enrvg7kN*nlMg!`rrj;{42&7kNVDd=YEFn zKUd{S(ESBf6UOOY4&MhnLSVr#U>VkqoJbcq4LwF~&EI~tiJx6fusKd8!>Al*|nVVv~T zrd>|>e=$aQH1~;iYi7v4N0li-_FGj=7$=+W98LC@_Zee48kewrcNv;Lt;&(0`4g%p zjMFUL=f{=(J!5>Y4|_}c`RfevkE*gJi2oZ^6UK>`22xg>ZE8q3zNyl%t=5gXfs|$N z=m7_X|9;GpogYZqY>e@BlyMJs7P;B6_}PQgewL~U;~LM6pX@Tm`lMhzKYsFJRmMca za+#_LA>|{gJPE4*hpGwVR7*FcJZy~YwK3VbTT;HM%9Wt|YpN!U z)1ALV;+Mua-w<<7wr6BU-@j00O%VTcRTIXEmoLzt{tQ#Y?}?c7{3ZHR;OPU7z&EOz zFiyB^iT+|^bgzl&&Re8^zA95f-505vFi!R`-|F3NHRj36PQ@7EFxd%v&BUWKP0DYp zGA2mhuWG_L>Cy@P>y0rky3X7w{p(b@5_G>-)r4`n^QZLhH^#Z>I*WHl{E;eag828U znlMg$?xg-Njq$!-v@S*4A|6oXOwj%%RTIW(mv4*ssWHyQh$Vkx#6PIAB?$kKstMzS zOIGYBr%a6x#jF#LhDz4#*TAC(92}0SnlMhaEM#?|F|NhnFfV9zzA95f*V|Q17$-Y- z>G-#dF)rHg!o}ljR5=s0zf{$PaoVMe$2S3 zlbt`sKgSs3>q3PWPw=NyITIRxx~d7|wC7InuQJB^q+mUN)&4)JGA2mBT-Ahe(q&Vi zjxoN+Lca4RKuuMi1l4s_6UM2|4h`=zM)XKXw0vOrW>t;^&9|$XFivy9!0=+%m{==#!j1w;%qkPL4<=B>Vy!?%Yf2~TC;QAY?CX92PKScRAV}xU8DG1Nm zSNMb~WrFp8Q8i(l_1qE48Cy(^#Bt`sNzWgkoCZ%Ha4>$dstMzq%LXWy7^53rs)Tgs zjZa>vN|R9cZ>pLw&UAKsVjJUmnDZx1nlR3?w3oci7|~;!oR;I~_ma1$ z5+%6)&#ESjbDiHye!v*vlZ5b`Uh==GQYKjcLsb*TSHY6EW%ez2sl05+*qR zvZ@K=oKJcO;Jv|N@CaX93vW?PhGri#M*T^gddc%M5!!!LrBBqrk5x?=uL0ROW&Js( zhJ~Y?^}KP)1Uz`a5#hM13FAy>$0^S?#`7@eSw2pAmMTR;&*!O{FwU}cNB%2}5k0mE z-;#S{?soi_s}d!+-luB9IM+}0=DeGX(S8!Ay`LB7y;_w%Q3HomO&G5M**5V%Fvj{Q zXFYF^_&Zc-5={TTstNN<^Dh99(aE2gW;%RkW<>HiRhk6TpHVeooavG9TMGy5d5_Tj z_r~a+;KEKmIK%n(RS6TE|8G?j#yOYH?uX7bH4F@U&D>@BmGI;N$AKrPnlR3F{_K9M zF~TRgz>8Pux2RGk6#fiV6UJH3o!##-Mtaz)0O|R&`zuum6P#b6YQi|@WAGyDcGs=g zW&ErNY0F(>tgquzr;pCG=51Bd1n&)16UKR$pMAL780F)f^8AAjZ&9U6uziQB3FB-_ z4nBO`7}X;o)$&shf2vB7;Q3EfO&I4{7O8r~7}4QeCofXvBUp#ZGsWJEXCU(>q_9C;_b!1)Agm1KFrtbfw%9tSiC#oillP*2> zdg8R{L1FHp*LCpd0SAR^RZSQtJ3G&Lt}&)Z!lpz%(fJKkj)bN!P&Hwk=7N(A*BRq` zeZ+V1$%boHSrf#+Ox1*O;>QoU@cG_y(`q@h@DXk@(R__D<~M}QryibZ$8S{SO;A6t zYQi}6l6P>v#~9ibkp z7^gbtbFlx%7}?{D?1GQM{y>#0LHGAmO&F&;|5c|W+f0oHCphDUpMqTtPakkVxI)#0 zanj{c!fnO~A7g~)#|Y0=WlAV~v#JT>WXtxhy~G&RBaG_2t!ukgSrSC=QZ-?mXlXFk zH^wyYBy&Tt*Q@d*sBWp6Fiv%TDE9Y^@y%Q9;y`Rql`%p3ovJ2`lb#!h{iHF@*Rs|G zIL{Bm-mA)&p!}n%CX7=q-(m6{V|??DGJk`~w^f-EWIwEG!Z_KI4JJ#rn;HZ1xn#~; zHJ?=FNYMOCRTIW(mMtx8GR89>kK`>Zl;F7oj?=cTr(hl_x=UMb(6Hsjqi1ryZj9}9$o6!*W4o5`v|Bf_M`soh zzN*TVp!+XXO&F(JI&pv87~?0V7|)%;|FbG%g7lxNnlMg!{uKVCXPO#xpDJfQ=Peu? 
z0LOrX!%0;W#_7+U%sr17$;vg1$vb+-Z!Lp&zlUrQk5@3 z`ERM3Fiv@PxOlTM!qZ)|L z*_JJ0j-6*}oH)w4&RfMCh6fKgFkGc-!Z_2?$Q0gbhETN|9jsW2z>Mvn*LJe$*Jv zBO%T57ZUu9DoKLpZ>gFv&a*69vg~}rW52v0$-k@8B$)m;RTIXU&RsD++Zf$p&sex* zd=@-?!14AOswRwcE?qKyu`#~G@pkT-@nxz+34LFpYQi|z*&yHnV?>AjqCECjQ>93- zY^$0u&T_$`@tcg%y^i;f2a8vY->6EO;QcmL6UKR$oQ3*`F}~Mq@>*60Mty$XfvEqX zN|fOG1F9yBb1jW;f72M-&`U;a=bnoCnkrd>@4r$tVVrMS{O=dW=w2)6&Wi#5T$L)p z_G79hjI%9`0iJS!sUdel-MMkVjqvmV$AIfqO&I4pKMweOW2B!bG+rDFyhxQcq4Z~~ znlR3MZY=P(jS;_r5TCyTV!tYBg7;UbnlR40JRbNuW28gWJL*95V}h?$rAsh=ld1{h zj7y?be`JjA#ON^Rh{JnTi4t7@fvO4PT+1R>4;UjmF?Y2CUG766#a9lW~YQi|vk{SK^ z#;6|Ig~peKZz6H&&Q+Qrb?fvfxA^r7_R~OPQlL_<9(d- zp1)J@Q>s)6wm+_F!Z_QqNa1&laXrep&WjTMttw4|=|@ye7-w1 zm%@_=90~sFnB_V@LO5lN@JX)i;^^Sh2WS0ERTIV)o*NzfPsT`}h)K_n3|_8Em}pfl zRW)Ipb9rR2X^il&x6F?U)>WwzY|p8hFwV9lD)?q&REIOnoQUA6CYb-AstM!F%VM#AZH)17JeU`U{e~({ zg6aRMYQi|vxw{9SFh+OSTNdsf{1;Wi1m{0fHDR1{Y3%#7-!MD|&y9PZ3{M_#41S`j z3FBO6LT;;pAU3OZPMz_jL1Z!uVhV7O8cGIV+B1;GF86wO^nwX)$F7pp z;cB~SLDbapEx2uSu(ZN{jJWN#U-sskGwnvN?gPkRWX~TUbJgTHyEnatk#_E**+`Bs_@S^ ze|5!yQ!ViBAPw(np(1{MCjRIKCsaBe%&UJ0yr+H$pw&MNztK|HKMF~^>wk*> z21Bfbd)z%8DAu@dH#-f>x37kxHa$ogjZ${tdzNQ_O4unUTa%e=ga-`q)DF;=s@ zvVDD*5rNk+86NQ%g|)YcE9>88CONeyxbV3NkryUa|b_c0t7DF&A8`g~7 zVEBU7b9M`)W*_d={}JrgE(ylGPR(f@-qvVWtcDF5E$?%!mIwTSnXBFQ;rZTOP_UoE z)xnx_!+Q06*$*$UA*cR3z)`RY%)$$$8ogk}70kNdySZ2Y8QjD2>p#ce9*2K!?Y+J? zb89bH4Fquv9{;PIL{V}*~bL!q;^eRSZDc|~&3F|98+j1+lUj2k;V;SWpNA|Ld zN>^KRc29UqD`Zj}hOTS36X=p}-6Ms>*d>wEQoMKb;k{;6F8 ze<8#WC;Ww$L-Viib+LYf6~66)ior3u!AhqIXuV!A64qMRcYyS@FM{R=J^7lJbN!`G z#fOT47UGT`cxSc2+NNIpL@*tUUD`g>YP78?-r@1fg2}e)%sDNqQD)}LUDtuf1w-gH Y<*-=_MqGQgTxqv_`!GtdJG1To10{8uT>t<8 diff --git a/core/dbt/docs/build/doctrees/index.doctree b/core/dbt/docs/build/doctrees/index.doctree index 3acd417b911278b24a5d2810fb56f09df6f5612c..55bd0490c3b53919735097a6b246f6a7ce194429 100644 GIT binary patch delta 74 zcmeygmi5b8R+a|VsWKZ`9yOTC2WMv%C}if9q!uM6mt>ZuDijpur)B1(Dio)dloses T$zYG2k|EZEqHeQfQuick search ©2022, dbt Labs. | - Powered by Sphinx 6.0.0 - & Alabaster 0.7.12 + Powered by Sphinx 6.1.3 + & Alabaster 0.7.13 diff --git a/core/dbt/docs/build/html/index.html b/core/dbt/docs/build/html/index.html index c0f74617916..fd48833b877 100644 --- a/core/dbt/docs/build/html/index.html +++ b/core/dbt/docs/build/html/index.html @@ -837,8 +837,8 @@

    Quick search

     ©2022, dbt Labs. |
-      Powered by Sphinx 6.0.0
-      & Alabaster 0.7.12
+      Powered by Sphinx 6.1.3
+      & Alabaster 0.7.13
      |
      Quick search
-
+
\ No newline at end of file
diff --git a/core/dbt/docs/build/html/search.html b/core/dbt/docs/build/html/search.html
index 9622cf38d3c..9a6204f2b3c 100644
--- a/core/dbt/docs/build/html/search.html
+++ b/core/dbt/docs/build/html/search.html
@@ -106,8 +106,8 @@

    Related Topics

     ©2022, dbt Labs. |
-      Powered by Sphinx 6.0.0
-      & Alabaster 0.7.12
+      Powered by Sphinx 6.1.3
+      & Alabaster 0.7.13
diff --git a/core/dbt/docs/build/html/searchindex.js b/core/dbt/docs/build/html/searchindex.js
index 25dd9fd3af5..62af17602dc 100644
--- a/core/dbt/docs/build/html/searchindex.js
+++ b/core/dbt/docs/build/html/searchindex.js
@@ -1 +1 @@
[single-line generated Search.setIndex(...) payload omitted on both sides of this hunk; the only term change in the regenerated index is "inter" becoming "interact"]
diff --git a/test/integration/035_docs_blocks_tests/test_docs_blocks.py b/test/integration/035_docs_blocks_tests/test_docs_blocks.py
deleted file mode 100644
index f37c8e677ac..00000000000
--- a/test/integration/035_docs_blocks_tests/test_docs_blocks.py
+++ /dev/null
@@ -1,184 +0,0 @@
-import json
-import os
-
-from test.integration.base import DBTIntegrationTest, use_profile
-
-import dbt.exceptions
-
-class TestGoodDocsBlocks(DBTIntegrationTest):
-    @property
-    def schema(self):
-        return 'docs_blocks_035'
-
-    @staticmethod
-    def dir(path):
-        return os.path.normpath(path)
-
-    @property
-    def models(self):
-        return self.dir("models")
-
-    @use_profile('postgres')
-    def test_postgres_valid_doc_ref(self):
-        self.assertEqual(len(self.run_dbt()), 1)
-
-        self.assertTrue(os.path.exists('./target/manifest.json'))
-
-        with open('./target/manifest.json') as fp:
-            manifest = json.load(fp)
-
-        model_data = manifest['nodes']['model.test.model']
-        self.assertEqual(
-            model_data['description'],
-            'My model is just a copy of the seed'
-        )
-        self.assertEqual(
-            {
-                'name': 'id',
-                'description': 'The user ID number',
-                'data_type': None,
-                'meta': {},
-                'quote': None,
-                'tags': [],
-            },
-            model_data['columns']['id']
-        )
-        self.assertEqual(
-            {
-                'name': 'first_name',
-                'description': "The user's first name",
-                'data_type': None,
-                'meta': {},
-                'quote': None,
-                'tags': [],
-            },
-            model_data['columns']['first_name']
-        )
-
-        self.assertEqual(
-            {
-                'name': 'last_name',
-                'description': "The user's last name",
-                'data_type': None,
-                'meta': {},
-                'quote': None,
-                'tags': [],
-            },
-            model_data['columns']['last_name']
-        )
-        self.assertEqual(len(model_data['columns']), 3)
-
-    @use_profile('postgres')
-    def test_postgres_alternative_docs_path(self):
-        self.use_default_project({"docs-paths": [self.dir("docs")]})
-        self.assertEqual(len(self.run_dbt()), 1)
-
-        self.assertTrue(os.path.exists('./target/manifest.json'))
-
-        with open('./target/manifest.json') as fp:
-            manifest = json.load(fp)
-
-        model_data = manifest['nodes']['model.test.model']
-        self.assertEqual(
-            model_data['description'],
-            'Alt text about the model'
-        )
-        self.assertEqual(
-            {
-                'name': 'id',
-                'description': 'The user ID number with alternative text',
-                'data_type': None,
-                'meta': {},
-                'quote': None,
-                'tags': [],
-            },
-            model_data['columns']['id']
-        )
-        self.assertEqual(
-            {
-                'name': 'first_name',
-                'description': "The user's first name",
-                'data_type': None,
-                'meta': {},
-                'quote': None,
-                'tags': [],
-            },
-            model_data['columns']['first_name']
-        )
-
-        self.assertEqual(
-            {
-                'name': 'last_name',
-                'description': "The user's last name in this other file",
-                'data_type': None,
-                'meta': {},
-                'quote': None,
-                'tags': [],
-            },
-            model_data['columns']['last_name']
-        )
-        self.assertEqual(len(model_data['columns']), 3)
-
-    @use_profile('postgres')
-    def test_postgres_alternative_docs_path_missing(self):
-        self.use_default_project({"docs-paths": [self.dir("not-docs")]})
-        with self.assertRaises(dbt.exceptions.CompilationError):
-            self.run_dbt()
-
-
-class TestMissingDocsBlocks(DBTIntegrationTest):
-    @property
-    def schema(self):
-        return 'docs_blocks_035'
-
-    @staticmethod
-    def dir(path):
-        return os.path.normpath(path)
-
-    @property
-    def models(self):
-        return self.dir("missing_docs_models")
-
-    @use_profile('postgres')
-    def test_postgres_missing_doc_ref(self):
-        # The run should fail since we could not find the docs reference.
-        with self.assertRaises(dbt.exceptions.CompilationError):
-            self.run_dbt()
-
-
-class TestBadDocsBlocks(DBTIntegrationTest):
-    @property
-    def schema(self):
-        return 'docs_blocks_035'
-
-    @staticmethod
-    def dir(path):
-        return os.path.normpath(path)
-
-    @property
-    def models(self):
-        return self.dir("invalid_name_models")
-
-    @use_profile('postgres')
-    def test_postgres_invalid_doc_ref(self):
-        # The run should fail since we could not find the docs reference.
-        with self.assertRaises(dbt.exceptions.CompilationError):
-            self.run_dbt(expect_pass=False)
-
-class TestDuplicateDocsBlock(DBTIntegrationTest):
-    @property
-    def schema(self):
-        return 'docs_blocks_035'
-
-    @staticmethod
-    def dir(path):
-        return os.path.normpath(path)
-
-    @property
-    def models(self):
-        return self.dir("duplicate_docs")
-
-    @use_profile('postgres')
-    def test_postgres_duplicate_doc_ref(self):
-        with self.assertRaises(dbt.exceptions.CompilationError):
-            self.run_dbt(expect_pass=False)
diff --git a/tests/functional/docs/test_duplicate_docs_block.py b/tests/functional/docs/test_duplicate_docs_block.py
new file mode 100644
index 00000000000..2ff9459e4b3
--- /dev/null
+++ b/tests/functional/docs/test_duplicate_docs_block.py
@@ -0,0 +1,35 @@
+import pytest
+
+from dbt.tests.util import run_dbt
+import dbt.exceptions
+
+
+duplicate_doc_blocks_model_sql = "select 1 as id, 'joe' as first_name"
+
+duplicate_doc_blocks_docs_md = """{% docs my_model_doc %}
+    a doc string
+{% enddocs %}
+
+{% docs my_model_doc %}
+    duplicate doc string
+{% enddocs %}"""
+
+duplicate_doc_blocks_schema_yml = """version: 2
+
+models:
+  - name: model
+    description: "{{ doc('my_model_doc') }}"
+"""
+
+
+class TestDuplicateDocsBlock:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "model.sql": duplicate_doc_blocks_model_sql,
+            "schema.yml": duplicate_doc_blocks_schema_yml,
+        }
+
+    def test_duplicate_doc_ref(self, project):
+        with pytest.raises(dbt.exceptions.CompilationError):
+            run_dbt(expect_pass=False)
diff --git a/tests/functional/docs/test_good_docs_blocks.py b/tests/functional/docs/test_good_docs_blocks.py
new file mode 100644
index 00000000000..9fc9a7f0bb5
--- /dev/null
+++ b/tests/functional/docs/test_good_docs_blocks.py
@@ -0,0 +1,171 @@
+import json
+import os
+from pathlib import Path
+import pytest
+
+from dbt.tests.util import run_dbt, update_config_file, write_file
+
+
+good_docs_blocks_model_sql = "select 1 as id, 'joe' as first_name"
+
+good_docs_blocks_docs_md = """{% docs my_model_doc %}
+My model is just a copy of the seed
+{% enddocs %}
+
+{% docs my_model_doc__id %}
+The user ID number
+{% enddocs %}
+
+The following doc is never used, which should be fine.
+{% docs my_model_doc__first_name %}
+The user's first name (should not be shown!)
+{% enddocs %}
+
+This doc is referenced by its full name
+{% docs my_model_doc__last_name %}
+The user's last name
+{% enddocs %}
+"""
+
+good_doc_blocks_alt_docs_md = """{% docs my_model_doc %}
+Alt text about the model
+{% enddocs %}
+
+{% docs my_model_doc__id %}
+The user ID number with alternative text
+{% enddocs %}
+
+The following doc is never used, which should be fine.
+{% docs my_model_doc__first_name %}
+The user's first name - don't show this text!
+{% enddocs %}
+
+This doc is referenced by its full name
+{% docs my_model_doc__last_name %}
+The user's last name in this other file
+{% enddocs %}
+"""
+
+good_docs_blocks_schema_yml = """version: 2
+
+models:
+  - name: model
+    description: "{{ doc('my_model_doc') }}"
+    columns:
+      - name: id
+        description: "{{ doc('my_model_doc__id') }}"
+      - name: first_name
+        description: The user's first name
+      - name: last_name
+        description: "{{ doc('test', 'my_model_doc__last_name') }}"
+"""
+
+
+class TestGoodDocsBlocks:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "model.sql": good_docs_blocks_model_sql,
+            "schema.yml": good_docs_blocks_schema_yml,
+            "docs.md": good_docs_blocks_docs_md,
+        }
+
+    def test_valid_doc_ref(self, project):
+        result = run_dbt()
+        assert len(result.results) == 1
+
+        assert os.path.exists("./target/manifest.json")
+
+        with open("./target/manifest.json") as fp:
+            manifest = json.load(fp)
+
+        model_data = manifest["nodes"]["model.test.model"]
+
+        assert model_data["description"] == "My model is just a copy of the seed"
+
+        assert {
+            "name": "id",
+            "description": "The user ID number",
+            "data_type": None,
+            "meta": {},
+            "quote": None,
+            "tags": [],
+        } == model_data["columns"]["id"]
+
+        assert {
+            "name": "first_name",
+            "description": "The user's first name",
+            "data_type": None,
+            "meta": {},
+            "quote": None,
+            "tags": [],
+        } == model_data["columns"]["first_name"]
+
+        assert {
+            "name": "last_name",
+            "description": "The user's last name",
+            "data_type": None,
+            "meta": {},
+            "quote": None,
+            "tags": [],
+        } == model_data["columns"]["last_name"]
+
+        assert len(model_data["columns"]) == 3
+
+
+class TestGoodDocsBlocksAltPath:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {"model.sql": good_docs_blocks_model_sql, "schema.yml": good_docs_blocks_schema_yml}
+
+    def test_alternative_docs_path(self, project):
+        # self.use_default_project({"docs-paths": [self.dir("docs")]})
+        docs_path = Path(project.project_root, "alt-docs")
+        docs_path.mkdir()
+        write_file(good_doc_blocks_alt_docs_md, project.project_root, "alt-docs", "docs.md")
+
+        update_config_file(
+            {"docs-paths": [str(docs_path)]}, project.project_root, "dbt_project.yml"
+        )
+
+        result = run_dbt()
+
+        assert len(result.results) == 1
+
+        assert os.path.exists("./target/manifest.json")
+
+        with open("./target/manifest.json") as fp:
+            manifest = json.load(fp)
+
+        model_data = manifest["nodes"]["model.test.model"]
+
+        assert model_data["description"] == "Alt text about the model"
+
+        assert {
+            "name": "id",
+            "description": "The user ID number with alternative text",
+            "data_type": None,
+            "meta": {},
+            "quote": None,
+            "tags": [],
+        } == model_data["columns"]["id"]
+
+        assert {
+            "name": "first_name",
+            "description": "The user's first name",
+            "data_type": None,
+            "meta": {},
+            "quote": None,
+            "tags": [],
+        } == model_data["columns"]["first_name"]
+
+        assert {
+            "name": "last_name",
+            "description": "The user's last name in this other file",
+            "data_type": None,
+            "meta": {},
+            "quote": None,
+            "tags": [],
+        } == model_data["columns"]["last_name"]
+
+        assert len(model_data["columns"]) == 3
diff --git a/tests/functional/docs/test_invalid_doc_ref.py b/tests/functional/docs/test_invalid_doc_ref.py
new file mode 100644
index 00000000000..7c486938124
--- /dev/null
+++ b/tests/functional/docs/test_invalid_doc_ref.py
@@ -0,0 +1,47 @@
+import pytest
+
+from dbt.tests.util import run_dbt
+import dbt.exceptions
+
+
+invalid_doc_ref_model_sql = "select 1 as id, 'joe' as first_name"
+
+invalid_doc_ref_docs_md = """{% docs my_model_doc %}
+My model is just a copy of the seed
+{% enddocs %}
+
+{% docs my_model_doc__id %}
+The user ID number
+{% enddocs %}
+
+The following doc is never used, which should be fine.
+{% docs my_model_doc__first_name %}
+The user's first name
+{% enddocs %}"""
+
+invalid_doc_ref_schema_yml = """version: 2
+
+models:
+  - name: model
+    description: "{{ doc('my_model_doc') }}"
+    columns:
+      - name: id
+        description: "{{ doc('my_model_doc__id') }}"
+      - name: first_name
+        description: "{{ doc('foo.bar.my_model_doc__id') }}"
+"""
+
+
+class TestInvalidDocRef:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "model.sql": invalid_doc_ref_model_sql,
+            "docs.md": invalid_doc_ref_docs_md,
+            "schema.yml": invalid_doc_ref_schema_yml,
+        }
+
+    def test_invalid_doc_ref(self, project):
+        # The run should fail since we could not find the docs reference.
+        with pytest.raises(dbt.exceptions.CompilationError):
+            run_dbt(expect_pass=False)
diff --git a/tests/functional/docs/test_missing_docs_blocks.py b/tests/functional/docs/test_missing_docs_blocks.py
new file mode 100644
index 00000000000..3b6f4e540b9
--- /dev/null
+++ b/tests/functional/docs/test_missing_docs_blocks.py
@@ -0,0 +1,43 @@
+import pytest
+
+from dbt.tests.util import run_dbt
+import dbt.exceptions
+
+
+missing_docs_blocks_model_sql = "select 1 as id, 'joe' as first_name"
+
+missing_docs_blocks_docs_md = """{% docs my_model_doc %}
+My model is just a copy of the seed
+{% enddocs %}
+
+{% docs my_model_doc__id %}
+The user ID number
+{% enddocs %}"""
+
+missing_docs_blocks_schema_yml = """version: 2
+
+models:
+  - name: model
+    description: "{{ doc('my_model_doc') }}"
+    columns:
+      - name: id
+        description: "{{ doc('my_model_doc__id') }}"
+      - name: first_name
+        # invalid reference
+        description: "{{ doc('my_model_doc__first_name') }}"
+"""
+
+
+class TestMissingDocsBlocks:
+    @pytest.fixture(scope="class")
+    def models(self):
+        return {
+            "model.sql": missing_docs_blocks_model_sql,
+            "schema.yml": missing_docs_blocks_schema_yml,
+            "docs.md": missing_docs_blocks_docs_md,
+        }
+
+    def test_missing_doc_ref(self, project):
+        # The run should fail since we could not find the docs reference.
+        with pytest.raises(dbt.exceptions.CompilationError):
+            run_dbt()

From 9801eebc588b4e5ea053a58533a4da1b51cdef4a Mon Sep 17 00:00:00 2001
From: Jeremy Cohen
Date: Fri, 20 Jan 2023 19:58:40 +0100
Subject: [PATCH 125/156] Consolidate changie entries from #6620 (#6684)

---
 .changes/unreleased/Fixes-20230116-123645.yaml | 4 ++--
 .changes/unreleased/Fixes-20230116-123709.yaml | 6 ------
 2 files changed, 2 insertions(+), 8 deletions(-)
 delete mode 100644 .changes/unreleased/Fixes-20230116-123709.yaml

diff --git a/.changes/unreleased/Fixes-20230116-123645.yaml b/.changes/unreleased/Fixes-20230116-123645.yaml
index ee15803a297..b3c35d8e2be 100644
--- a/.changes/unreleased/Fixes-20230116-123645.yaml
+++ b/.changes/unreleased/Fixes-20230116-123645.yaml
@@ -1,6 +1,6 @@
 kind: Fixes
-body: Respect quoting config for dbt.ref() + dbt.source() in dbt-py models
+body: Respect quoting config for dbt.ref(), dbt.source(), and dbt.this() in dbt-py models
 time: 2023-01-16T12:36:45.63092+01:00
 custom:
   Author: jtcohen6
-  Issue: "6103"
+  Issue: 6103 6619
diff --git a/.changes/unreleased/Fixes-20230116-123709.yaml b/.changes/unreleased/Fixes-20230116-123709.yaml
deleted file mode 100644
index 56788519d0a..00000000000
--- a/.changes/unreleased/Fixes-20230116-123709.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-kind: Fixes
-body: Respect quoting config for dbt.this() in dbt-py models
-time: 2023-01-16T12:37:09.000659+01:00
-custom:
-  Author: jtcohen6
-  Issue: "6619"

From 3aeab73740644e3df224b3870af39a961ebcc2d9 Mon Sep 17 00:00:00 2001
From: Michelle Ark
Date: Fri, 20 Jan 2023 14:27:02 -0500
Subject: [PATCH 126/156] convert 069_build_tests (#6678)

---
 .../models-circular-relationship/model_0.sql  |   3 -
 .../models-circular-relationship/model_1.sql  |   3 -
 .../models-circular-relationship/model_99.sql |   4 -
 .../models-circular-relationship/test.yml     |  18 --
 .../models-failing/model_0.sql                |   3 -
 .../models-failing/model_1.sql                |   3 -
 .../models-failing/model_2.sql                |   3 -
 .../models-failing/model_3.sql                |   3 -
 .../models-failing/model_99.sql               |   3 -
 .../069_build_tests/models-failing/test.yml   |  15 -
 .../models-interdependent/model_a.sql         |   1 -
 .../models-interdependent/model_c.sql         |   1 -
 .../models-interdependent/schema.yml          |  41 ---
 .../models-simple-blocking/model_a.sql        |   1 -
 .../models-simple-blocking/model_b.sql        |   1 -
 .../models-simple-blocking/schema.yml         |   8 -
 .../069_build_tests/models/model_0.sql        |   3 -
 .../069_build_tests/models/model_1.sql        |   3 -
 .../069_build_tests/models/model_2.sql        |   3 -
 .../069_build_tests/models/model_99.sql       |   3 -
 .../069_build_tests/models/test.yml           |  15 -
 .../069_build_tests/seeds/countries.csv       |  10 -
 .../069_build_tests/snapshots/snap_0.sql      |  16 --
 .../069_build_tests/snapshots/snap_1.sql      |  39 ---
 .../069_build_tests/snapshots/snap_99.sql     |  15 -
 .../069_build_tests/test-files/model_b.sql    |   1 -
 .../test-files/model_b_null.sql               |   1 -
 .../integration/069_build_tests/test_build.py | 143 ----------
 .../069_build_tests/tests-failing/model_0.sql |   3 -
 .../069_build_tests/tests-failing/model_1.sql |   3 -
 .../069_build_tests/tests-failing/model_2.sql |   3 -
 .../tests-failing/model_99.sql                |   3 -
 .../069_build_tests/tests-failing/test.yml    |  18 --
 tests/functional/build/fixtures.py            | 268 ++++++++++++++++++
 tests/functional/build/test_build.py          | 198 +++++++++++++
 35 files changed, 466 insertions(+), 393 deletions(-)
 delete mode 100644 test/integration/069_build_tests/models-circular-relationship/model_0.sql
 delete mode 100644 test/integration/069_build_tests/models-circular-relationship/model_1.sql
 delete mode 100644 test/integration/069_build_tests/models-circular-relationship/model_99.sql
 delete mode 100644 test/integration/069_build_tests/models-circular-relationship/test.yml
 delete mode 100644 test/integration/069_build_tests/models-failing/model_0.sql
 delete mode 100644 test/integration/069_build_tests/models-failing/model_1.sql
 delete mode 100644 test/integration/069_build_tests/models-failing/model_2.sql
 delete mode 100644 test/integration/069_build_tests/models-failing/model_3.sql
 delete mode 100644 test/integration/069_build_tests/models-failing/model_99.sql
 delete mode 100644 test/integration/069_build_tests/models-failing/test.yml
 delete mode 100644 test/integration/069_build_tests/models-interdependent/model_a.sql
 delete mode 100644 test/integration/069_build_tests/models-interdependent/model_c.sql
 delete mode 100644 test/integration/069_build_tests/models-interdependent/schema.yml
 delete mode 100644 test/integration/069_build_tests/models-simple-blocking/model_a.sql
 delete mode 100644 test/integration/069_build_tests/models-simple-blocking/model_b.sql
 delete mode 100644 test/integration/069_build_tests/models-simple-blocking/schema.yml
 delete mode 100644 test/integration/069_build_tests/models/model_0.sql
 delete mode 100644 test/integration/069_build_tests/models/model_1.sql
 delete mode 100644 test/integration/069_build_tests/models/model_2.sql
 delete mode 100644 test/integration/069_build_tests/models/model_99.sql
 delete mode 100644 test/integration/069_build_tests/models/test.yml
 delete mode 100644 test/integration/069_build_tests/seeds/countries.csv
 delete mode 100644 test/integration/069_build_tests/snapshots/snap_0.sql
 delete mode 100644 test/integration/069_build_tests/snapshots/snap_1.sql
 delete mode 100644 test/integration/069_build_tests/snapshots/snap_99.sql
 delete mode 100644 test/integration/069_build_tests/test-files/model_b.sql
 delete mode 100644 test/integration/069_build_tests/test-files/model_b_null.sql
 delete mode 100644 test/integration/069_build_tests/test_build.py
 delete mode 100644 test/integration/069_build_tests/tests-failing/model_0.sql
 delete mode 100644 test/integration/069_build_tests/tests-failing/model_1.sql
 delete mode 100644 test/integration/069_build_tests/tests-failing/model_2.sql
 delete mode 100644 test/integration/069_build_tests/tests-failing/model_99.sql
 delete mode 100644 test/integration/069_build_tests/tests-failing/test.yml
 create mode 100644 tests/functional/build/fixtures.py
 create mode 100644 tests/functional/build/test_build.py
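For context on the fixtures deleted below: models-simple-blocking pairs a model_a that yields a null id (with a not_null test on it) with a model_b that selects from model_a, so `dbt build` is expected to fail the test and skip the downstream model. A hedged sketch of that scenario in the new functional style (illustrative only; the converted suite actually lives in tests/functional/build/test_build.py, whose contents are not shown in this hunk):

import pytest

from dbt.tests.util import run_dbt


# Schema string taken from the deleted models-simple-blocking/schema.yml.
simple_blocking_schema_yml = """
version: 2
models:
  - name: model_a
    columns:
      - name: id
        tests:
          - not_null
"""


class TestSimpleBlockingTest:
    @pytest.fixture(scope="class")
    def models(self):
        return {
            "model_a.sql": "select null as id",  # not_null is expected to fail
            "model_b.sql": "select * from {{ ref('model_a') }}",  # should be skipped
            "schema.yml": simple_blocking_schema_yml,
        }

    def test_build_stops_downstream(self, project):
        # The failing test makes the build fail overall; model_a, its test,
        # and the (skipped) model_b should each report a result.
        result = run_dbt(["build"], expect_pass=False)
        assert len(result.results) == 3
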
diff --git a/test/integration/069_build_tests/models-circular-relationship/model_0.sql b/test/integration/069_build_tests/models-circular-relationship/model_0.sql
deleted file mode 100644
index 2fe54b32418..00000000000
--- a/test/integration/069_build_tests/models-circular-relationship/model_0.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select * from {{ ref('countries') }}
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models-circular-relationship/model_1.sql b/test/integration/069_build_tests/models-circular-relationship/model_1.sql
deleted file mode 100644
index b11c0b7b7ed..00000000000
--- a/test/integration/069_build_tests/models-circular-relationship/model_1.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select * from {{ ref('model_0') }}
diff --git a/test/integration/069_build_tests/models-circular-relationship/model_99.sql b/test/integration/069_build_tests/models-circular-relationship/model_99.sql
deleted file mode 100644
index a680446bea0..00000000000
--- a/test/integration/069_build_tests/models-circular-relationship/model_99.sql
+++ /dev/null
@@ -1,4 +0,0 @@
-{{ config(materialized='table') }}
-
-select '1' as "num"
-
diff --git a/test/integration/069_build_tests/models-circular-relationship/test.yml b/test/integration/069_build_tests/models-circular-relationship/test.yml
deleted file mode 100644
index 991dde8a22a..00000000000
--- a/test/integration/069_build_tests/models-circular-relationship/test.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: 2
-
-models:
-  - name: model_0
-    columns:
-      - name: iso3
-        tests:
-          - relationships:
-              to: ref('model_1')
-              field: iso3
-
-  - name: model_1
-    columns:
-      - name: iso3
-        tests:
-          - relationships:
-              to: ref('model_0')
-              field: iso3
diff --git a/test/integration/069_build_tests/models-failing/model_0.sql b/test/integration/069_build_tests/models-failing/model_0.sql
deleted file mode 100644
index 2fe54b32418..00000000000
--- a/test/integration/069_build_tests/models-failing/model_0.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select * from {{ ref('countries') }}
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models-failing/model_1.sql b/test/integration/069_build_tests/models-failing/model_1.sql
deleted file mode 100644
index cc5cf86c1e4..00000000000
--- a/test/integration/069_build_tests/models-failing/model_1.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select bad_column from {{ ref('snap_0') }}
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models-failing/model_2.sql b/test/integration/069_build_tests/models-failing/model_2.sql
deleted file mode 100644
index 25bea5224cf..00000000000
--- a/test/integration/069_build_tests/models-failing/model_2.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select * from {{ ref('snap_1') }}
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models-failing/model_3.sql b/test/integration/069_build_tests/models-failing/model_3.sql
deleted file mode 100644
index bc0d81e14e5..00000000000
--- a/test/integration/069_build_tests/models-failing/model_3.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select * from {{ ref('model_1') }}
diff --git a/test/integration/069_build_tests/models-failing/model_99.sql b/test/integration/069_build_tests/models-failing/model_99.sql
deleted file mode 100644
index 38c103e823b..00000000000
--- a/test/integration/069_build_tests/models-failing/model_99.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select '1' as "num"
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models-failing/test.yml b/test/integration/069_build_tests/models-failing/test.yml
deleted file mode 100644
index 6f9133aa487..00000000000
--- a/test/integration/069_build_tests/models-failing/test.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-version: 2
-
-models:
-  - name: model_0
-    columns:
-      - name: iso3
-        tests:
-          - unique
-          - not_null
-  - name: model_2
-    columns:
-      - name: iso3
-        tests:
-          - unique
-          - not_null
diff --git a/test/integration/069_build_tests/models-interdependent/model_a.sql b/test/integration/069_build_tests/models-interdependent/model_a.sql
deleted file mode 100644
index 43258a71464..00000000000
--- a/test/integration/069_build_tests/models-interdependent/model_a.sql
+++ /dev/null
@@ -1 +0,0 @@
-select 1 as id
diff --git a/test/integration/069_build_tests/models-interdependent/model_c.sql b/test/integration/069_build_tests/models-interdependent/model_c.sql
deleted file mode 100644
index 6b5ce07801a..00000000000
--- a/test/integration/069_build_tests/models-interdependent/model_c.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('model_b') }}
diff --git a/test/integration/069_build_tests/models-interdependent/schema.yml b/test/integration/069_build_tests/models-interdependent/schema.yml
deleted file mode 100644
index 1d3fe4a9bfa..00000000000
--- a/test/integration/069_build_tests/models-interdependent/schema.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-version: 2
-
-models:
-  - name: model_a
-    columns:
-      - name: id
-        tests:
-          - unique
-          - not_null
-          - relationships:
-              to: ref('model_b')
-              field: id
-          - relationships:
-              to: ref('model_c')
-              field: id
-
-  - name: model_b
-    columns:
-      - name: id
-        tests:
-          - unique
-          - not_null
-          - relationships:
-              to: ref('model_a')
-              field: id
-          - relationships:
-              to: ref('model_c')
-              field: id
-
-  - name: model_c
-    columns:
-      - name: id
-        tests:
-          - unique
-          - not_null
-          - relationships:
-              to: ref('model_a')
-              field: id
-          - relationships:
-              to: ref('model_b')
-              field: id
diff --git a/test/integration/069_build_tests/models-simple-blocking/model_a.sql b/test/integration/069_build_tests/models-simple-blocking/model_a.sql
deleted file mode 100644
index 23fa9a380d7..00000000000
--- a/test/integration/069_build_tests/models-simple-blocking/model_a.sql
+++ /dev/null
@@ -1 +0,0 @@
-select null as id
diff --git a/test/integration/069_build_tests/models-simple-blocking/model_b.sql b/test/integration/069_build_tests/models-simple-blocking/model_b.sql
deleted file mode 100644
index ad13bfaf538..00000000000
--- a/test/integration/069_build_tests/models-simple-blocking/model_b.sql
+++ /dev/null
@@ -1 +0,0 @@
-select * from {{ ref('model_a') }}
diff --git a/test/integration/069_build_tests/models-simple-blocking/schema.yml b/test/integration/069_build_tests/models-simple-blocking/schema.yml
deleted file mode 100644
index 92f1934fb25..00000000000
--- a/test/integration/069_build_tests/models-simple-blocking/schema.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-version: 2
-
-models:
-  - name: model_a
-    columns:
-      - name: id
-        tests:
-          - not_null
diff --git a/test/integration/069_build_tests/models/model_0.sql b/test/integration/069_build_tests/models/model_0.sql
deleted file mode 100644
index 2fe54b32418..00000000000
--- a/test/integration/069_build_tests/models/model_0.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select * from {{ ref('countries') }}
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models/model_1.sql b/test/integration/069_build_tests/models/model_1.sql
deleted file mode 100644
index d8efda2c3b2..00000000000
--- a/test/integration/069_build_tests/models/model_1.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select * from {{ ref('snap_0') }}
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models/model_2.sql b/test/integration/069_build_tests/models/model_2.sql
deleted file mode 100644
index 25bea5224cf..00000000000
--- a/test/integration/069_build_tests/models/model_2.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select * from {{ ref('snap_1') }}
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models/model_99.sql b/test/integration/069_build_tests/models/model_99.sql
deleted file mode 100644
index 38c103e823b..00000000000
--- a/test/integration/069_build_tests/models/model_99.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-{{ config(materialized='table') }}
-
-select '1' as "num"
\ No newline at end of file
diff --git a/test/integration/069_build_tests/models/test.yml b/test/integration/069_build_tests/models/test.yml
deleted file mode 100644
index 6f9133aa487..00000000000
--- a/test/integration/069_build_tests/models/test.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-version: 2
-
-models:
-  - name: model_0
-    columns:
-      - name: iso3
-        tests:
-          - unique
-          - not_null
-  - name: model_2
-    columns:
-      - name: iso3
-        tests:
-          - unique
-          - not_null
diff --git a/test/integration/069_build_tests/seeds/countries.csv b/test/integration/069_build_tests/seeds/countries.csv
deleted file mode 100644
index 82db396fd6f..00000000000
--- a/test/integration/069_build_tests/seeds/countries.csv
+++ /dev/null
@@ -1,10 +0,0 @@
-"iso3","name","iso2","iso_numeric","cow_alpha","cow_numeric","fao_code","un_code","wb_code","imf_code","fips","geonames_name","geonames_id","r_name","aiddata_name","aiddata_code","oecd_name","oecd_code","historical_name","historical_iso3","historical_iso2","historical_iso_numeric"
-"ABW","Aruba","AW","533","","","","533","ABW","314","AA","Aruba","3577279","ARUBA","Aruba","12","Aruba","373","","","",""
-"AFG","Afghanistan","AF","4","AFG","700","2","4","AFG","512","AF","Afghanistan","1149361","AFGHANISTAN","Afghanistan","1","Afghanistan","625","","","",""
-"AGO","Angola","AO","24","ANG","540","7","24","AGO","614","AO","Angola","3351879","ANGOLA","Angola","7","Angola","225","","","",""
-"AIA","Anguilla","AI","660","","","","660","AIA","312","AV","Anguilla","3573511","ANGUILLA","Anguilla","8","Anguilla","376","","","",""
-"ALA","Aland Islands","AX","248","","","","248","ALA","","","Aland Islands","661882","ALAND ISLANDS","","","","","","","",""
-"ALB","Albania","AL","8","ALB","339","3","8","ALB","914","AL","Albania","783754","ALBANIA","Albania","3","Albania","71","","","",""
-"AND","Andorra","AD","20","AND","232","6","20","ADO","","AN","Andorra","3041565","ANDORRA","","","","","","","",""
-"ANT","Netherlands Antilles","AN","530","","","","","ANT","353","NT","Netherlands Antilles","","NETHERLANDS ANTILLES","Netherlands Antilles","211","Netherlands Antilles","361","Netherlands Antilles","ANT","AN","530"
-"ARE","United Arab Emirates","AE","784","UAE","696","225","784","ARE","466","AE","United Arab Emirates","290557","UNITED ARAB EMIRATES","United Arab Emirates","140","United Arab Emirates","576","","","",""
\ No newline at end of file
diff --git a/test/integration/069_build_tests/snapshots/snap_0.sql b/test/integration/069_build_tests/snapshots/snap_0.sql
deleted file mode 100644
index 03e8e491f21..00000000000
--- a/test/integration/069_build_tests/snapshots/snap_0.sql
+++ /dev/null
@@ -1,16 +0,0 @@
-{% snapshot snap_0 %}
-
-{{
-    config(
-      target_database=database,
-      target_schema=schema,
-      unique_key='iso3',
-
-      strategy='timestamp',
-      updated_at='snap_0_updated_at',
-    )
-}}
-
-select *, current_timestamp as snap_0_updated_at from {{ ref('model_0') }}
-
-{% endsnapshot %}
\ No newline at end of file
diff --git a/test/integration/069_build_tests/snapshots/snap_1.sql b/test/integration/069_build_tests/snapshots/snap_1.sql
deleted file mode 100644
index 90455ed4625..00000000000
--- a/test/integration/069_build_tests/snapshots/snap_1.sql
+++ /dev/null
@@ -1,39 +0,0 @@
-{% snapshot snap_1 %}
-
-{{
-    config(
-      target_database=database,
-      target_schema=schema,
-      unique_key='iso3',
-
-      strategy='timestamp',
-      updated_at='snap_1_updated_at',
-    )
-}}
-
-SELECT
-    iso3,
-    "name",
-    iso2,
-    iso_numeric,
-    cow_alpha,
cow_numeric, - fao_code, - un_code, - wb_code, - imf_code, - fips, - geonames_name, - geonames_id, - r_name, - aiddata_name, - aiddata_code, - oecd_name, - oecd_code, - historical_name, - historical_iso3, - historical_iso2, - historical_iso_numeric, - current_timestamp as snap_1_updated_at from {{ ref('model_1') }} - -{% endsnapshot %} \ No newline at end of file diff --git a/test/integration/069_build_tests/snapshots/snap_99.sql b/test/integration/069_build_tests/snapshots/snap_99.sql deleted file mode 100644 index 5288dbbb805..00000000000 --- a/test/integration/069_build_tests/snapshots/snap_99.sql +++ /dev/null @@ -1,15 +0,0 @@ -{% snapshot snap_99 %} - -{{ - config( - target_database=database, - target_schema=schema, - strategy='timestamp', - unique_key='num', - updated_at='snap_99_updated_at', - ) -}} - -select *, current_timestamp as snap_99_updated_at from {{ ref('model_99') }} - -{% endsnapshot %} \ No newline at end of file diff --git a/test/integration/069_build_tests/test-files/model_b.sql b/test/integration/069_build_tests/test-files/model_b.sql deleted file mode 100644 index 24cb03c7e01..00000000000 --- a/test/integration/069_build_tests/test-files/model_b.sql +++ /dev/null @@ -1 +0,0 @@ -select * from {{ ref('model_a') }} diff --git a/test/integration/069_build_tests/test-files/model_b_null.sql b/test/integration/069_build_tests/test-files/model_b_null.sql deleted file mode 100644 index 4e5224ddf72..00000000000 --- a/test/integration/069_build_tests/test-files/model_b_null.sql +++ /dev/null @@ -1 +0,0 @@ -select null from {{ ref('model_a') }} diff --git a/test/integration/069_build_tests/test_build.py b/test/integration/069_build_tests/test_build.py deleted file mode 100644 index 628367082e7..00000000000 --- a/test/integration/069_build_tests/test_build.py +++ /dev/null @@ -1,143 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile, normalize -import yaml -import shutil -import os - - -class TestBuildBase(DBTIntegrationTest): - @property - def schema(self): - return "build_test_069" - - @property - def project_config(self): - return { - "config-version": 2, - "snapshot-paths": ["snapshots"], - "seed-paths": ["seeds"], - "seeds": { - "quote_columns": False, - }, - } - - def build(self, expect_pass=True, extra_args=None, **kwargs): - args = ["build"] - if kwargs: - args.extend(("--args", yaml.safe_dump(kwargs))) - if extra_args: - args.extend(extra_args) - - return self.run_dbt(args, expect_pass=expect_pass) - - -class TestPassingBuild(TestBuildBase): - @property - def models(self): - return "models" - - @use_profile("postgres") - def test__postgres_build_happy_path(self): - self.build() - - -class TestFailingBuild(TestBuildBase): - @property - def models(self): - return "models-failing" - - @use_profile("postgres") - def test__postgres_build_happy_path(self): - results = self.build(expect_pass=False) - self.assertEqual(len(results), 13) - actual = [r.status for r in results] - expected = ['error']*1 + ['skipped']*5 + ['pass']*2 + ['success']*5 - self.assertEqual(sorted(actual), sorted(expected)) - - -class TestFailingTestsBuild(TestBuildBase): - @property - def models(self): - return "tests-failing" - - @use_profile("postgres") - def test__postgres_failing_test_skips_downstream(self): - results = self.build(expect_pass=False) - self.assertEqual(len(results), 13) - actual = [str(r.status) for r in results] - expected = ['fail'] + ['skipped']*6 + ['pass']*2 + ['success']*4 - self.assertEqual(sorted(actual), sorted(expected)) - - -class 
TestCircularRelationshipTestsBuild(TestBuildBase): - @property - def models(self): - return "models-circular-relationship" - - @use_profile("postgres") - def test__postgres_circular_relationship_test_success(self): - """ Ensure that tests that refer to each other's model don't create - a circular dependency. """ - results = self.build() - actual = [r.status for r in results] - expected = ['success']*7 + ['pass']*2 - self.assertEqual(sorted(actual), sorted(expected)) - - -class TestSimpleBlockingTest(TestBuildBase): - @property - def models(self): - return "models-simple-blocking" - - @property - def project_config(self): - return { - "config-version": 2, - "snapshot-paths": ["does-not-exist"], - "seed-paths": ["does-not-exist"], - } - - @use_profile("postgres") - def test__postgres_simple_blocking_test(self): - """ Ensure that a failed test on model_a always blocks model_b """ - results = self.build(expect_pass=False) - actual = [r.status for r in results] - expected = ['success', 'fail', 'skipped'] - self.assertEqual(sorted(actual), sorted(expected)) - - -class TestInterdependentModels(TestBuildBase): - - @property - def project_config(self): - return { - "config-version": 2, - "snapshot-paths": ["snapshots-none"], - "seeds": { - "quote_columns": False, - }, - } - - @property - def models(self): - return "models-interdependent" - - def tearDown(self): - if os.path.exists(normalize('models-interdependent/model_b.sql')): - os.remove(normalize('models-interdependent/model_b.sql')) - - - @use_profile("postgres") - def test__postgres_interdependent_models(self): - # check that basic build works - shutil.copyfile('test-files/model_b.sql', 'models-interdependent/model_b.sql') - results = self.build() - self.assertEqual(len(results), 16) - - # return null from model_b - shutil.copyfile('test-files/model_b_null.sql', 'models-interdependent/model_b.sql') - results = self.build(expect_pass=False) - self.assertEqual(len(results), 16) - actual = [str(r.status) for r in results] - expected = ['error']*4 + ['skipped']*7 + ['pass']*2 + ['success']*3 - self.assertEqual(sorted(actual), sorted(expected)) - diff --git a/test/integration/069_build_tests/tests-failing/model_0.sql b/test/integration/069_build_tests/tests-failing/model_0.sql deleted file mode 100644 index 2fe54b32418..00000000000 --- a/test/integration/069_build_tests/tests-failing/model_0.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -select * from {{ ref('countries') }} \ No newline at end of file diff --git a/test/integration/069_build_tests/tests-failing/model_1.sql b/test/integration/069_build_tests/tests-failing/model_1.sql deleted file mode 100644 index 058c968c760..00000000000 --- a/test/integration/069_build_tests/tests-failing/model_1.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -select * from {{ ref('snap_0') }} diff --git a/test/integration/069_build_tests/tests-failing/model_2.sql b/test/integration/069_build_tests/tests-failing/model_2.sql deleted file mode 100644 index 25bea5224cf..00000000000 --- a/test/integration/069_build_tests/tests-failing/model_2.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ config(materialized='table') }} - -select * from {{ ref('snap_1') }} \ No newline at end of file diff --git a/test/integration/069_build_tests/tests-failing/model_99.sql b/test/integration/069_build_tests/tests-failing/model_99.sql deleted file mode 100644 index 38c103e823b..00000000000 --- a/test/integration/069_build_tests/tests-failing/model_99.sql +++ /dev/null @@ -1,3 +0,0 @@ -{{ 
config(materialized='table') }} - -select '1' as "num" \ No newline at end of file diff --git a/test/integration/069_build_tests/tests-failing/test.yml b/test/integration/069_build_tests/tests-failing/test.yml deleted file mode 100644 index c6dbe0e971f..00000000000 --- a/test/integration/069_build_tests/tests-failing/test.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 2 - -models: - - name: model_0 - columns: - - name: iso3 - tests: - - unique - - not_null - - name: historical_iso_numeric - tests: - - not_null - - name: model_2 - columns: - - name: iso3 - tests: - - unique - - not_null diff --git a/tests/functional/build/fixtures.py b/tests/functional/build/fixtures.py new file mode 100644 index 00000000000..7c4d93e6186 --- /dev/null +++ b/tests/functional/build/fixtures.py @@ -0,0 +1,268 @@ +seeds__country_csv = """iso3,name,iso2,iso_numeric,cow_alpha,cow_numeric,fao_code,un_code,wb_code,imf_code,fips,geonames_name,geonames_id,r_name,aiddata_name,aiddata_code,oecd_name,oecd_code,historical_name,historical_iso3,historical_iso2,historical_iso_numeric +ABW,Aruba,AW,533,,,,533,ABW,314,AA,Aruba,3577279,ARUBA,Aruba,12,Aruba,373,,,, +AFG,Afghanistan,AF,4,AFG,700,2,4,AFG,512,AF,Afghanistan,1149361,AFGHANISTAN,Afghanistan,1,Afghanistan,625,,,, +AGO,Angola,AO,24,ANG,540,7,24,AGO,614,AO,Angola,3351879,ANGOLA,Angola,7,Angola,225,,,, +AIA,Anguilla,AI,660,,,,660,AIA,312,AV,Anguilla,3573511,ANGUILLA,Anguilla,8,Anguilla,376,,,, +ALA,Aland Islands,AX,248,,,,248,ALA,,,Aland Islands,661882,ALAND ISLANDS,,,,,,,, +ALB,Albania,AL,8,ALB,339,3,8,ALB,914,AL,Albania,783754,ALBANIA,Albania,3,Albania,71,,,, +AND,Andorra,AD,20,AND,232,6,20,ADO,,AN,Andorra,3041565,ANDORRA,,,,,,,, +ANT,Netherlands Antilles,AN,530,,,,,ANT,353,NT,Netherlands Antilles,,NETHERLANDS ANTILLES,Netherlands Antilles,211,Netherlands Antilles,361,Netherlands Antilles,ANT,AN,530 +ARE,United Arab Emirates,AE,784,UAE,696,225,784,ARE,466,AE,United Arab Emirates,290557,UNITED ARAB EMIRATES,United Arab Emirates,140,United Arab Emirates,576,,,, +""" + +snapshots__snap_0 = """ +{% snapshot snap_0 %} + +{{ + config( + target_database=database, + target_schema=schema, + unique_key='iso3', + + strategy='timestamp', + updated_at='snap_0_updated_at', + ) +}} + +select *, current_timestamp as snap_0_updated_at from {{ ref('model_0') }} + +{% endsnapshot %} +""" + +snapshots__snap_1 = """ +{% snapshot snap_1 %} + +{{ + config( + target_database=database, + target_schema=schema, + unique_key='iso3', + + strategy='timestamp', + updated_at='snap_1_updated_at', + ) +}} + +SELECT + iso3, + name, + iso2, + iso_numeric, + cow_alpha, + cow_numeric, + fao_code, + un_code, + wb_code, + imf_code, + fips, + geonames_name, + geonames_id, + r_name, + aiddata_name, + aiddata_code, + oecd_name, + oecd_code, + historical_name, + historical_iso3, + historical_iso2, + historical_iso_numeric, + current_timestamp as snap_1_updated_at from {{ ref('model_1') }} + +{% endsnapshot %} +""" + +snapshots__snap_99 = """ +{% snapshot snap_99 %} + +{{ + config( + target_database=database, + target_schema=schema, + strategy='timestamp', + unique_key='num', + updated_at='snap_99_updated_at', + ) +}} + +select *, current_timestamp as snap_99_updated_at from {{ ref('model_99') }} + +{% endsnapshot %} +""" + +models__model_0_sql = """ +{{ config(materialized='table') }} + +select * from {{ ref('countries') }} +""" + +models__model_1_sql = """ +{{ config(materialized='table') }} + +select * from {{ ref('snap_0') }} +""" + +models__model_2_sql = """ +{{ config(materialized='table') }} + +select * 
from {{ ref('snap_1') }} +""" + +models__model_3_sql = """ +{{ config(materialized='table') }} + +select * from {{ ref('model_1') }} +""" + +models__model_99_sql = """ +{{ config(materialized='table') }} + +select '1' as "num" +""" + +models__test_yml = """ +version: 2 + +models: + - name: model_0 + columns: + - name: iso3 + tests: + - unique + - not_null + - name: model_2 + columns: + - name: iso3 + tests: + - unique + - not_null +""" + +models_failing_tests__tests_yml = """ +version: 2 + +models: + - name: model_0 + columns: + - name: iso3 + tests: + - unique + - not_null + - name: historical_iso_numeric + tests: + - not_null + - name: model_2 + columns: + - name: iso3 + tests: + - unique + - not_null +""" + +models_failing__model_1_sql = """ +{{ config(materialized='table') }} + +select bad_column from {{ ref('snap_0') }} +""" + + +models_circular_relationship__test_yml = """ +version: 2 + +models: + - name: model_0 + columns: + - name: iso3 + tests: + - relationships: + to: ref('model_1') + field: iso3 + + - name: model_1 + columns: + - name: iso3 + tests: + - relationships: + to: ref('model_0') + field: iso3 + +""" + +models_simple_blocking__model_a_sql = """ +select null as id +""" + +models_simple_blocking__model_b_sql = """ +select * from {{ ref('model_a') }} +""" + +models_simple_blocking__test_yml = """ +version: 2 + +models: + - name: model_a + columns: + - name: id + tests: + - not_null +""" + +models_interdependent__model_a_sql = """ +select 1 as id +""" + +models_interdependent__model_b_sql = """ +select * from {{ ref('model_a') }} +""" + +models_interdependent__model_b_null_sql = """ +select null from {{ ref('model_a') }} +""" + + +models_interdependent__model_c_sql = """ +select * from {{ ref('model_b') }} +""" + +models_interdependent__test_yml = """ +version: 2 + +models: + - name: model_a + columns: + - name: id + tests: + - unique + - not_null + - relationships: + to: ref('model_b') + field: id + - relationships: + to: ref('model_c') + field: id + + - name: model_b + columns: + - name: id + tests: + - unique + - not_null + - relationships: + to: ref('model_a') + field: id + - relationships: + to: ref('model_c') + field: id + + - name: model_c + columns: + - name: id + tests: + - unique + - not_null + - relationships: + to: ref('model_a') + field: id + - relationships: + to: ref('model_b') + field: id +""" diff --git a/tests/functional/build/test_build.py b/tests/functional/build/test_build.py new file mode 100644 index 00000000000..eb9529be102 --- /dev/null +++ b/tests/functional/build/test_build.py @@ -0,0 +1,198 @@ +import pytest + +from dbt.tests.util import run_dbt +from tests.functional.build.fixtures import ( + seeds__country_csv, + snapshots__snap_0, + snapshots__snap_1, + snapshots__snap_99, + models__test_yml, + models__model_0_sql, + models__model_1_sql, + models__model_2_sql, + models__model_3_sql, + models__model_99_sql, + models_failing__model_1_sql, + models_circular_relationship__test_yml, + models_failing_tests__tests_yml, + models_simple_blocking__model_a_sql, + models_simple_blocking__model_b_sql, + models_simple_blocking__test_yml, + models_interdependent__test_yml, + models_interdependent__model_a_sql, + models_interdependent__model_b_sql, + models_interdependent__model_b_null_sql, + models_interdependent__model_c_sql, +) + + +class TestBuildBase: + @pytest.fixture(scope="class") + def seeds(self): + return {"countries.csv": seeds__country_csv} + + @pytest.fixture(scope="class") + def snapshots(self): + return { + "snap_0.sql": snapshots__snap_0, + 
"snap_1.sql": snapshots__snap_1, + "snap_99.sql": snapshots__snap_99, + } + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + } + + +class TestPassingBuild(TestBuildBase): + @pytest.fixture(scope="class") + def models(self): + return { + "model_0.sql": models__model_0_sql, + "model_1.sql": models__model_1_sql, + "model_2.sql": models__model_2_sql, + "model_99.sql": models__model_99_sql, + "test.yml": models__test_yml, + } + + def test_build_happy_path(self, project): + run_dbt(["build"]) + + +class TestFailingBuild(TestBuildBase): + @pytest.fixture(scope="class") + def models(self): + return { + "model_0.sql": models__model_0_sql, + "model_1.sql": models_failing__model_1_sql, + "model_2.sql": models__model_2_sql, + "model_3.sql": models__model_3_sql, + "model_99.sql": models__model_99_sql, + "test.yml": models__test_yml, + } + + def test_failing_test_skips_downstream(self, project): + results = run_dbt(["build"], expect_pass=False) + assert len(results) == 13 + actual = [str(r.status) for r in results] + expected = ["error"] * 1 + ["skipped"] * 5 + ["pass"] * 2 + ["success"] * 5 + + assert sorted(actual) == sorted(expected) + + +class TestFailingTestsBuild(TestBuildBase): + @pytest.fixture(scope="class") + def models(self): + return { + "model_0.sql": models__model_0_sql, + "model_1.sql": models__model_1_sql, + "model_2.sql": models__model_2_sql, + "model_99.sql": models__model_99_sql, + "test.yml": models_failing_tests__tests_yml, + } + + def test_failing_test_skips_downstream(self, project): + results = run_dbt(["build"], expect_pass=False) + assert len(results) == 13 + actual = [str(r.status) for r in results] + expected = ["fail"] + ["skipped"] * 6 + ["pass"] * 2 + ["success"] * 4 + assert sorted(actual) == sorted(expected) + + +class TestCircularRelationshipTestsBuild(TestBuildBase): + @pytest.fixture(scope="class") + def models(self): + return { + "model_0.sql": models__model_0_sql, + "model_1.sql": models__model_1_sql, + "model_99.sql": models__model_99_sql, + "test.yml": models_circular_relationship__test_yml, + } + + def test_circular_relationship_test_success(self, project): + """Ensure that tests that refer to each other's model don't create + a circular dependency.""" + results = run_dbt(["build"]) + actual = [str(r.status) for r in results] + expected = ["success"] * 7 + ["pass"] * 2 + + assert sorted(actual) == sorted(expected) + + +class TestSimpleBlockingTest: + @pytest.fixture(scope="class") + def models(self): + return { + "model_a.sql": models_simple_blocking__model_a_sql, + "model_b.sql": models_simple_blocking__model_b_sql, + "test.yml": models_simple_blocking__test_yml, + } + + def test_simple_blocking_test(self, project): + """Ensure that a failed test on model_a always blocks model_b""" + results = run_dbt(["build"], expect_pass=False) + actual = [r.status for r in results] + expected = ["success", "fail", "skipped"] + assert sorted(actual) == sorted(expected) + + +class TestInterdependentModels: + @pytest.fixture(scope="class") + def seeds(self): + return {"countries.csv": seeds__country_csv} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + } + + @pytest.fixture(scope="class") + def models(self): + return { + "model_a.sql": models_interdependent__model_a_sql, + "model_b.sql": models_interdependent__model_b_sql, + "model_c.sql": models_interdependent__model_c_sql, + "test.yml": models_interdependent__test_yml, + } + 
+ def test_interdependent_models(self, project): + results = run_dbt(["build"]) + assert len(results) == 16 + + +class TestInterdependentModelsFail: + @pytest.fixture(scope="class") + def seeds(self): + return {"countries.csv": seeds__country_csv} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + } + + @pytest.fixture(scope="class") + def models(self): + return { + "model_a.sql": models_interdependent__model_a_sql, + "model_b.sql": models_interdependent__model_b_null_sql, + "model_c.sql": models_interdependent__model_c_sql, + "test.yml": models_interdependent__test_yml, + } + + def test_interdependent_models_fail(self, project): + results = run_dbt(["build"], expect_pass=False) + assert len(results) == 16 + + actual = [str(r.status) for r in results] + expected = ["error"] * 4 + ["skipped"] * 7 + ["pass"] * 2 + ["success"] * 3 + assert sorted(actual) == sorted(expected) From a181cee6aef38eeb20e68c81cbee557b8db09968 Mon Sep 17 00:00:00 2001 From: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Date: Fri, 20 Jan 2023 13:46:36 -0700 Subject: [PATCH 127/156] Improve error message for packages missing `dbt_project.yml` (#6685) * Improve error message for packages missing `dbt_project.yml` * Use black formatting * Update capitalization of expected error message --- .changes/unreleased/Features-20230120-112921.yaml | 6 ++++++ core/dbt/config/project.py | 9 ++++++--- test/unit/test_config.py | 2 +- 3 files changed, 13 insertions(+), 4 deletions(-) create mode 100644 .changes/unreleased/Features-20230120-112921.yaml diff --git a/.changes/unreleased/Features-20230120-112921.yaml b/.changes/unreleased/Features-20230120-112921.yaml new file mode 100644 index 00000000000..01532220a7f --- /dev/null +++ b/.changes/unreleased/Features-20230120-112921.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Improve error message for packages missing `dbt_project.yml` +time: 2023-01-20T11:29:21.509967-07:00 +custom: + Author: dbeatty10 + Issue: "6663" diff --git a/core/dbt/config/project.py b/core/dbt/config/project.py index 7f0398f53c6..861785cf0c9 100644 --- a/core/dbt/config/project.py +++ b/core/dbt/config/project.py @@ -75,6 +75,11 @@ {error} """ +MISSING_DBT_PROJECT_ERROR = """\ +No dbt_project.yml found at expected path {path} +Verify that each entry within packages.yml (and their transitive dependencies) contains a file named dbt_project.yml +""" + @runtime_checkable class IsFQNResource(Protocol): @@ -163,9 +168,7 @@ def _raw_project_from(project_root: str) -> Dict[str, Any]: # get the project.yml contents if not path_exists(project_yaml_filepath): - raise DbtProjectError( - "no dbt_project.yml found at expected path {}".format(project_yaml_filepath) - ) + raise DbtProjectError(MISSING_DBT_PROJECT_ERROR.format(path=project_yaml_filepath)) project_dict = _load_yaml(project_yaml_filepath) diff --git a/test/unit/test_config.py b/test/unit/test_config.py index 4c1707d28b9..d3523b16aa8 100644 --- a/test/unit/test_config.py +++ b/test/unit/test_config.py @@ -798,7 +798,7 @@ def test_no_project(self): with self.assertRaises(dbt.exceptions.DbtProjectError) as exc: dbt.config.Project.from_project_root(self.project_dir, renderer) - self.assertIn('no dbt_project.yml', str(exc.exception)) + self.assertIn('No dbt_project.yml', str(exc.exception)) def test_invalid_version(self): self.default_project_data['require-dbt-version'] = 'hello!' 
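For illustration only, a minimal sketch (not part of the patch above) of how the improved error template renders once formatted; the package path is a hypothetical placeholder, not a value taken from the diff:

# Reproduces the MISSING_DBT_PROJECT_ERROR template added in core/dbt/config/project.py.
# The path below is a hypothetical example of a package missing its project file.
MISSING_DBT_PROJECT_ERROR = """\
No dbt_project.yml found at expected path {path}
Verify that each entry within packages.yml (and their transitive dependencies) contains a file named dbt_project.yml
"""

print(MISSING_DBT_PROJECT_ERROR.format(path="dbt_packages/my_package/dbt_project.yml"))
# No dbt_project.yml found at expected path dbt_packages/my_package/dbt_project.yml
# Verify that each entry within packages.yml (and their transitive dependencies) contains a file named dbt_project.yml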
From 93619a9a3761d3606d257c1274828ac4eef4f627 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Fri, 20 Jan 2023 16:37:54 -0800 Subject: [PATCH 128/156] Ct 738/dbt debug log fix (#6541) * Code cleanup and adding stderr to capture dbt * Debug with --log-format json now prints structured logs. * Add changelog. * Move logs into miscellaneous and add values to test. * nix whitespace and fix log levels * List will now do structured logging when log format set to json. * Add a quick None check. * Add a get guard to class check. * Better null checking * The boolean doesn't reflect the original logic but a try-catch does. * Address some code review comments and get us working again. * Simplify logic now that we have a namespace object for self.config.args. * Simplify logic for json log format checking. * Simplify code for allowing our GraphTest cases to pass while also hiding compile stats from dbt ls/list . * Simplify structured logging types. * Fix up boolean logic and simplify via De'Morgan. * Nix unneeded fixture. Co-authored-by: Mila Page --- .../unreleased/Features-20230107-003157.yaml | 6 ++ core/dbt/compilation.py | 18 +++-- core/dbt/events/proto_types.py | 41 +++++++++++- core/dbt/events/types.proto | 31 +++++++++ core/dbt/events/types.py | 27 ++++++++ core/dbt/main.py | 14 ++-- core/dbt/task/debug.py | 67 ++++++++++++------- core/dbt/task/list.py | 35 +++++----- test/unit/test_graph.py | 7 +- tests/functional/profiles/test_profile_dir.py | 26 +++---- tests/unit/test_events.py | 3 + 11 files changed, 198 insertions(+), 77 deletions(-) create mode 100644 .changes/unreleased/Features-20230107-003157.yaml diff --git a/.changes/unreleased/Features-20230107-003157.yaml b/.changes/unreleased/Features-20230107-003157.yaml new file mode 100644 index 00000000000..27858b516be --- /dev/null +++ b/.changes/unreleased/Features-20230107-003157.yaml @@ -0,0 +1,6 @@ +kind: Features +body: Have dbt debug spit out structured json logs with flags enabled. +time: 2023-01-07T00:31:57.516063-08:00 +custom: + Author: versusfacit + Issue: "5353" diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index c0237b0a993..a89f36d9f31 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -1,11 +1,12 @@ -import os -from collections import defaultdict -from typing import List, Dict, Any, Tuple, Optional - +import argparse import networkx as nx # type: ignore +import os import pickle import sqlparse +from collections import defaultdict +from typing import List, Dict, Any, Tuple, Optional + from dbt import flags from dbt.adapters.factory import get_adapter from dbt.clients import jinja @@ -32,6 +33,7 @@ from dbt.node_types import NodeType, ModelLanguage from dbt.events.format import pluralize import dbt.tracking +import dbt.task.list as list_task graph_file_name = "graph.gpickle" @@ -473,7 +475,13 @@ def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph if write: self.write_graph_file(linker, manifest) - print_compile_stats(stats) + + # Do not print these for ListTask's + if not ( + self.config.args.__class__ == argparse.Namespace + and self.config.args.cls == list_task.ListTask + ): + print_compile_stats(stats) return Graph(linker.graph) diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 3fb92eeda51..0bae4be273f 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -1,5 +1,5 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: types.proto +# sources: core/dbt/events/types.proto # plugin: python-betterproto from dataclasses import dataclass from datetime import datetime @@ -2847,6 +2847,45 @@ class RunResultWarningMessageMsg(betterproto.Message): data: "RunResultWarningMessage" = betterproto.message_field(2) +@dataclass +class DebugCmdOut(betterproto.Message): + """Z047""" + + msg: str = betterproto.string_field(1) + + +@dataclass +class DebugCmdOutMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "DebugCmdOut" = betterproto.message_field(2) + + +@dataclass +class DebugCmdResult(betterproto.Message): + """Z048""" + + msg: str = betterproto.string_field(1) + + +@dataclass +class DebugCmdResultMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "DebugCmdResult" = betterproto.message_field(2) + + +@dataclass +class ListCmdOut(betterproto.Message): + """Z049""" + + msg: str = betterproto.string_field(1) + + +@dataclass +class ListCmdOutMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "ListCmdOut" = betterproto.message_field(2) + + @dataclass class IntegrationTestInfo(betterproto.Message): """T001""" diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 87a10c19eda..fc251042005 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -2258,6 +2258,37 @@ message RunResultWarningMessageMsg { RunResultWarningMessage data = 2; } +// Z047 +message DebugCmdOut { + string msg = 1; +} + +message DebugCmdOutMsg { + EventInfo info = 1; + DebugCmdOut data = 2; +} + +// Z048 +message DebugCmdResult { + string msg = 1; +} + +message DebugCmdResultMsg { + EventInfo info = 1; + DebugCmdResult data = 2; +} + +// Z049 +message ListCmdOut { + string msg = 1; +} + +message ListCmdOutMsg { + EventInfo info = 1; + ListCmdOut data = 2; +} + + // T - Integration tests // T001 diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index 043ed8d40ff..e74327be179 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -2345,3 +2345,30 @@ def code(self): def message(self) -> str: # This is the message on the result object, cannot be formatted in event return self.msg + + +@dataclass +class DebugCmdOut(InfoLevel, pt.DebugCmdOut): + def code(self): + return "Z047" + + def message(self) -> str: + return self.msg + + +@dataclass +class DebugCmdResult(InfoLevel, pt.DebugCmdResult): + def code(self): + return "Z048" + + def message(self) -> str: + return self.msg + + +@dataclass +class ListCmdOut(InfoLevel, pt.ListCmdOut): + def code(self): + return "Z049" + + def message(self) -> str: + return self.msg diff --git a/core/dbt/main.py b/core/dbt/main.py index a6c6f0b013d..429d823be52 100644 --- a/core/dbt/main.py +++ b/core/dbt/main.py @@ -229,15 +229,15 @@ def run_from_args(parsed): if task.config is not None: log_path = getattr(task.config, "log_path", None) log_manager.set_path(log_path) - # if 'list' task: set stdout to WARN instead of INFO - level_override = parsed.cls.pre_init_hook(parsed) - setup_event_logger(log_path or "logs", level_override) + setup_event_logger(log_path or "logs") - fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION)) - fire_event(MainReportArgs(args=args_to_dict(parsed))) + # For the ListTask, filter out system report logs to allow piping ls output to jq, etc + if not list_task.ListTask == parsed.cls: + fire_event(MainReportVersion(version=str(dbt.version.installed), log_version=LOG_VERSION)) + 
fire_event(MainReportArgs(args=args_to_dict(parsed))) - if dbt.tracking.active_user is not None: # mypy appeasement, always true - fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state())) + if dbt.tracking.active_user is not None: # mypy appeasement, always true + fire_event(MainTrackingUserState(user_state=dbt.tracking.active_user.state())) results = None diff --git a/core/dbt/task/debug.py b/core/dbt/task/debug.py index 5f3e3854759..7460c08cc84 100644 --- a/core/dbt/task/debug.py +++ b/core/dbt/task/debug.py @@ -5,7 +5,11 @@ from typing import Optional, Dict, Any, List from dbt.events.functions import fire_event -from dbt.events.types import OpenCommand +from dbt.events.types import ( + OpenCommand, + DebugCmdOut, + DebugCmdResult, +) from dbt import flags import dbt.clients.system import dbt.exceptions @@ -99,25 +103,25 @@ def run(self): return not self.any_failure version = get_installed_version().to_version_string(skip_matcher=True) - print("dbt version: {}".format(version)) - print("python version: {}".format(sys.version.split()[0])) - print("python path: {}".format(sys.executable)) - print("os info: {}".format(platform.platform())) - print("Using profiles.yml file at {}".format(self.profile_path)) - print("Using dbt_project.yml file at {}".format(self.project_path)) - print("") + fire_event(DebugCmdOut(msg="dbt version: {}".format(version))) + fire_event(DebugCmdOut(msg="python version: {}".format(sys.version.split()[0]))) + fire_event(DebugCmdOut(msg="python path: {}".format(sys.executable))) + fire_event(DebugCmdOut(msg="os info: {}".format(platform.platform()))) + fire_event(DebugCmdOut(msg="Using profiles.yml file at {}".format(self.profile_path))) + fire_event(DebugCmdOut(msg="Using dbt_project.yml file at {}".format(self.project_path))) self.test_configuration() self.test_dependencies() self.test_connection() if self.any_failure: - print(red(f"{(pluralize(len(self.messages), 'check'))} failed:")) + fire_event( + DebugCmdResult(msg=red(f"{(pluralize(len(self.messages), 'check'))} failed:")) + ) else: - print(green("All checks passed!")) + fire_event(DebugCmdResult(msg=green("All checks passed!"))) for message in self.messages: - print(message) - print("") + fire_event(DebugCmdResult(msg=f"{message}\n")) return not self.any_failure @@ -273,21 +277,33 @@ def test_git(self): return green("OK found") def test_dependencies(self): - print("Required dependencies:") - print(" - git [{}]".format(self.test_git())) - print("") + fire_event(DebugCmdOut(msg="Required dependencies:")) + + logline_msg = self.test_git() + fire_event(DebugCmdResult(msg=f" - git [{logline_msg}]\n")) def test_configuration(self): + fire_event(DebugCmdOut(msg="Configuration:")) + profile_status = self._load_profile() + fire_event(DebugCmdOut(msg=f" profiles.yml file [{profile_status}]")) + project_status = self._load_project() - print("Configuration:") - print(" profiles.yml file [{}]".format(profile_status)) - print(" dbt_project.yml file [{}]".format(project_status)) + fire_event(DebugCmdOut(msg=f" dbt_project.yml file [{project_status}]")) + # skip profile stuff if we can't find a profile name if self.profile_name is not None: - print(" profile: {} [{}]".format(self.profile_name, self._profile_found())) - print(" target: {} [{}]".format(self.target_name, self._target_found())) - print("") + fire_event( + DebugCmdOut( + msg=" profile: {} [{}]\n".format(self.profile_name, self._profile_found()) + ) + ) + fire_event( + DebugCmdOut( + msg=" target: {} [{}]\n".format(self.target_name, 
self._target_found()) + ) + ) + self._log_project_fail() self._log_profile_fail() @@ -348,11 +364,12 @@ def _connection_result(self): def test_connection(self): if not self.profile: return - print("Connection:") + fire_event(DebugCmdOut(msg="Connection:")) for k, v in self.profile.credentials.connection_info(): - print(" {}: {}".format(k, v)) - print(" Connection test: [{}]".format(self._connection_result())) - print("") + fire_event(DebugCmdOut(msg=f" {k}: {v}")) + + res = self._connection_result() + fire_event(DebugCmdOut(msg=f" Connection test: [{res}]\n")) @classmethod def validate_connection(cls, target_dict): diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index fa8d3ccd8d2..411bed3da4c 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -1,15 +1,21 @@ import json +import dbt.flags + from dbt.contracts.graph.nodes import Exposure, SourceDefinition, Metric from dbt.graph import ResourceTypeSelector from dbt.task.runnable import GraphRunnableTask, ManifestTask from dbt.task.test import TestSelector from dbt.node_types import NodeType -from dbt.events.functions import warn_or_error -from dbt.events.types import NoNodesSelected +from dbt.events.functions import ( + fire_event, + warn_or_error, +) +from dbt.events.types import ( + NoNodesSelected, + ListCmdOut, +) from dbt.exceptions import DbtRuntimeError, DbtInternalError -from dbt.logger import log_manager -from dbt.events.eventmgr import EventLevel class ListTask(GraphRunnableTask): @@ -50,20 +56,6 @@ def __init__(self, args, config): '"models" and "resource_type" are mutually exclusive ' "arguments" ) - @classmethod - def pre_init_hook(cls, args): - """A hook called before the task is initialized.""" - # Filter out all INFO-level logging to allow piping ls output to jq, etc - # WARN level will still include all warnings + errors - # Do this by: - # - returning the log level so that we can pass it into the 'level_override' - # arg of events.functions.setup_event_logger() -- good! - # - mutating the initialized, not-yet-configured STDOUT event logger - # because it's being configured too late -- bad! TODO refactor! - log_manager.stderr_console() - super().pre_init_hook(args) - return EventLevel.WARN - def _iterate_selected_nodes(self): selector = self.get_node_selector() spec = self.get_selection_spec() @@ -148,9 +140,14 @@ def run(self): return self.output_results(generator()) def output_results(self, results): + """Log, or output a plain, newline-delimited, and ready-to-pipe list of nodes found.""" for result in results: self.node_results.append(result) - print(result) + if dbt.flags.LOG_FORMAT == "json": + fire_event(ListCmdOut(msg=result)) + else: + # Cleaner to leave as print than to mutate the logger not to print timestamps. 
+ print(result) return self.node_results @property diff --git a/test/unit/test_graph.py b/test/unit/test_graph.py index 5534fe21f19..3f330717608 100644 --- a/test/unit/test_graph.py +++ b/test/unit/test_graph.py @@ -59,6 +59,7 @@ def setUp(self): # Create file filesystem searcher self.filesystem_search = patch('dbt.parser.read_files.filesystem_search') + def mock_filesystem_search(project, relative_dirs, extension, ignore_spec): if 'sql' not in extension: return [] @@ -72,6 +73,7 @@ def mock_filesystem_search(project, relative_dirs, extension, ignore_spec): self.hook_patcher = patch.object( dbt.parser.hooks.HookParser, '__new__' ) + def create_hook_patcher(cls, project, manifest, root_project): result = MagicMock(project=project, manifest=manifest, root_project=root_project) result.__iter__.side_effect = lambda: iter([]) @@ -82,7 +84,6 @@ def create_hook_patcher(cls, project, manifest, root_project): # Create the Manifest.state_check patcher @patch('dbt.parser.manifest.ManifestLoader.build_manifest_state_check') def _mock_state_check(self): - config = self.root_project all_projects = self.all_projects return ManifestStateCheck( project_env_vars_hash=FileHash.from_contents(''), @@ -98,6 +99,7 @@ def _mock_state_check(self): # Create the source file patcher self.load_source_file_patcher = patch('dbt.parser.read_files.load_source_file') self.mock_source_file = self.load_source_file_patcher.start() + def mock_load_source_file(path, parse_file_type, project_name, saved_files): for sf in self.mock_models: if sf.path == path: @@ -117,7 +119,6 @@ def _mock_hook_path(self): ) return path - def get_config(self, extra_cfg=None): if extra_cfg is None: extra_cfg = {} @@ -224,8 +225,6 @@ def test__model_materializations(self): config = self.get_config(cfg) manifest = self.load_manifest(config) - compiler = self.get_compiler(config) - linker = compiler.compile(manifest) expected_materialization = { "model_one": "table", diff --git a/tests/functional/profiles/test_profile_dir.py b/tests/functional/profiles/test_profile_dir.py index 7a4c8214a1d..75a30512dcb 100644 --- a/tests/functional/profiles/test_profile_dir.py +++ b/tests/functional/profiles/test_profile_dir.py @@ -1,13 +1,17 @@ -import io import os import pytest import yaml -from contextlib import contextmanager, redirect_stdout + +from contextlib import contextmanager from pathlib import Path -from typing import List import dbt.flags as flags -from dbt.tests.util import run_dbt, write_file, rm_file + +from dbt.tests.util import ( + run_dbt_and_capture, + write_file, + rm_file, +) @pytest.fixture(scope="class") @@ -84,16 +88,6 @@ def environ(env): os.environ[key] = value -# Use this if you need to capture the standard out in a test -def run_dbt_and_capture_stdout(args: List[str] = None, expect_pass=True): - stringbuf = io.StringIO() - with redirect_stdout(stringbuf): - res = run_dbt(args, expect_pass=expect_pass) - stdout = stringbuf.getvalue() - - return res, stdout - - class TestProfiles: def dbt_debug(self, project_dir_cli_arg=None, profiles_dir_cli_arg=None): # begin with no command-line args or user config (from profiles.yml) @@ -106,8 +100,8 @@ def dbt_debug(self, project_dir_cli_arg=None, profiles_dir_cli_arg=None): if profiles_dir_cli_arg: command.extend(["--profiles-dir", str(profiles_dir_cli_arg)]) - # get the output of `dbt debug` regarless of the exit code - return run_dbt_and_capture_stdout(command, expect_pass=None) + # get the output of `dbt debug` regardless of the exit code + return run_dbt_and_capture(command, expect_pass=None) 
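    # Hedged sketch, not part of this patch: with `dbt debug` now routed through
    # structured events (DebugCmdOut / DebugCmdResult above), a test built on the
    # dbt_debug() helper can assert directly on the captured output. The test name
    # and asserted substring are illustrative assumptions; "Configuration:" mirrors
    # the DebugCmdOut message fired by test_configuration() in this diff.
    def test_debug_reports_configuration(self, project):  # hypothetical test
        results, stdout = self.dbt_debug()
        assert "Configuration:" in stdout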
@pytest.mark.parametrize( "project_dir_cli_arg, working_directory", diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 5e412e34f33..7fa586cefa9 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -383,6 +383,9 @@ def test_event_codes(self): FlushEventsFailure(), TrackingInitializeFailure(), RunResultWarningMessage(), + DebugCmdOut(), + DebugCmdResult(), + ListCmdOut(), # T - tests ====================== IntegrationTestInfo(), IntegrationTestDebug(), From 5c765bf3e20f30270b1d98f525fb970fb9e89b26 Mon Sep 17 00:00:00 2001 From: Sean McIntyre Date: Mon, 23 Jan 2023 17:09:09 +0100 Subject: [PATCH 129/156] Cheeky performance improvement on big DAGs (#6694) * Short-circuit set operations for nice speed boost * Add changelog * Fix issue * Update .changes/unreleased/Under the Hood-20230122-215235.yaml Co-authored-by: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> Co-authored-by: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> --- .changes/unreleased/Under the Hood-20230122-215235.yaml | 6 ++++++ core/dbt/graph/queue.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Under the Hood-20230122-215235.yaml diff --git a/.changes/unreleased/Under the Hood-20230122-215235.yaml b/.changes/unreleased/Under the Hood-20230122-215235.yaml new file mode 100644 index 00000000000..760d8ea4838 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230122-215235.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Small optimization on manifest parsing benefitting large DAGs +time: 2023-01-22T21:52:35.549814+01:00 +custom: + Author: boxysean + Issue: "6697" diff --git a/core/dbt/graph/queue.py b/core/dbt/graph/queue.py index 3c3b9625d27..dd439faf37e 100644 --- a/core/dbt/graph/queue.py +++ b/core/dbt/graph/queue.py @@ -161,7 +161,7 @@ def _find_new_additions(self) -> None: queue and add them. """ for node, in_degree in self.graph.in_degree(): - if not self._already_known(node) and in_degree == 0: + if in_degree == 0 and not self._already_known(node): self.inner.put((self._scores[node], node)) self.queued.add(node) From 7b464b8a4957ec7969f19234020e110be1987923 Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Mon, 23 Jan 2023 16:39:29 -0500 Subject: [PATCH 130/156] CT-1718: Add Note and Formatting event types (#6691) * CT-1718: Add Note and Formatting event types * CT-1718: Add changelog entry --- .../Under the Hood-20230120-172254.yaml | 7 ++++ core/dbt/events/functions.py | 6 ++-- core/dbt/events/proto_types.py | 34 +++++++++---------- core/dbt/events/types.proto | 26 +++++++------- core/dbt/events/types.py | 29 ++++++++++------ core/dbt/task/deps.py | 4 +-- core/dbt/task/printer.py | 12 +++---- core/dbt/task/run.py | 8 ++--- core/dbt/task/runnable.py | 8 ++--- core/dbt/task/seed.py | 9 +++-- core/dbt/task/serve.py | 11 ++++-- tests/unit/test_events.py | 4 +-- 12 files changed, 89 insertions(+), 69 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230120-172254.yaml diff --git a/.changes/unreleased/Under the Hood-20230120-172254.yaml b/.changes/unreleased/Under the Hood-20230120-172254.yaml new file mode 100644 index 00000000000..3f65b39f99e --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230120-172254.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Replaced the EmptyLine event with a more general Formatting event, and added + a Note event. 
+time: 2023-01-20T17:22:54.45828-05:00 +custom: + Author: peterallenwebb + Issue: "6481" diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py index f32287c3049..786e9cdf91d 100644 --- a/core/dbt/events/functions.py +++ b/core/dbt/events/functions.py @@ -3,7 +3,7 @@ from dbt.events.base_types import BaseEvent, Cache, EventLevel, NoFile, NoStdOut, EventMsg from dbt.events.eventmgr import EventManager, LoggerConfig, LineFormat, NoFilter from dbt.events.helpers import env_secrets, scrub_secrets -from dbt.events.types import EmptyLine +from dbt.events.types import Formatting import dbt.flags as flags from dbt.logger import GLOBAL_LOGGER, make_log_dir_if_missing from functools import partial @@ -65,7 +65,7 @@ def _stdout_filter( and (not isinstance(msg.data, Cache) or log_cache_events) and (EventLevel(msg.info.level) != EventLevel.DEBUG or debug_mode) and (EventLevel(msg.info.level) == EventLevel.ERROR or not quiet_mode) - and not (flags.LOG_FORMAT == "json" and type(msg.data) == EmptyLine) + and not (flags.LOG_FORMAT == "json" and type(msg.data) == Formatting) ) @@ -85,7 +85,7 @@ def _logfile_filter(log_cache_events: bool, msg: EventMsg) -> bool: return ( not isinstance(msg.data, NoFile) and not (isinstance(msg.data, Cache) and not log_cache_events) - and not (flags.LOG_FORMAT == "json" and type(msg.data) == EmptyLine) + and not (flags.LOG_FORMAT == "json" and type(msg.data) == Formatting) ) diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index 0bae4be273f..ee11e01d172 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -1859,19 +1859,6 @@ class SeedHeaderMsg(betterproto.Message): data: "SeedHeader" = betterproto.message_field(2) -@dataclass -class SeedHeaderSeparator(betterproto.Message): - """Q005""" - - len_header: int = betterproto.int32_field(1) - - -@dataclass -class SeedHeaderSeparatorMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "SeedHeaderSeparator" = betterproto.message_field(2) - - @dataclass class SQLRunnerException(betterproto.Message): """Q006""" @@ -2511,16 +2498,16 @@ class OpenCommandMsg(betterproto.Message): @dataclass -class EmptyLine(betterproto.Message): +class Formatting(betterproto.Message): """Z017""" - pass + msg: str = betterproto.string_field(1) @dataclass -class EmptyLineMsg(betterproto.Message): +class FormattingMsg(betterproto.Message): info: "EventInfo" = betterproto.message_field(1) - data: "EmptyLine" = betterproto.message_field(2) + data: "Formatting" = betterproto.message_field(2) @dataclass @@ -2886,6 +2873,19 @@ class ListCmdOutMsg(betterproto.Message): data: "ListCmdOut" = betterproto.message_field(2) +@dataclass +class Note(betterproto.Message): + """Z050""" + + msg: str = betterproto.string_field(1) + + +@dataclass +class NoteMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "Note" = betterproto.message_field(2) + + @dataclass class IntegrationTestInfo(betterproto.Message): """T001""" diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index fc251042005..2870b1bc610 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -1473,15 +1473,7 @@ message SeedHeaderMsg { SeedHeader data = 2; } -// Q005 -message SeedHeaderSeparator { - int32 len_header = 1; -} - -message SeedHeaderSeparatorMsg { - EventInfo info = 1; - SeedHeaderSeparator data = 2; -} +// Skipped Q005 // Q006 message SQLRunnerException { @@ -2004,12 +1996,13 @@ message OpenCommandMsg { } // Z017 -message 
EmptyLine { +message Formatting { + string msg = 1; } -message EmptyLineMsg { +message FormattingMsg { EventInfo info = 1; - EmptyLine data = 2; + Formatting data = 2; } // Z018 @@ -2288,6 +2281,15 @@ message ListCmdOutMsg { ListCmdOut data = 2; } +// Z050 +message Note { + string msg = 1; +} + +message NoteMsg { + EventInfo info = 1; + Note data = 2; +} // T - Integration tests diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index e74327be179..ab2d090a93a 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -1498,15 +1498,6 @@ def message(self) -> str: return self.header -@dataclass -class SeedHeaderSeparator(InfoLevel, pt.SeedHeaderSeparator): - def code(self): - return "Q005" - - def message(self) -> str: - return "-" * self.len_header - - @dataclass class SQLRunnerException(DebugLevel, pt.SQLRunnerException): # noqa def code(self): @@ -2084,13 +2075,18 @@ def message(self) -> str: return msg + +# We use events to create console output, but also think of them as a sequence of important and +# meaningful occurrences to be used for debugging and monitoring. The Formatting event helps ease +# the tension between these two goals by allowing empty lines, heading separators, and other +# formatting to be written to the console, while they can be ignored for other purposes. For +# general information that isn't simple formatting, the Note event should be used instead. @dataclass -class EmptyLine(InfoLevel, pt.EmptyLine): +class Formatting(InfoLevel, pt.Formatting): def code(self): return "Z017" def message(self) -> str: - return "" + return self.msg @dataclass @@ -2372,3 +2368,14 @@ def code(self): def message(self) -> str: return self.msg + + +# The Note event provides a way to log messages which aren't likely to be useful as more structured events. +# For console formatting text like empty lines and separator bars, use the Formatting event instead. 
+@dataclass +class Note(InfoLevel, pt.Note): + def code(self): + return "Z050" + + def message(self) -> str: + return self.msg diff --git a/core/dbt/task/deps.py b/core/dbt/task/deps.py index 0052840c570..ac6a6c41af3 100644 --- a/core/dbt/task/deps.py +++ b/core/dbt/task/deps.py @@ -20,7 +20,7 @@ DepsInstallInfo, DepsListSubdirectory, DepsNotifyUpdatesAvailable, - EmptyLine, + Formatting, ) from dbt.clients import system @@ -88,7 +88,7 @@ def run(self) -> None: package_name=package_name, source_type=source_type, version=version ) if packages_to_upgrade: - fire_event(EmptyLine()) + fire_event(Formatting("")) fire_event(DepsNotifyUpdatesAvailable(packages=ListOfStrings(packages_to_upgrade))) @classmethod diff --git a/core/dbt/task/printer.py b/core/dbt/task/printer.py index edb2592d194..9fae854bdb4 100644 --- a/core/dbt/task/printer.py +++ b/core/dbt/task/printer.py @@ -5,7 +5,7 @@ ) from dbt.events.functions import fire_event from dbt.events.types import ( - EmptyLine, + Formatting, RunResultWarning, RunResultWarningMessage, RunResultFailure, @@ -72,14 +72,14 @@ def print_run_status_line(results) -> None: stats["total"] += 1 with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) fire_event(StatsLine(stats=stats)) def print_run_result_error(result, newline: bool = True, is_warning: bool = False) -> None: if newline: with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) if result.status == NodeStatus.Fail or (is_warning and result.status == NodeStatus.Warn): if is_warning: @@ -109,12 +109,12 @@ def print_run_result_error(result, newline: bool = True, is_warning: bool = Fals if result.node.build_path is not None: with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) fire_event(SQLCompiledPath(path=result.node.compiled_path)) if result.node.should_store_failures: with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) fire_event(CheckNodeTestFailure(relation_name=result.node.relation_name)) elif result.message is not None: @@ -143,7 +143,7 @@ def print_run_end_messages(results, keyboard_interrupt: bool = False) -> None: with DbtStatusMessage(), InvocationProcessor(): with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) fire_event( EndOfRunSummary( num_errors=len(errors), diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 411c57af663..fd24dd06ba2 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -30,7 +30,7 @@ from dbt.events.functions import fire_event, get_invocation_id from dbt.events.types import ( DatabaseErrorRunningHook, - EmptyLine, + Formatting, HooksRunning, FinishedRunningStats, LogModelResult, @@ -335,7 +335,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): num_hooks = len(ordered_hooks) with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) fire_event(HooksRunning(num_hooks=num_hooks, hook_type=hook_type)) startctx = TimestampNamed("node_started_at") @@ -388,7 +388,7 @@ def run_hooks(self, adapter, hook_type: RunHookType, extra_context): self._total_executed += len(ordered_hooks) with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) def safe_run_hooks( self, adapter, hook_type: RunHookType, extra_context: Dict[str, Any] @@ -419,7 +419,7 @@ def print_results_line(self, results, execution_time): execution = utils.humanize_execution_time(execution_time=execution_time) with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) fire_event( FinishedRunningStats( stat_line=stat_line, execution=execution, 
execution_time=execution_time diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index fee5fadc891..fd383226770 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -28,7 +28,7 @@ ) from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( - EmptyLine, + Formatting, LogCancelLine, DefaultSelector, NodeStart, @@ -377,7 +377,7 @@ def execute_nodes(self): ) ) with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) pool = ThreadPool(num_threads) try: @@ -458,7 +458,7 @@ def run(self): if len(self._flattened_nodes) == 0: with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) warn_or_error(NothingToDo()) result = self.get_result( results=[], @@ -467,7 +467,7 @@ def run(self): ) else: with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) selected_uids = frozenset(n.unique_id for n in self._flattened_nodes) result = self.execute_with_hooks(selected_uids) diff --git a/core/dbt/task/seed.py b/core/dbt/task/seed.py index 58b6aa25bda..9ec1df3b81f 100644 --- a/core/dbt/task/seed.py +++ b/core/dbt/task/seed.py @@ -12,8 +12,7 @@ from dbt.events.functions import fire_event from dbt.events.types import ( SeedHeader, - SeedHeaderSeparator, - EmptyLine, + Formatting, LogSeedResult, LogStartLine, ) @@ -99,13 +98,13 @@ def show_table(self, result): header = "Random sample of table: {}.{}".format(schema, alias) with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) fire_event(SeedHeader(header=header)) - fire_event(SeedHeaderSeparator(len_header=len(header))) + fire_event(Formatting("-" * len(header))) rand_table.print_table(max_rows=10, max_columns=None) with TextOnly(): - fire_event(EmptyLine()) + fire_event(Formatting("")) def show_tables(self, results): for result in results: diff --git a/core/dbt/task/serve.py b/core/dbt/task/serve.py index 4d702234d0e..cbdc48a8010 100644 --- a/core/dbt/task/serve.py +++ b/core/dbt/task/serve.py @@ -6,7 +6,12 @@ from http.server import SimpleHTTPRequestHandler from socketserver import TCPServer from dbt.events.functions import fire_event -from dbt.events.types import ServingDocsPort, ServingDocsAccessInfo, ServingDocsExitInfo, EmptyLine +from dbt.events.types import ( + ServingDocsPort, + ServingDocsAccessInfo, + ServingDocsExitInfo, + Formatting, +) from dbt.task.base import ConfiguredTask @@ -22,8 +27,8 @@ def run(self): fire_event(ServingDocsPort(address=address, port=port)) fire_event(ServingDocsAccessInfo(port=port)) - fire_event(EmptyLine()) - fire_event(EmptyLine()) + fire_event(Formatting("")) + fire_event(Formatting("")) fire_event(ServingDocsExitInfo()) # mypy doesn't think SimpleHTTPRequestHandler is ok here, but it is diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 7fa586cefa9..529a11f5ed9 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -261,7 +261,6 @@ def test_event_codes(self): CompileComplete(), FreshnessCheckComplete(), SeedHeader(header=""), - SeedHeaderSeparator(len_header=0), SQLRunnerException(exc=""), LogTestResult( name="", @@ -358,7 +357,7 @@ def test_event_codes(self): ProtectedCleanPath(path=""), FinishedCleanPaths(), OpenCommand(open_cmd="", profiles_dir=""), - EmptyLine(), + Formatting(msg=""), ServingDocsPort(address="", port=0), ServingDocsAccessInfo(port=""), ServingDocsExitInfo(), @@ -386,6 +385,7 @@ def test_event_codes(self): DebugCmdOut(), DebugCmdResult(), ListCmdOut(), + Note(msg="This is a note."), # T - tests ====================== 
IntegrationTestInfo(), IntegrationTestDebug(), From 17014bfad3e204279c94c6a0285f784fc10711eb Mon Sep 17 00:00:00 2001 From: Aezo <45879156+aezomz@users.noreply.github.com> Date: Wed, 25 Jan 2023 01:58:08 +0800 Subject: [PATCH 131/156] add adapter_response for test (#6645) resolves https://github.com/dbt-labs/dbt-core/issues/2964 --- .changes/unreleased/Features-20230118-233801.yaml | 6 ++++++ core/dbt/adapters/base/impl.py | 15 ++++++++------- .../global_project/macros/adapters/freshness.sql | 2 +- core/dbt/task/freshness.py | 6 +++--- core/dbt/task/test.py | 5 ++++- .../test_custom_singular_tests.py | 5 +++++ .../sources/test_source_fresher_state.py | 2 +- tests/functional/sources/test_source_freshness.py | 2 +- 8 files changed, 29 insertions(+), 14 deletions(-) create mode 100644 .changes/unreleased/Features-20230118-233801.yaml diff --git a/.changes/unreleased/Features-20230118-233801.yaml b/.changes/unreleased/Features-20230118-233801.yaml new file mode 100644 index 00000000000..38affa143e8 --- /dev/null +++ b/.changes/unreleased/Features-20230118-233801.yaml @@ -0,0 +1,6 @@ +kind: Features +body: add adapter_response to dbt test and freshness result +time: 2023-01-18T23:38:01.857342+08:00 +custom: + Author: aezomz + Issue: "2964" diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 98b78217c14..8234f90910c 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -17,7 +17,6 @@ Iterator, Set, ) - import agate import pytz @@ -54,7 +53,7 @@ CodeExecutionStatus, CatalogGenerationError, ) -from dbt.utils import filter_null_values, executor, cast_to_str +from dbt.utils import filter_null_values, executor, cast_to_str, AttrDict from dbt.adapters.base.connections import Connection, AdapterResponse from dbt.adapters.base.meta import AdapterMeta, available @@ -943,7 +942,7 @@ def execute_macro( context_override: Optional[Dict[str, Any]] = None, kwargs: Dict[str, Any] = None, text_only_columns: Optional[Iterable[str]] = None, - ) -> agate.Table: + ) -> AttrDict: """Look macro_name up in the manifest and execute its results. :param macro_name: The name of the macro to execute. @@ -1028,7 +1027,7 @@ def _get_one_catalog( manifest=manifest, ) - results = self._catalog_filter_table(table, manifest) + results = self._catalog_filter_table(table, manifest) # type: ignore[arg-type] return results def get_catalog(self, manifest: Manifest) -> Tuple[agate.Table, List[Exception]]: @@ -1060,7 +1059,7 @@ def calculate_freshness( loaded_at_field: str, filter: Optional[str], manifest: Optional[Manifest] = None, - ) -> Dict[str, Any]: + ) -> Tuple[AdapterResponse, Dict[str, Any]]: """Calculate the freshness of sources in dbt, and return it""" kwargs: Dict[str, Any] = { "source": source, @@ -1069,7 +1068,8 @@ def calculate_freshness( } # run the macro - table = self.execute_macro(FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest) + result = self.execute_macro(FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest) + adapter_response, table = result.response, result.table # type: ignore[attr-defined] # now we have a 1-row table of the maximum `loaded_at_field` value and # the current time according to the db. 
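# A toy illustration of the freshness arithmetic applied below, with hypothetical
# timestamps standing in for the two columns of the single-row macro result:
from datetime import datetime, timezone

max_loaded_at = datetime(2023, 1, 24, 10, 0, tzinfo=timezone.utc)    # table[0][0]
snapshotted_at = datetime(2023, 1, 24, 12, 30, tzinfo=timezone.utc)  # table[0][1]
age = (snapshotted_at - max_loaded_at).total_seconds()               # 9000.0 seconds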
if len(table) != 1 or len(table[0]) != 2: @@ -1083,11 +1083,12 @@ def calculate_freshness( snapshotted_at = _utc(table[0][1], source, loaded_at_field) age = (snapshotted_at - max_loaded_at).total_seconds() - return { + freshness = { "max_loaded_at": max_loaded_at, "snapshotted_at": snapshotted_at, "age": age, } + return adapter_response, freshness def pre_model_hook(self, config: Mapping[str, Any]) -> Any: """A hook for running some operation before the model materialization diff --git a/core/dbt/include/global_project/macros/adapters/freshness.sql b/core/dbt/include/global_project/macros/adapters/freshness.sql index 6a5bd79d1d0..f18499a2391 100644 --- a/core/dbt/include/global_project/macros/adapters/freshness.sql +++ b/core/dbt/include/global_project/macros/adapters/freshness.sql @@ -12,5 +12,5 @@ where {{ filter }} {% endif %} {% endcall %} - {{ return(load_result('collect_freshness').table) }} + {{ return(load_result('collect_freshness')) }} {% endmacro %} diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index 819bc4164a3..95ff76083a9 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -105,10 +105,10 @@ def execute(self, compiled_node, manifest): ) relation = self.adapter.Relation.create_from_source(compiled_node) - # given a Source, calculate its fresnhess. + # given a Source, calculate its freshness. with self.adapter.connection_for(compiled_node): self.adapter.clear_transaction() - freshness = self.adapter.calculate_freshness( + adapter_response, freshness = self.adapter.calculate_freshness( relation, compiled_node.loaded_at_field, compiled_node.freshness.filter, @@ -124,7 +124,7 @@ def execute(self, compiled_node, manifest): timing=[], execution_time=0, message=None, - adapter_response={}, + adapter_response=adapter_response.to_dict(omit_none=True), failures=None, **freshness, ) diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index e7f449873aa..3ba1b0f85f2 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -5,6 +5,7 @@ from dbt.events.format import pluralize from dbt.dataclass_schema import dbtClassMixin import threading +from typing import Dict, Any from .compile import CompileRunner from .run import RunTask @@ -38,6 +39,7 @@ class TestResultData(dbtClassMixin): failures: int should_warn: bool should_error: bool + adapter_response: Dict[str, Any] @classmethod def validate(cls, data): @@ -137,6 +139,7 @@ def execute_test(self, test: TestNode, manifest: Manifest) -> TestResultData: map(_coerce_decimal, table.rows[0]), ) ) + test_result_dct["adapter_response"] = result["response"].to_dict(omit_none=True) TestResultData.validate(test_result_dct) return TestResultData.from_dict(test_result_dct) @@ -171,7 +174,7 @@ def execute(self, test: TestNode, manifest: Manifest): thread_id=thread_id, execution_time=0, message=message, - adapter_response={}, + adapter_response=result.adapter_response, failures=failures, ) diff --git a/tests/functional/custom_singular_tests/test_custom_singular_tests.py b/tests/functional/custom_singular_tests/test_custom_singular_tests.py index 9a8df339374..aec0586b873 100644 --- a/tests/functional/custom_singular_tests/test_custom_singular_tests.py +++ b/tests/functional/custom_singular_tests/test_custom_singular_tests.py @@ -103,3 +103,8 @@ def test_data_tests(self, project, tests): assert result.status == "fail" assert not result.skipped assert result.failures > 0 + assert result.adapter_response == { + "_message": "SELECT 1", + "code": "SELECT", + "rows_affected": 1, + } diff --git 
a/tests/functional/sources/test_source_fresher_state.py b/tests/functional/sources/test_source_fresher_state.py index a97694a9c5a..3ad69d97e6f 100644 --- a/tests/functional/sources/test_source_fresher_state.py +++ b/tests/functional/sources/test_source_fresher_state.py @@ -112,7 +112,7 @@ def _assert_freshness_results(self, path, state): "warn_after": {"count": 10, "period": "hour"}, "error_after": {"count": 18, "period": "hour"}, }, - "adapter_response": {}, + "adapter_response": {"_message": "SELECT 1", "code": "SELECT", "rows_affected": 1}, "thread_id": AnyStringWith("Thread-"), "execution_time": AnyFloat(), "timing": [ diff --git a/tests/functional/sources/test_source_freshness.py b/tests/functional/sources/test_source_freshness.py index 630f59a0205..e7e1f08ebc5 100644 --- a/tests/functional/sources/test_source_freshness.py +++ b/tests/functional/sources/test_source_freshness.py @@ -103,7 +103,7 @@ def _assert_freshness_results(self, path, state): "warn_after": {"count": 10, "period": "hour"}, "error_after": {"count": 18, "period": "hour"}, }, - "adapter_response": {}, + "adapter_response": {"_message": "SELECT 1", "code": "SELECT", "rows_affected": 1}, "thread_id": AnyStringWith("Thread-"), "execution_time": AnyFloat(), "timing": [ From e2ccf011d98a055c3ce1805ecbdc4740ffc99041 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Tue, 24 Jan 2023 14:25:32 -0500 Subject: [PATCH 132/156] CT 1886 include adapter_response in NodeFinished log message (#6709) * Include adapter_response in run_result in NodeFinished log event * Changie --- .changes/unreleased/Fixes-20230124-115837.yaml | 6 ++++++ core/dbt/contracts/results.py | 5 ++--- tests/functional/logging/test_logging.py | 1 + 3 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 .changes/unreleased/Fixes-20230124-115837.yaml diff --git a/.changes/unreleased/Fixes-20230124-115837.yaml b/.changes/unreleased/Fixes-20230124-115837.yaml new file mode 100644 index 00000000000..c74e83bbaf0 --- /dev/null +++ b/.changes/unreleased/Fixes-20230124-115837.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Include adapter_response in NodeFinished run_result log event +time: 2023-01-24T11:58:37.74179-05:00 +custom: + Author: gshank + Issue: "6703" diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index 9243750284f..4378d207ac2 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -13,7 +13,7 @@ from dbt.events.proto_types import RunResultMsg, TimingInfoMsg from dbt.events.contextvars import get_node_info from dbt.logger import TimingProcessor -from dbt.utils import lowercase, cast_to_str, cast_to_int +from dbt.utils import lowercase, cast_to_str, cast_to_int, cast_dict_to_dict_of_strings from dbt.dataclass_schema import dbtClassMixin, StrEnum import agate @@ -130,7 +130,6 @@ def __pre_deserialize__(cls, data): return data def to_msg(self): - # TODO: add more fields msg = RunResultMsg() msg.status = str(self.status) msg.message = cast_to_str(self.message) @@ -138,7 +137,7 @@ def to_msg(self): msg.execution_time = self.execution_time msg.num_failures = cast_to_int(self.failures) msg.timing_info = [ti.to_msg() for ti in self.timing] - # adapter_response + msg.adapter_response = cast_dict_to_dict_of_strings(self.adapter_response) return msg diff --git a/tests/functional/logging/test_logging.py b/tests/functional/logging/test_logging.py index afcd90d4afb..fc63e5da5dc 100644 --- a/tests/functional/logging/test_logging.py +++ b/tests/functional/logging/test_logging.py @@ -44,6 +44,7 @@ def 
test_basic(project, logs_dir): node_start = True if log_event == "NodeFinished": node_finished = True + assert log_data["run_result"]["adapter_response"] if node_start and not node_finished: if log_event == "NodeExecuting": assert "node_info" in log_data From cbb9117ab997289062cd5bacaf4cfd4466ab9b1e Mon Sep 17 00:00:00 2001 From: Michelle Ark Date: Tue, 24 Jan 2023 16:22:26 -0500 Subject: [PATCH 133/156] test_init conversion (#6610) * convert 044_init_tests --- test/integration/040_init_tests/test_init.py | 755 ------------------- tests/functional/init/test_init.py | 688 +++++++++++++++++ 2 files changed, 688 insertions(+), 755 deletions(-) delete mode 100644 test/integration/040_init_tests/test_init.py create mode 100644 tests/functional/init/test_init.py diff --git a/test/integration/040_init_tests/test_init.py b/test/integration/040_init_tests/test_init.py deleted file mode 100644 index 6a814fa7794..00000000000 --- a/test/integration/040_init_tests/test_init.py +++ /dev/null @@ -1,755 +0,0 @@ -import os -import shutil -from unittest import mock -from unittest.mock import Mock, call -from pathlib import Path - -import click - -from test.integration.base import DBTIntegrationTest, use_profile -from pytest import mark - -class TestInit(DBTIntegrationTest): - def tearDown(self): - project_name = self.get_project_name() - - if os.path.exists(project_name): - shutil.rmtree(project_name) - - super().tearDown() - - def get_project_name(self): - return 'my_project_{}'.format(self.unique_schema()) - - @property - def schema(self): - return 'init_040' - - @property - def models(self): - return 'models' - - # See CT-570 / GH 5180 - @mark.skip( - reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171" - ) - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - def test_postgres_init_task_in_project_with_existing_profiles_yml(self, mock_prompt, mock_confirm, mock_get_adapter): - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - manager.confirm.side_effect = ["y"] - manager.prompt.side_effect = [ - 1, - 'localhost', - 5432, - 'test_user', - 'test_password', - 'test_db', - 'test_schema', - 4, - ] - mock_get_adapter.return_value = [1] - - self.run_dbt(['init']) - - manager.assert_has_calls([ - call.confirm(f"The profile test already exists in {os.path.join(self.test_root_dir, 'profiles.yml')}. Continue and overwrite it?"), - call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? 
https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT), - call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None), - call.prompt('port', default=5432, hide_input=False, type=click.INT), - call.prompt('user (dev username)', default=None, hide_input=False, type=None), - call.prompt('pass (dev password)', default=None, hide_input=True, type=None), - call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT), - ]) - - with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f: - assert f.read() == """config: - send_anonymous_usage_stats: false -test: - outputs: - dev: - dbname: test_db - host: localhost - pass: test_password - port: 5432 - schema: test_schema - threads: 4 - type: postgres - user: test_user - target: dev -""" - - # See CT-570 / GH 5180 - @mark.skip( - reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171" - ) - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - @mock.patch.object(Path, 'exists', autospec=True) - def test_postgres_init_task_in_project_without_existing_profiles_yml(self, exists, mock_prompt, mock_confirm, mock_get_adapter): - - def exists_side_effect(path): - # Override responses on specific files, default to 'real world' if not overriden - return { - 'profiles.yml': False - }.get(path.name, os.path.exists(path)) - - exists.side_effect = exists_side_effect - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.prompt.side_effect = [ - 1, - 'localhost', - 5432, - 'test_user', - 'test_password', - 'test_db', - 'test_schema', - 4, - ] - mock_get_adapter.return_value = [1] - - self.run_dbt(['init']) - - manager.assert_has_calls([ - call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? 
https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT), - call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None), - call.prompt('port', default=5432, hide_input=False, type=click.INT), - call.prompt('user (dev username)', default=None, hide_input=False, type=None), - call.prompt('pass (dev password)', default=None, hide_input=True, type=None), - call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT) - ]) - - with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f: - assert f.read() == """test: - outputs: - dev: - dbname: test_db - host: localhost - pass: test_password - port: 5432 - schema: test_schema - threads: 4 - type: postgres - user: test_user - target: dev -""" - - # See CT-570 / GH 5180 - @mark.skip( - reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171" - ) - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - @mock.patch.object(Path, 'exists', autospec=True) - def test_postgres_init_task_in_project_without_existing_profiles_yml_or_profile_template(self, exists, mock_prompt, mock_confirm, mock_get_adapter): - - def exists_side_effect(path): - # Override responses on specific files, default to 'real world' if not overriden - return { - 'profiles.yml': False, - 'profile_template.yml': False, - }.get(path.name, os.path.exists(path)) - - exists.side_effect = exists_side_effect - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - manager.prompt.side_effect = [ - 1, - ] - mock_get_adapter.return_value = [1] - self.run_dbt(['init']) - manager.assert_has_calls([ - call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? 
https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT), - ]) - - with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f: - assert f.read() == """test: - outputs: - - dev: - type: postgres - threads: [1 or more] - host: [host] - port: [port] - user: [dev_username] - pass: [dev_password] - dbname: [dbname] - schema: [dev_schema] - - prod: - type: postgres - threads: [1 or more] - host: [host] - port: [port] - user: [prod_username] - pass: [prod_password] - dbname: [dbname] - schema: [prod_schema] - - target: dev -""" - - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - @mock.patch.object(Path, 'exists', autospec=True) - def test_postgres_init_task_in_project_with_profile_template_without_existing_profiles_yml(self, exists, mock_prompt, mock_confirm, mock_get_adapter): - - def exists_side_effect(path): - # Override responses on specific files, default to 'real world' if not overriden - return { - 'profiles.yml': False, - }.get(path.name, os.path.exists(path)) - exists.side_effect = exists_side_effect - - with open("profile_template.yml", 'w') as f: - f.write("""fixed: - type: postgres - threads: 4 - host: localhost - dbname: my_db - schema: my_schema - target: my_target -prompts: - target: - hint: 'The target name' - type: string - port: - hint: 'The port (for integer test purposes)' - type: int - default: 5432 - user: - hint: 'Your username' - pass: - hint: 'Your password' - hide_input: true""") - - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - manager.prompt.side_effect = [ - 'my_target', - 5432, - 'test_username', - 'test_password' - ] - mock_get_adapter.return_value = [1] - self.run_dbt(['init']) - manager.assert_has_calls([ - call.prompt('target (The target name)', default=None, hide_input=False, type=click.STRING), - call.prompt('port (The port (for integer test purposes))', default=5432, hide_input=False, type=click.INT), - call.prompt('user (Your username)', default=None, hide_input=False, type=None), - call.prompt('pass (Your password)', default=None, hide_input=True, type=None) - ]) - - with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f: - assert f.read() == """test: - outputs: - my_target: - dbname: my_db - host: localhost - pass: test_password - port: 5432 - schema: my_schema - threads: 4 - type: postgres - user: test_username - target: my_target -""" - # See CT-570 / GH 5180 - @mark.skip( - reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171" - ) - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - def test_postgres_init_task_in_project_with_invalid_profile_template(self, mock_prompt, mock_confirm, mock_get_adapter): - """Test that when an invalid profile_template.yml is provided in the project, - init command falls back to the target's profile_template.yml""" - - with open("profile_template.yml", 'w') as f: - f.write("""invalid template""") - - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - manager.confirm.side_effect = ["y"] - manager.prompt.side_effect = [ - 1, - 'localhost', - 5432, - 'test_username', - 'test_password', - 'test_db', - 'test_schema', - 4, - ] - mock_get_adapter.return_value = [1] - - self.run_dbt(['init']) - - manager.assert_has_calls([ - call.confirm(f"The 
profile test already exists in {os.path.join(self.test_root_dir, 'profiles.yml')}. Continue and overwrite it?"), - call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT), - call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None), - call.prompt('port', default=5432, hide_input=False, type=click.INT), - call.prompt('user (dev username)', default=None, hide_input=False, type=None), - call.prompt('pass (dev password)', default=None, hide_input=True, type=None), - call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT) - ]) - - with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f: - assert f.read() == """config: - send_anonymous_usage_stats: false -test: - outputs: - dev: - dbname: test_db - host: localhost - pass: test_password - port: 5432 - schema: test_schema - threads: 4 - type: postgres - user: test_username - target: dev -""" - # See CT-570 / GH 5180 - @mark.skip( - reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171" - ) - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - def test_postgres_init_task_outside_of_project(self, mock_prompt, mock_confirm, mock_get_adapter): - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - - # Start by removing the dbt_project.yml so that we're not in an existing project - os.remove('dbt_project.yml') - - project_name = self.get_project_name() - manager.prompt.side_effect = [ - project_name, - 1, - 'localhost', - 5432, - 'test_username', - 'test_password', - 'test_db', - 'test_schema', - 4, - ] - mock_get_adapter.return_value = [1] - self.run_dbt(['init']) - manager.assert_has_calls([ - call.prompt("Enter a name for your project (letters, digits, underscore)"), - call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? 
https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT), - call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None), - call.prompt('port', default=5432, hide_input=False, type=click.INT), - call.prompt('user (dev username)', default=None, hide_input=False, type=None), - call.prompt('pass (dev password)', default=None, hide_input=True, type=None), - call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT), - ]) - - with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f: - assert f.read() == f"""config: - send_anonymous_usage_stats: false -{project_name}: - outputs: - dev: - dbname: test_db - host: localhost - pass: test_password - port: 5432 - schema: test_schema - threads: 4 - type: postgres - user: test_username - target: dev -test: - outputs: - default2: - dbname: dbt - host: localhost - pass: password - port: 5432 - schema: {self.unique_schema()} - threads: 4 - type: postgres - user: root - noaccess: - dbname: dbt - host: localhost - pass: password - port: 5432 - schema: {self.unique_schema()} - threads: 4 - type: postgres - user: noaccess - target: default2 -""" - - with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f: - assert f.read() == f""" -# Name your project! Project names should contain only lowercase characters -# and underscores. A good package name should reflect your organization's -# name or the intended use of these models -name: '{project_name}' -version: '1.0.0' -config-version: 2 - -# This setting configures which "profile" dbt uses for this project. -profile: '{project_name}' - -# These configurations specify where dbt should look for different types of files. -# The `model-paths` config, for example, states that models in this project can be -# found in the "models/" directory. You probably won't need to change these! -model-paths: ["models"] -analysis-paths: ["analyses"] -test-paths: ["tests"] -seed-paths: ["seeds"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] - -target-path: "target" # directory which will store compiled SQL files -clean-targets: # directories to be removed by `dbt clean` - - "target" - - "dbt_packages" - - -# Configuring models -# Full documentation: https://docs.getdbt.com/docs/configuring-models - -# In this example config, we tell dbt to build all models in the example/ -# directory as views. These settings can be overridden in the individual model -# files using the `{{{{ config(...) }}}}` macro. 
-models: - {project_name}: - # Config indicated by + and applies to all files under models/example/ - example: - +materialized: view -""" - # See CT-570 / GH 5180 - @mark.skip( - reason="Broken because of https://github.com/dbt-labs/dbt-core/pull/5171" - ) - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - def test_postgres_init_with_provided_project_name(self, mock_prompt, mock_confirm, mock_get_adapter): - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - - # Start by removing the dbt_project.yml so that we're not in an existing project - os.remove('dbt_project.yml') - - manager.prompt.side_effect = [ - 1, - 'localhost', - 5432, - 'test_username', - 'test_password', - 'test_db', - 'test_schema', - 4, - ] - mock_get_adapter.return_value = [1] - - # Provide project name through the init command. - project_name = self.get_project_name() - self.run_dbt(['init', project_name]) - manager.assert_has_calls([ - call.prompt("Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", type=click.INT), - call.prompt('host (hostname for the instance)', default=None, hide_input=False, type=None), - call.prompt('port', default=5432, hide_input=False, type=click.INT), - call.prompt('user (dev username)', default=None, hide_input=False, type=None), - call.prompt('pass (dev password)', default=None, hide_input=True, type=None), - call.prompt('dbname (default database that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('schema (default schema that dbt will build objects in)', default=None, hide_input=False, type=None), - call.prompt('threads (1 or more)', default=1, hide_input=False, type=click.INT), - ]) - - with open(os.path.join(self.test_root_dir, 'profiles.yml'), 'r') as f: - assert f.read() == f"""config: - send_anonymous_usage_stats: false -{project_name}: - outputs: - dev: - dbname: test_db - host: localhost - pass: test_password - port: 5432 - schema: test_schema - threads: 4 - type: postgres - user: test_username - target: dev -test: - outputs: - default2: - dbname: dbt - host: localhost - pass: password - port: 5432 - schema: {self.unique_schema()} - threads: 4 - type: postgres - user: root - noaccess: - dbname: dbt - host: localhost - pass: password - port: 5432 - schema: {self.unique_schema()} - threads: 4 - type: postgres - user: noaccess - target: default2 -""" - - with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f: - assert f.read() == f""" -# Name your project! Project names should contain only lowercase characters -# and underscores. A good package name should reflect your organization's -# name or the intended use of these models -name: '{project_name}' -version: '1.0.0' -config-version: 2 - -# This setting configures which "profile" dbt uses for this project. -profile: '{project_name}' - -# These configurations specify where dbt should look for different types of files. -# The `model-paths` config, for example, states that models in this project can be -# found in the "models/" directory. You probably won't need to change these! 
-model-paths: ["models"] -analysis-paths: ["analyses"] -test-paths: ["tests"] -seed-paths: ["seeds"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] - -target-path: "target" # directory which will store compiled SQL files -clean-targets: # directories to be removed by `dbt clean` - - "target" - - "dbt_packages" - - -# Configuring models -# Full documentation: https://docs.getdbt.com/docs/configuring-models - -# In this example config, we tell dbt to build all models in the example/ -# directory as views. These settings can be overridden in the individual model -# files using the `{{{{ config(...) }}}}` macro. -models: - {project_name}: - # Config indicated by + and applies to all files under models/example/ - example: - +materialized: view -""" - - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - def test_postgres_init_invalid_project_name_cli(self, mock_prompt, mock_confirm, mock_get_adapter): - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - - os.remove('dbt_project.yml') - invalid_name = 'name-with-hyphen' - valid_name = self.get_project_name() - manager.prompt.side_effect = [ - valid_name - ] - mock_get_adapter.return_value = [1] - - self.run_dbt(['init', invalid_name, '-s']) - manager.assert_has_calls([ - call.prompt("Enter a name for your project (letters, digits, underscore)"), - ]) - - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - def test_postgres_init_invalid_project_name_prompt(self, mock_prompt, mock_confirm, mock_get_adapter): - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - - os.remove('dbt_project.yml') - - invalid_name = 'name-with-hyphen' - valid_name = self.get_project_name() - manager.prompt.side_effect = [ - invalid_name, valid_name - ] - mock_get_adapter.return_value = [1] - - self.run_dbt(['init', '-s']) - manager.assert_has_calls([ - call.prompt("Enter a name for your project (letters, digits, underscore)"), - call.prompt("Enter a name for your project (letters, digits, underscore)"), - ]) - - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - def test_postgres_init_skip_profile_setup(self, mock_prompt, mock_confirm, mock_get_adapter): - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - - # Start by removing the dbt_project.yml so that we're not in an existing project - os.remove('dbt_project.yml') - - project_name = self.get_project_name() - manager.prompt.side_effect = [ - project_name, - ] - mock_get_adapter.return_value = [1] - - # provide project name through the ini command - self.run_dbt(['init', '-s']) - manager.assert_has_calls([ - call.prompt("Enter a name for your project (letters, digits, underscore)") - ]) - - with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f: - assert f.read() == f""" -# Name your project! Project names should contain only lowercase characters -# and underscores. A good package name should reflect your organization's -# name or the intended use of these models -name: '{project_name}' -version: '1.0.0' -config-version: 2 - -# This setting configures which "profile" dbt uses for this project. 
-profile: '{project_name}' - -# These configurations specify where dbt should look for different types of files. -# The `model-paths` config, for example, states that models in this project can be -# found in the "models/" directory. You probably won't need to change these! -model-paths: ["models"] -analysis-paths: ["analyses"] -test-paths: ["tests"] -seed-paths: ["seeds"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] - -target-path: "target" # directory which will store compiled SQL files -clean-targets: # directories to be removed by `dbt clean` - - "target" - - "dbt_packages" - - -# Configuring models -# Full documentation: https://docs.getdbt.com/docs/configuring-models - -# In this example config, we tell dbt to build all models in the example/ -# directory as views. These settings can be overridden in the individual model -# files using the `{{{{ config(...) }}}}` macro. -models: - {project_name}: - # Config indicated by + and applies to all files under models/example/ - example: - +materialized: view -""" - - @use_profile('postgres') - @mock.patch('dbt.task.init._get_adapter_plugin_names') - @mock.patch('click.confirm') - @mock.patch('click.prompt') - def test_postgres_init_provided_project_name_and_skip_profile_setup(self, mock_prompt, mock_confirm, mock_get_adapter): - manager = Mock() - manager.attach_mock(mock_prompt, 'prompt') - manager.attach_mock(mock_confirm, 'confirm') - - # Start by removing the dbt_project.yml so that we're not in an existing project - os.remove('dbt_project.yml') - - manager.prompt.side_effect = [ - 1, - 'localhost', - 5432, - 'test_username', - 'test_password', - 'test_db', - 'test_schema', - 4, - ] - mock_get_adapter.return_value = [1] - - # provide project name through the ini command - project_name = self.get_project_name() - self.run_dbt(['init', project_name, '-s']) - manager.assert_not_called() - - with open(os.path.join(self.test_root_dir, project_name, 'dbt_project.yml'), 'r') as f: - assert f.read() == f""" -# Name your project! Project names should contain only lowercase characters -# and underscores. A good package name should reflect your organization's -# name or the intended use of these models -name: '{project_name}' -version: '1.0.0' -config-version: 2 - -# This setting configures which "profile" dbt uses for this project. -profile: '{project_name}' - -# These configurations specify where dbt should look for different types of files. -# The `model-paths` config, for example, states that models in this project can be -# found in the "models/" directory. You probably won't need to change these! -model-paths: ["models"] -analysis-paths: ["analyses"] -test-paths: ["tests"] -seed-paths: ["seeds"] -macro-paths: ["macros"] -snapshot-paths: ["snapshots"] - -target-path: "target" # directory which will store compiled SQL files -clean-targets: # directories to be removed by `dbt clean` - - "target" - - "dbt_packages" - - -# Configuring models -# Full documentation: https://docs.getdbt.com/docs/configuring-models - -# In this example config, we tell dbt to build all models in the example/ -# directory as views. These settings can be overridden in the individual model -# files using the `{{{{ config(...) }}}}` macro. 
-models: - {project_name}: - # Config indicated by + and applies to all files under models/example/ - example: - +materialized: view -""" diff --git a/tests/functional/init/test_init.py b/tests/functional/init/test_init.py new file mode 100644 index 00000000000..6a79dfe9807 --- /dev/null +++ b/tests/functional/init/test_init.py @@ -0,0 +1,688 @@ +import click +import os +import pytest +from pathlib import Path +from unittest import mock +from unittest.mock import Mock, call + +from dbt.tests.util import run_dbt + + +class TestInitProjectWithExistingProfilesYml: + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.confirm") + @mock.patch("click.prompt") + def test_init_task_in_project_with_existing_profiles_yml( + self, mock_prompt, mock_confirm, mock_get_adapter, project + ): + manager = Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.attach_mock(mock_confirm, "confirm") + manager.confirm.side_effect = ["y"] + manager.prompt.side_effect = [ + 1, + "localhost", + 5432, + "test_user", + "test_password", + "test_db", + "test_schema", + 4, + ] + mock_get_adapter.return_value = [project.adapter.type()] + + run_dbt(["init"]) + + manager.assert_has_calls( + [ + call.confirm( + f"The profile test already exists in {os.path.join(project.profiles_dir, 'profiles.yml')}. Continue and overwrite it?" + ), + call.prompt( + "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", + type=click.INT, + ), + call.prompt( + "host (hostname for the instance)", default=None, hide_input=False, type=None + ), + call.prompt("port", default=5432, hide_input=False, type=click.INT), + call.prompt("user (dev username)", default=None, hide_input=False, type=None), + call.prompt("pass (dev password)", default=None, hide_input=True, type=None), + call.prompt( + "dbname (default database that dbt will build objects in)", + default=None, + hide_input=False, + type=None, + ), + call.prompt( + "schema (default schema that dbt will build objects in)", + default=None, + hide_input=False, + type=None, + ), + call.prompt("threads (1 or more)", default=1, hide_input=False, type=click.INT), + ] + ) + + with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f: + assert ( + f.read() + == """config: + send_anonymous_usage_stats: false +test: + outputs: + dev: + dbname: test_db + host: localhost + pass: test_password + port: 5432 + schema: test_schema + threads: 4 + type: postgres + user: test_user + target: dev +""" + ) + + +class TestInitProjectWithoutExistingProfilesYml: + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.prompt") + @mock.patch.object(Path, "exists", autospec=True) + def test_init_task_in_project_without_existing_profiles_yml( + self, exists, mock_prompt, mock_get_adapter, project + ): + def exists_side_effect(path): + # Override responses on specific files, default to 'real world' if not overriden + return {"profiles.yml": False}.get(path.name, os.path.exists(path)) + + exists.side_effect = exists_side_effect + manager = Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.prompt.side_effect = [ + 1, + "localhost", + 5432, + "test_user", + "test_password", + "test_db", + "test_schema", + 4, + ] + mock_get_adapter.return_value = [project.adapter.type()] + + run_dbt(["init"]) + + manager.assert_has_calls( + [ + call.prompt( + "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? 
https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", + type=click.INT, + ), + call.prompt( + "host (hostname for the instance)", default=None, hide_input=False, type=None + ), + call.prompt("port", default=5432, hide_input=False, type=click.INT), + call.prompt("user (dev username)", default=None, hide_input=False, type=None), + call.prompt("pass (dev password)", default=None, hide_input=True, type=None), + call.prompt( + "dbname (default database that dbt will build objects in)", + default=None, + hide_input=False, + type=None, + ), + call.prompt( + "schema (default schema that dbt will build objects in)", + default=None, + hide_input=False, + type=None, + ), + call.prompt("threads (1 or more)", default=1, hide_input=False, type=click.INT), + ] + ) + + with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f: + assert ( + f.read() + == """test: + outputs: + dev: + dbname: test_db + host: localhost + pass: test_password + port: 5432 + schema: test_schema + threads: 4 + type: postgres + user: test_user + target: dev +""" + ) + + +class TestInitProjectWithoutExistingProfilesYmlOrTemplate: + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.confirm") + @mock.patch("click.prompt") + @mock.patch.object(Path, "exists", autospec=True) + def test_init_task_in_project_without_existing_profiles_yml_or_profile_template( + self, exists, mock_prompt, mock_confirm, mock_get_adapter, project + ): + def exists_side_effect(path): + # Override responses on specific files, default to 'real world' if not overriden + return { + "profiles.yml": False, + "profile_template.yml": False, + }.get(path.name, os.path.exists(path)) + + exists.side_effect = exists_side_effect + manager = Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.attach_mock(mock_confirm, "confirm") + manager.prompt.side_effect = [ + 1, + ] + mock_get_adapter.return_value = [project.adapter.type()] + run_dbt(["init"]) + manager.assert_has_calls( + [ + call.prompt( + "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? 
https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", + type=click.INT, + ), + ] + ) + + with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f: + assert ( + f.read() + == """test: + outputs: + + dev: + type: postgres + threads: [1 or more] + host: [host] + port: [port] + user: [dev_username] + pass: [dev_password] + dbname: [dbname] + schema: [dev_schema] + + prod: + type: postgres + threads: [1 or more] + host: [host] + port: [port] + user: [prod_username] + pass: [prod_password] + dbname: [dbname] + schema: [prod_schema] + + target: dev +""" + ) + + +class TestInitProjectWithProfileTemplateWithoutExistingProfilesYml: + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.confirm") + @mock.patch("click.prompt") + @mock.patch.object(Path, "exists", autospec=True) + def test_init_task_in_project_with_profile_template_without_existing_profiles_yml( + self, exists, mock_prompt, mock_confirm, mock_get_adapter, project + ): + def exists_side_effect(path): + # Override responses on specific files, default to 'real world' if not overriden + return { + "profiles.yml": False, + }.get(path.name, os.path.exists(path)) + + exists.side_effect = exists_side_effect + + with open("profile_template.yml", "w") as f: + f.write( + """fixed: + type: postgres + threads: 4 + host: localhost + dbname: my_db + schema: my_schema + target: my_target +prompts: + target: + hint: 'The target name' + type: string + port: + hint: 'The port (for integer test purposes)' + type: int + default: 5432 + user: + hint: 'Your username' + pass: + hint: 'Your password' + hide_input: true""" + ) + + manager = Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.attach_mock(mock_confirm, "confirm") + manager.prompt.side_effect = ["my_target", 5432, "test_username", "test_password"] + mock_get_adapter.return_value = [project.adapter.type()] + run_dbt(["init"]) + manager.assert_has_calls( + [ + call.prompt( + "target (The target name)", default=None, hide_input=False, type=click.STRING + ), + call.prompt( + "port (The port (for integer test purposes))", + default=5432, + hide_input=False, + type=click.INT, + ), + call.prompt("user (Your username)", default=None, hide_input=False, type=None), + call.prompt("pass (Your password)", default=None, hide_input=True, type=None), + ] + ) + + with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f: + assert ( + f.read() + == """test: + outputs: + my_target: + dbname: my_db + host: localhost + pass: test_password + port: 5432 + schema: my_schema + threads: 4 + type: postgres + user: test_username + target: my_target +""" + ) + + +class TestInitInvalidProfileTemplate: + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.confirm") + @mock.patch("click.prompt") + def test_init_task_in_project_with_invalid_profile_template( + self, mock_prompt, mock_confirm, mock_get_adapter, project + ): + """Test that when an invalid profile_template.yml is provided in the project, + init command falls back to the target's profile_template.yml""" + with open(os.path.join(project.project_root, "profile_template.yml"), "w") as f: + f.write("""invalid template""") + + manager = Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.attach_mock(mock_confirm, "confirm") + manager.confirm.side_effect = ["y"] + manager.prompt.side_effect = [ + 1, + "localhost", + 5432, + "test_username", + "test_password", + "test_db", + "test_schema", + 4, + ] + mock_get_adapter.return_value = [project.adapter.type()] + + 
run_dbt(["init"]) + + manager.assert_has_calls( + [ + call.confirm( + f"The profile test already exists in {os.path.join(project.profiles_dir, 'profiles.yml')}. Continue and overwrite it?" + ), + call.prompt( + "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", + type=click.INT, + ), + call.prompt( + "host (hostname for the instance)", default=None, hide_input=False, type=None + ), + call.prompt("port", default=5432, hide_input=False, type=click.INT), + call.prompt("user (dev username)", default=None, hide_input=False, type=None), + call.prompt("pass (dev password)", default=None, hide_input=True, type=None), + call.prompt( + "dbname (default database that dbt will build objects in)", + default=None, + hide_input=False, + type=None, + ), + call.prompt( + "schema (default schema that dbt will build objects in)", + default=None, + hide_input=False, + type=None, + ), + call.prompt("threads (1 or more)", default=1, hide_input=False, type=click.INT), + ] + ) + + with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f: + assert ( + f.read() + == """config: + send_anonymous_usage_stats: false +test: + outputs: + dev: + dbname: test_db + host: localhost + pass: test_password + port: 5432 + schema: test_schema + threads: 4 + type: postgres + user: test_username + target: dev +""" + ) + + +class TestInitOutsideOfProjectBase: + @pytest.fixture(scope="class") + def project_name(self, unique_schema): + return f"my_project_{unique_schema}" + + @pytest.fixture(scope="class", autouse=True) + def setup(self, project): + # Start by removing the dbt_project.yml so that we're not in an existing project + os.remove(os.path.join(project.project_root, "dbt_project.yml")) + + +class TestInitOutsideOfProject(TestInitOutsideOfProjectBase): + @pytest.fixture(scope="class") + def dbt_profile_data(self, unique_schema): + return { + "config": {"send_anonymous_usage_stats": False}, + "test": { + "outputs": { + "default2": { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": os.getenv("POSTGRES_TEST_USER", "root"), + "pass": os.getenv("POSTGRES_TEST_PASS", "password"), + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + "schema": unique_schema, + }, + "noaccess": { + "type": "postgres", + "threads": 4, + "host": "localhost", + "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)), + "user": "noaccess", + "pass": "password", + "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"), + "schema": unique_schema, + }, + }, + "target": "default2", + }, + } + + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.confirm") + @mock.patch("click.prompt") + def test_init_task_outside_of_project( + self, mock_prompt, mock_confirm, mock_get_adapter, project, project_name, unique_schema + ): + manager = Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.attach_mock(mock_confirm, "confirm") + manager.prompt.side_effect = [ + project_name, + 1, + "localhost", + 5432, + "test_username", + "test_password", + "test_db", + "test_schema", + 4, + ] + mock_get_adapter.return_value = [project.adapter.type()] + run_dbt(["init"]) + + manager.assert_has_calls( + [ + call.prompt("Enter a name for your project (letters, digits, underscore)"), + call.prompt( + "Which database would you like to use?\n[1] postgres\n\n(Don't see the one you want? 
https://docs.getdbt.com/docs/available-adapters)\n\nEnter a number", + type=click.INT, + ), + call.prompt( + "host (hostname for the instance)", default=None, hide_input=False, type=None + ), + call.prompt("port", default=5432, hide_input=False, type=click.INT), + call.prompt("user (dev username)", default=None, hide_input=False, type=None), + call.prompt("pass (dev password)", default=None, hide_input=True, type=None), + call.prompt( + "dbname (default database that dbt will build objects in)", + default=None, + hide_input=False, + type=None, + ), + call.prompt( + "schema (default schema that dbt will build objects in)", + default=None, + hide_input=False, + type=None, + ), + call.prompt("threads (1 or more)", default=1, hide_input=False, type=click.INT), + ] + ) + + with open(os.path.join(project.profiles_dir, "profiles.yml"), "r") as f: + assert ( + f.read() + == f"""config: + send_anonymous_usage_stats: false +{project_name}: + outputs: + dev: + dbname: test_db + host: localhost + pass: test_password + port: 5432 + schema: test_schema + threads: 4 + type: postgres + user: test_username + target: dev +test: + outputs: + default2: + dbname: dbt + host: localhost + pass: password + port: 5432 + schema: {unique_schema} + threads: 4 + type: postgres + user: root + noaccess: + dbname: dbt + host: localhost + pass: password + port: 5432 + schema: {unique_schema} + threads: 4 + type: postgres + user: noaccess + target: default2 +""" + ) + + with open(os.path.join(project.project_root, project_name, "dbt_project.yml"), "r") as f: + assert ( + f.read() + == f""" +# Name your project! Project names should contain only lowercase characters +# and underscores. A good package name should reflect your organization's +# name or the intended use of these models +name: '{project_name}' +version: '1.0.0' +config-version: 2 + +# This setting configures which "profile" dbt uses for this project. +profile: '{project_name}' + +# These configurations specify where dbt should look for different types of files. +# The `model-paths` config, for example, states that models in this project can be +# found in the "models/" directory. You probably won't need to change these! +model-paths: ["models"] +analysis-paths: ["analyses"] +test-paths: ["tests"] +seed-paths: ["seeds"] +macro-paths: ["macros"] +snapshot-paths: ["snapshots"] + +target-path: "target" # directory which will store compiled SQL files +clean-targets: # directories to be removed by `dbt clean` + - "target" + - "dbt_packages" + + +# Configuring models +# Full documentation: https://docs.getdbt.com/docs/configuring-models + +# In this example config, we tell dbt to build all models in the example/ +# directory as views. These settings can be overridden in the individual model +# files using the `{{{{ config(...) }}}}` macro. 
+models: + {project_name}: + # Config indicated by + and applies to all files under models/example/ + example: + +materialized: view +""" + ) + + +class TestInitInvalidProjectNameCLI(TestInitOutsideOfProjectBase): + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.confirm") + @mock.patch("click.prompt") + def test_init_invalid_project_name_cli( + self, mock_prompt, mock_confirm, mock_get_adapter, project_name, project + ): + manager = Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.attach_mock(mock_confirm, "confirm") + + invalid_name = "name-with-hyphen" + valid_name = project_name + manager.prompt.side_effect = [valid_name] + mock_get_adapter.return_value = [project.adapter.type()] + + run_dbt(["init", invalid_name, "-s"]) + manager.assert_has_calls( + [ + call.prompt("Enter a name for your project (letters, digits, underscore)"), + ] + ) + + +class TestInitInvalidProjectNamePrompt(TestInitOutsideOfProjectBase): + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.confirm") + @mock.patch("click.prompt") + def test_init_invalid_project_name_prompt( + self, mock_prompt, mock_confirm, mock_get_adapter, project_name, project + ): + manager = Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.attach_mock(mock_confirm, "confirm") + + invalid_name = "name-with-hyphen" + valid_name = project_name + manager.prompt.side_effect = [invalid_name, valid_name] + mock_get_adapter.return_value = [project.adapter.type()] + + run_dbt(["init", "-s"]) + manager.assert_has_calls( + [ + call.prompt("Enter a name for your project (letters, digits, underscore)"), + call.prompt("Enter a name for your project (letters, digits, underscore)"), + ] + ) + + +class TestInitProvidedProjectNameAndSkipProfileSetup(TestInitOutsideOfProjectBase): + @mock.patch("dbt.task.init._get_adapter_plugin_names") + @mock.patch("click.confirm") + @mock.patch("click.prompt") + def test_init_provided_project_name_and_skip_profile_setup( + self, mock_prompt, mock_confirm, mock_get, project, project_name + ): + manager = mock.Mock() + manager.attach_mock(mock_prompt, "prompt") + manager.attach_mock(mock_confirm, "confirm") + manager.prompt.side_effect = [ + 1, + "localhost", + 5432, + "test_username", + "test_password", + "test_db", + "test_schema", + 4, + ] + mock_get.return_value = [project.adapter.type()] + + # provide project name through the init command + run_dbt(["init", project_name, "-s"]) + manager.assert_not_called() + + with open(os.path.join(project.project_root, project_name, "dbt_project.yml"), "r") as f: + assert ( + f.read() + == f""" +# Name your project! Project names should contain only lowercase characters +# and underscores. A good package name should reflect your organization's +# name or the intended use of these models +name: '{project_name}' +version: '1.0.0' +config-version: 2 + +# This setting configures which "profile" dbt uses for this project. +profile: '{project_name}' + +# These configurations specify where dbt should look for different types of files. +# The `model-paths` config, for example, states that models in this project can be +# found in the "models/" directory. You probably won't need to change these! 
+model-paths: ["models"] +analysis-paths: ["analyses"] +test-paths: ["tests"] +seed-paths: ["seeds"] +macro-paths: ["macros"] +snapshot-paths: ["snapshots"] + +target-path: "target" # directory which will store compiled SQL files +clean-targets: # directories to be removed by `dbt clean` + - "target" + - "dbt_packages" + + +# Configuring models +# Full documentation: https://docs.getdbt.com/docs/configuring-models + +# In this example config, we tell dbt to build all models in the example/ +# directory as views. These settings can be overridden in the individual model +# files using the `{{{{ config(...) }}}}` macro. +models: + {project_name}: + # Config indicated by + and applies to all files under models/example/ + example: + +materialized: view +""" + ) From db99e2f68ddf0512ff4099084fda6b745437dcda Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Wed, 25 Jan 2023 13:51:52 -0500 Subject: [PATCH 134/156] Event Clean-Up (#6716) * CT-1857: Event cleanup * Add changelog entry. --- .../Under the Hood-20230124-153553.yaml | 6 + core/dbt/events/proto_types.py | 175 +++--------------- core/dbt/events/types.proto | 137 +++----------- core/dbt/events/types.py | 145 ++++----------- core/dbt/parser/models.py | 61 +++--- tests/unit/test_events.py | 13 +- 6 files changed, 131 insertions(+), 406 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230124-153553.yaml diff --git a/.changes/unreleased/Under the Hood-20230124-153553.yaml b/.changes/unreleased/Under the Hood-20230124-153553.yaml new file mode 100644 index 00000000000..0a540d6da55 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230124-153553.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: Revised and simplified various structured logging events +time: 2023-01-24T15:35:53.065356-05:00 +custom: + Author: peterallenwebb + Issue: 6664 6665 6666 diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index ee11e01d172..e2a001be763 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -1,5 +1,5 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
-# sources: core/dbt/events/types.proto +# sources: types.proto # plugin: python-betterproto from dataclasses import dataclass from datetime import datetime @@ -1083,123 +1083,6 @@ class ParsedFileLoadFailedMsg(betterproto.Message): data: "ParsedFileLoadFailed" = betterproto.message_field(2) -@dataclass -class StaticParserCausedJinjaRendering(betterproto.Message): - """I031""" - - path: str = betterproto.string_field(1) - - -@dataclass -class StaticParserCausedJinjaRenderingMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "StaticParserCausedJinjaRendering" = betterproto.message_field(2) - - -@dataclass -class UsingExperimentalParser(betterproto.Message): - """I032""" - - path: str = betterproto.string_field(1) - - -@dataclass -class UsingExperimentalParserMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "UsingExperimentalParser" = betterproto.message_field(2) - - -@dataclass -class SampleFullJinjaRendering(betterproto.Message): - """I033""" - - path: str = betterproto.string_field(1) - - -@dataclass -class SampleFullJinjaRenderingMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "SampleFullJinjaRendering" = betterproto.message_field(2) - - -@dataclass -class StaticParserFallbackJinjaRendering(betterproto.Message): - """I034""" - - path: str = betterproto.string_field(1) - - -@dataclass -class StaticParserFallbackJinjaRenderingMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "StaticParserFallbackJinjaRendering" = betterproto.message_field(2) - - -@dataclass -class StaticParsingMacroOverrideDetected(betterproto.Message): - """I035""" - - path: str = betterproto.string_field(1) - - -@dataclass -class StaticParsingMacroOverrideDetectedMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "StaticParsingMacroOverrideDetected" = betterproto.message_field(2) - - -@dataclass -class StaticParserSuccess(betterproto.Message): - """I036""" - - path: str = betterproto.string_field(1) - - -@dataclass -class StaticParserSuccessMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "StaticParserSuccess" = betterproto.message_field(2) - - -@dataclass -class StaticParserFailure(betterproto.Message): - """I037""" - - path: str = betterproto.string_field(1) - - -@dataclass -class StaticParserFailureMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "StaticParserFailure" = betterproto.message_field(2) - - -@dataclass -class ExperimentalParserSuccess(betterproto.Message): - """I038""" - - path: str = betterproto.string_field(1) - - -@dataclass -class ExperimentalParserSuccessMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "ExperimentalParserSuccess" = betterproto.message_field(2) - - -@dataclass -class ExperimentalParserFailure(betterproto.Message): - """I039""" - - path: str = betterproto.string_field(1) - - -@dataclass -class ExperimentalParserFailureMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "ExperimentalParserFailure" = betterproto.message_field(2) - - @dataclass class PartialParsingEnabled(betterproto.Message): """I040""" @@ -1408,6 +1291,34 @@ class JinjaLogWarningMsg(betterproto.Message): data: "JinjaLogWarning" = betterproto.message_field(2) +@dataclass +class JinjaLogInfo(betterproto.Message): + """I062""" + + node_info: "NodeInfo" = betterproto.message_field(1) + msg: str = 
betterproto.string_field(2) + + +@dataclass +class JinjaLogInfoMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "JinjaLogInfo" = betterproto.message_field(2) + + +@dataclass +class JinjaLogDebug(betterproto.Message): + """I063""" + + node_info: "NodeInfo" = betterproto.message_field(1) + msg: str = betterproto.string_field(2) + + +@dataclass +class JinjaLogDebugMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "JinjaLogDebug" = betterproto.message_field(2) + + @dataclass class GitSparseCheckoutSubdirectory(betterproto.Message): """M001""" @@ -1542,34 +1453,6 @@ class SelectorReportInvalidSelectorMsg(betterproto.Message): data: "SelectorReportInvalidSelector" = betterproto.message_field(2) -@dataclass -class JinjaLogInfo(betterproto.Message): - """M011""" - - node_info: "NodeInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) - - -@dataclass -class JinjaLogInfoMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "JinjaLogInfo" = betterproto.message_field(2) - - -@dataclass -class JinjaLogDebug(betterproto.Message): - """M012""" - - node_info: "NodeInfo" = betterproto.message_field(1) - msg: str = betterproto.string_field(2) - - -@dataclass -class JinjaLogDebugMsg(betterproto.Message): - info: "EventInfo" = betterproto.message_field(1) - data: "JinjaLogDebug" = betterproto.message_field(2) - - @dataclass class DepsNoPackagesFound(betterproto.Message): """M013""" diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 2870b1bc610..8f49d5afd8f 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -863,98 +863,7 @@ message ParsedFileLoadFailedMsg { ParsedFileLoadFailed data = 2; } -// Skipping I030 - - -// I031 -message StaticParserCausedJinjaRendering { - string path = 1; -} - -message StaticParserCausedJinjaRenderingMsg { - EventInfo info = 1; - StaticParserCausedJinjaRendering data = 2; -} - -// I032 -message UsingExperimentalParser { - string path = 1; -} - -message UsingExperimentalParserMsg { - EventInfo info = 1; - UsingExperimentalParser data = 2; -} - -// I033 -message SampleFullJinjaRendering { - string path = 1; -} - -message SampleFullJinjaRenderingMsg { - EventInfo info = 1; - SampleFullJinjaRendering data = 2; -} - -// I034 -message StaticParserFallbackJinjaRendering { - string path = 1; -} - -message StaticParserFallbackJinjaRenderingMsg { - EventInfo info = 1; - StaticParserFallbackJinjaRendering data = 2; -} - -// I035 -message StaticParsingMacroOverrideDetected { - string path = 1; -} - -message StaticParsingMacroOverrideDetectedMsg { - EventInfo info = 1; - StaticParsingMacroOverrideDetected data = 2; -} - -// I036 -message StaticParserSuccess { - string path = 1; -} - -message StaticParserSuccessMsg { - EventInfo info = 1; - StaticParserSuccess data = 2; -} - -// I037 -message StaticParserFailure { - string path = 1; -} - -message StaticParserFailureMsg { - EventInfo info = 1; - StaticParserFailure data = 2; -} - -// I038 -message ExperimentalParserSuccess { - string path = 1; -} - -message ExperimentalParserSuccessMsg { - EventInfo info = 1; - ExperimentalParserSuccess data = 2; -} - -// I039 -message ExperimentalParserFailure { - string path = 1; -} - -message ExperimentalParserFailureMsg { - EventInfo info = 1; - ExperimentalParserFailure data = 2; -} +// Skipping I030 - I039 // I040 message PartialParsingEnabled { @@ -1124,6 +1033,28 @@ message JinjaLogWarningMsg { JinjaLogWarning data = 2; } +// 
I062 +message JinjaLogInfo { + NodeInfo node_info = 1; + string msg = 2; +} + +message JinjaLogInfoMsg { + EventInfo info = 1; + JinjaLogInfo data = 2; +} + +// I063 +message JinjaLogDebug { + NodeInfo node_info = 1; + string msg = 2; +} + +message JinjaLogDebugMsg { + EventInfo info = 1; + JinjaLogDebug data = 2; +} + // M - Deps generation // M001 @@ -1230,27 +1161,7 @@ message SelectorReportInvalidSelectorMsg { SelectorReportInvalidSelector data = 2; } -// M011 -message JinjaLogInfo { - NodeInfo node_info = 1; - string msg = 2; -} - -message JinjaLogInfoMsg { - EventInfo info = 1; - JinjaLogInfo data = 2; -} - -// M012 -message JinjaLogDebug { - NodeInfo node_info = 1; - string msg = 2; -} - -message JinjaLogDebugMsg { - EventInfo info = 1; - JinjaLogDebug data = 2; -} +// Skipped M011 and M012 // M013 message DepsNoPackagesFound { diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index ab2d090a93a..ab212a6c022 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -864,90 +864,7 @@ def message(self) -> str: return f"Failed to load parsed file from disk at {self.path}: {self.exc}" -# Skipped I030 - - -@dataclass -class StaticParserCausedJinjaRendering(DebugLevel, pt.StaticParserCausedJinjaRendering): - def code(self): - return "I031" - - def message(self) -> str: - return f"1605: jinja rendering because of STATIC_PARSER flag. file: {self.path}" - - -# TODO: Experimental/static parser uses these for testing and some may be a good use case for -# the `TestLevel` logger once we implement it. Some will probably stay `DebugLevel`. -@dataclass -class UsingExperimentalParser(DebugLevel, pt.UsingExperimentalParser): - def code(self): - return "I032" - - def message(self) -> str: - return f"1610: conducting experimental parser sample on {self.path}" - - -@dataclass -class SampleFullJinjaRendering(DebugLevel, pt.SampleFullJinjaRendering): - def code(self): - return "I033" - - def message(self) -> str: - return f"1611: conducting full jinja rendering sample on {self.path}" - - -@dataclass -class StaticParserFallbackJinjaRendering(DebugLevel, pt.StaticParserFallbackJinjaRendering): - def code(self): - return "I034" - - def message(self) -> str: - return f"1602: parser fallback to jinja rendering on {self.path}" - - -@dataclass -class StaticParsingMacroOverrideDetected(DebugLevel, pt.StaticParsingMacroOverrideDetected): - def code(self): - return "I035" - - def message(self) -> str: - return f"1601: detected macro override of ref/source/config in the scope of {self.path}" - - -@dataclass -class StaticParserSuccess(DebugLevel, pt.StaticParserSuccess): - def code(self): - return "I036" - - def message(self) -> str: - return f"1699: static parser successfully parsed {self.path}" - - -@dataclass -class StaticParserFailure(DebugLevel, pt.StaticParserFailure): - def code(self): - return "I037" - - def message(self) -> str: - return f"1603: static parser failed on {self.path}" - - -@dataclass -class ExperimentalParserSuccess(DebugLevel, pt.ExperimentalParserSuccess): - def code(self): - return "I038" - - def message(self) -> str: - return f"1698: experimental parser successfully parsed {self.path}" - - -@dataclass -class ExperimentalParserFailure(DebugLevel, pt.ExperimentalParserFailure): - def code(self): - return "I039" - - def message(self) -> str: - return f"1604: experimental parser failed on {self.path}" +# Skipped I030-I039 @dataclass @@ -1162,6 +1079,26 @@ def message(self) -> str: return self.msg +@dataclass +class JinjaLogInfo(InfoLevel, EventStringFunctor, 
pt.JinjaLogInfo): + def code(self): + return "I062" + + def message(self) -> str: + # This is for the log method used in macros so msg cannot be built here + return self.msg + + +@dataclass +class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug): + def code(self): + return "I063" + + def message(self) -> str: + # This is for the log method used in macros so msg cannot be built here + return self.msg + + # ======================================================= # M - Deps generation # ======================================================= @@ -1173,7 +1110,7 @@ def code(self): return "M001" def message(self) -> str: - return f" Subdirectory specified: {self.subdir}, using sparse checkout." + return f"Subdirectory specified: {self.subdir}, using sparse checkout." @dataclass @@ -1182,7 +1119,7 @@ def code(self): return "M002" def message(self) -> str: - return f" Checking out revision {self.revision}." + return f"Checking out revision {self.revision}." @dataclass @@ -1218,7 +1155,7 @@ def code(self): return "M006" def message(self) -> str: - return f" Updated checkout from {self.start_sha} to {self.end_sha}." + return f"Updated checkout from {self.start_sha} to {self.end_sha}." @dataclass @@ -1227,7 +1164,7 @@ def code(self): return "M007" def message(self) -> str: - return f" Checked out at {self.end_sha}." + return f"Checked out at {self.end_sha}." @dataclass @@ -1260,26 +1197,6 @@ def message(self) -> str: ) -@dataclass -class JinjaLogInfo(InfoLevel, EventStringFunctor, pt.JinjaLogInfo): - def code(self): - return "M011" - - def message(self) -> str: - # This is for the log method used in macros so msg cannot be built here - return self.msg - - -@dataclass -class JinjaLogDebug(DebugLevel, EventStringFunctor, pt.JinjaLogDebug): - def code(self): - return "M012" - - def message(self) -> str: - # This is for the log method used in macros so msg cannot be built here - return self.msg - - @dataclass class DepsNoPackagesFound(InfoLevel, pt.DepsNoPackagesFound): def code(self): @@ -1304,7 +1221,7 @@ def code(self): return "M015" def message(self) -> str: - return f" Installed from {self.version_name}" + return f"Installed from {self.version_name}" @dataclass @@ -1313,7 +1230,7 @@ def code(self): return "M016" def message(self) -> str: - return f" Updated version available: {self.version_latest}" + return f"Updated version available: {self.version_latest}" @dataclass @@ -1322,7 +1239,7 @@ def code(self): return "M017" def message(self) -> str: - return " Up to date!" + return "Up to date!" @dataclass @@ -1331,7 +1248,7 @@ def code(self): return "M018" def message(self) -> str: - return f" and subdirectory {self.subdirectory}" + return f"and subdirectory {self.subdirectory}" @dataclass @@ -2262,7 +2179,7 @@ def code(self): return "Z037" def message(self) -> str: - return " Creating symlink to local dependency." + return "Creating symlink to local dependency." @dataclass @@ -2271,7 +2188,7 @@ def code(self): return "Z038" def message(self) -> str: - return " Symlinks are not available on this OS, copying dependency." + return "Symlinks are not available on this OS, copying dependency." 
@dataclass diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index 597200abba5..710df10f145 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -1,19 +1,10 @@ from copy import deepcopy from dbt.context.context_config import ContextConfig from dbt.contracts.graph.nodes import ModelNode -import dbt.flags as flags +from dbt.events.base_types import EventLevel +from dbt.events.types import Note from dbt.events.functions import fire_event -from dbt.events.types import ( - StaticParserCausedJinjaRendering, - UsingExperimentalParser, - SampleFullJinjaRendering, - StaticParserFallbackJinjaRendering, - StaticParsingMacroOverrideDetected, - StaticParserSuccess, - StaticParserFailure, - ExperimentalParserSuccess, - ExperimentalParserFailure, -) +import dbt.flags as flags from dbt.node_types import NodeType, ModelLanguage from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock @@ -261,7 +252,10 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None: elif not flags.STATIC_PARSER: # jinja rendering super().render_update(node, config) - fire_event(StaticParserCausedJinjaRendering(path=node.path)) + fire_event( + Note(f"1605: jinja rendering because of STATIC_PARSER flag. file: {node.path}"), + EventLevel.DEBUG, + ) return # only sample for experimental parser correctness on normal runs, @@ -295,7 +289,10 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None: # sample the experimental parser only during a normal run if exp_sample and not flags.USE_EXPERIMENTAL_PARSER: - fire_event(UsingExperimentalParser(path=node.path)) + fire_event( + Note(f"1610: conducting experimental parser sample on {node.path}"), + EventLevel.DEBUG, + ) experimental_sample = self.run_experimental_parser(node) # if the experimental parser succeeded, make a full copy of model parser # and populate _everything_ into it so it can be compared apples-to-apples @@ -325,7 +322,10 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None: # sampling rng here, but the effect would be the same since we would only roll # it 40% of the time. So I've opted to keep all the rng code colocated above. if stable_sample and not flags.USE_EXPERIMENTAL_PARSER: - fire_event(SampleFullJinjaRendering(path=node.path)) + fire_event( + Note(f"1611: conducting full jinja rendering sample on {node.path}"), + EventLevel.DEBUG, + ) # if this will _never_ mutate anything `self` we could avoid these deep copies, # but we can't really guarantee that going forward. model_parser_copy = self.partial_deepcopy() @@ -360,7 +360,9 @@ def render_update(self, node: ModelNode, config: ContextConfig) -> None: else: # jinja rendering super().render_update(node, config) - fire_event(StaticParserFallbackJinjaRendering(path=node.path)) + fire_event( + Note(f"1602: parser fallback to jinja rendering on {node.path}"), EventLevel.DEBUG + ) # if sampling, add the correct messages for tracking if exp_sample and isinstance(experimental_sample, str): @@ -396,19 +398,26 @@ def run_static_parser(self, node: ModelNode) -> Optional[Union[str, Dict[str, Li # this log line is used for integration testing. 
If you change # the code at the beginning of the line change the tests in # test/integration/072_experimental_parser_tests/test_all_experimental_parser.py - fire_event(StaticParsingMacroOverrideDetected(path=node.path)) + fire_event( + Note( + f"1601: detected macro override of ref/source/config in the scope of {node.path}" + ), + EventLevel.DEBUG, + ) return "has_banned_macro" # run the stable static parser and return the results try: statically_parsed = py_extract_from_source(node.raw_code) - fire_event(StaticParserSuccess(path=node.path)) + fire_event( + Note(f"1699: static parser successfully parsed {node.path}"), EventLevel.DEBUG + ) return _shift_sources(statically_parsed) # if we want information on what features are barring the static # parser from reading model files, this is where we would add that # since that information is stored in the `ExtractionError`. except ExtractionError: - fire_event(StaticParserFailure(path=node.path)) + fire_event(Note(f"1603: static parser failed on {node.path}"), EventLevel.DEBUG) return "cannot_parse" def run_experimental_parser( @@ -419,7 +428,12 @@ def run_experimental_parser( # this log line is used for integration testing. If you change # the code at the beginning of the line change the tests in # test/integration/072_experimental_parser_tests/test_all_experimental_parser.py - fire_event(StaticParsingMacroOverrideDetected(path=node.path)) + fire_event( + Note( + f"1601: detected macro override of ref/source/config in the scope of {node.path}" + ), + EventLevel.DEBUG, + ) return "has_banned_macro" # run the experimental parser and return the results @@ -428,13 +442,16 @@ def run_experimental_parser( # experimental features. Change `py_extract_from_source` to the new # experimental call when we add additional features. experimentally_parsed = py_extract_from_source(node.raw_code) - fire_event(ExperimentalParserSuccess(path=node.path)) + fire_event( + Note(f"1698: experimental parser successfully parsed {node.path}"), + EventLevel.DEBUG, + ) return _shift_sources(experimentally_parsed) # if we want information on what features are barring the experimental # parser from reading model files, this is where we would add that # since that information is stored in the `ExtractionError`. 
except ExtractionError: - fire_event(ExperimentalParserFailure(path=node.path)) + fire_event(Note(f"1604: experimental parser failed on {node.path}"), EventLevel.DEBUG) return "cannot_parse" # checks for banned macros diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index 529a11f5ed9..b8444184473 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -187,15 +187,6 @@ def test_event_codes(self): UnableToPartialParse(reason="something went wrong"), PartialParsingNotEnabled(), ParsedFileLoadFailed(path="", exc="", exc_info=""), - StaticParserCausedJinjaRendering(path=""), - UsingExperimentalParser(path=""), - SampleFullJinjaRendering(path=""), - StaticParserFallbackJinjaRendering(path=""), - StaticParsingMacroOverrideDetected(path=""), - StaticParserSuccess(path=""), - StaticParserFailure(path=""), - ExperimentalParserSuccess(path=""), - ExperimentalParserFailure(path=""), PartialParsingEnabled(deleted=0, added=0, changed=0), PartialParsingFile(file_id=""), InvalidDisabledTargetInTestNode( @@ -227,6 +218,8 @@ def test_event_codes(self): disabled="", ), JinjaLogWarning(), + JinjaLogInfo(msg=""), + JinjaLogDebug(msg=""), # M - Deps generation ====================== GitSparseCheckoutSubdirectory(subdir=""), GitProgressCheckoutRevision(revision=""), @@ -238,8 +231,6 @@ def test_event_codes(self): RegistryProgressGETRequest(url=""), RegistryProgressGETResponse(url="", resp_code=1234), SelectorReportInvalidSelector(valid_selectors="", spec_method="", raw_spec=""), - JinjaLogInfo(msg=""), - JinjaLogDebug(msg=""), DepsNoPackagesFound(), DepsStartPackageInstall(package_name=""), DepsInstallInfo(version_name=""), From da47b90503453fd766cbc79a0607d2308c03ebb6 Mon Sep 17 00:00:00 2001 From: Matthew McKnight <91097623+McKnight-42@users.noreply.github.com> Date: Wed, 25 Jan 2023 14:57:16 -0600 Subject: [PATCH 135/156] [CT-1630] Convert Column_types tests (#6690) * init commit for column_types test conversion * init start of test_column_types.py * pass tes macros into both tests * remove alt tests, remove old tests, push up working conversion * rename base class, move to adapter zone so adapters can use * typo fix --- .../macros/test_alter_column_type.sql | 5 --- .../056_column_type_tests/pg_models/model.sql | 9 ---- .../pg_models/schema.yml | 14 ------- .../test_alter_column_types.py | 13 ------ .../test_column_types.py | 22 ---------- .../tests/adapter/column_types/fixtures.py | 41 +++++++++++++++++++ .../adapter/column_types/test_column_types.py | 24 +++++++++++ 7 files changed, 65 insertions(+), 63 deletions(-) delete mode 100644 test/integration/056_column_type_tests/macros/test_alter_column_type.sql delete mode 100644 test/integration/056_column_type_tests/pg_models/model.sql delete mode 100644 test/integration/056_column_type_tests/pg_models/schema.yml delete mode 100644 test/integration/056_column_type_tests/test_alter_column_types.py delete mode 100644 test/integration/056_column_type_tests/test_column_types.py rename test/integration/056_column_type_tests/macros/test_is_type.sql => tests/adapter/dbt/tests/adapter/column_types/fixtures.py (73%) create mode 100644 tests/adapter/dbt/tests/adapter/column_types/test_column_types.py diff --git a/test/integration/056_column_type_tests/macros/test_alter_column_type.sql b/test/integration/056_column_type_tests/macros/test_alter_column_type.sql deleted file mode 100644 index 133d59fada5..00000000000 --- a/test/integration/056_column_type_tests/macros/test_alter_column_type.sql +++ /dev/null @@ -1,5 +0,0 @@ --- Macro to 
alter a column type -{% macro test_alter_column_type(model_name, column_name, new_column_type) %} - {% set relation = ref(model_name) %} - {{ alter_column_type(relation, column_name, new_column_type) }} -{% endmacro %} diff --git a/test/integration/056_column_type_tests/pg_models/model.sql b/test/integration/056_column_type_tests/pg_models/model.sql deleted file mode 100644 index f1b877225a9..00000000000 --- a/test/integration/056_column_type_tests/pg_models/model.sql +++ /dev/null @@ -1,9 +0,0 @@ -select - 1::smallint as smallint_col, - 2::integer as int_col, - 3::bigint as bigint_col, - 4.0::real as real_col, - 5.0::double precision as double_col, - 6.0::numeric as numeric_col, - '7'::text as text_col, - '8'::varchar(20) as varchar_col diff --git a/test/integration/056_column_type_tests/pg_models/schema.yml b/test/integration/056_column_type_tests/pg_models/schema.yml deleted file mode 100644 index 93e309d1b0b..00000000000 --- a/test/integration/056_column_type_tests/pg_models/schema.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: 2 -models: - - name: model - tests: - - is_type: - column_map: - smallint_col: ['integer', 'number'] - int_col: ['integer', 'number'] - bigint_col: ['integer', 'number'] - real_col: ['float', 'number'] - double_col: ['float', 'number'] - numeric_col: ['numeric', 'number'] - text_col: ['string', 'not number'] - varchar_col: ['string', 'not number'] diff --git a/test/integration/056_column_type_tests/test_alter_column_types.py b/test/integration/056_column_type_tests/test_alter_column_types.py deleted file mode 100644 index e06e1f5697c..00000000000 --- a/test/integration/056_column_type_tests/test_alter_column_types.py +++ /dev/null @@ -1,13 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import yaml - - -class TestAlterColumnTypes(DBTIntegrationTest): - @property - def schema(self): - return '056_alter_column_types' - - def run_and_alter_and_test(self, alter_column_type_args): - self.assertEqual(len(self.run_dbt(['run'])), 1) - self.run_dbt(['run-operation', 'test_alter_column_type', '--args', alter_column_type_args]) - self.assertEqual(len(self.run_dbt(['test'])), 1) diff --git a/test/integration/056_column_type_tests/test_column_types.py b/test/integration/056_column_type_tests/test_column_types.py deleted file mode 100644 index 66abbb4c970..00000000000 --- a/test/integration/056_column_type_tests/test_column_types.py +++ /dev/null @@ -1,22 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestColumnTypes(DBTIntegrationTest): - @property - def schema(self): - return '056_column_types' - - def run_and_test(self): - self.assertEqual(len(self.run_dbt(['run'])), 1) - self.assertEqual(len(self.run_dbt(['test'])), 1) - - -class TestPostgresColumnTypes(TestColumnTypes): - @property - def models(self): - return 'pg_models' - - @use_profile('postgres') - def test_postgres_column_types(self): - self.run_and_test() - diff --git a/test/integration/056_column_type_tests/macros/test_is_type.sql b/tests/adapter/dbt/tests/adapter/column_types/fixtures.py similarity index 73% rename from test/integration/056_column_type_tests/macros/test_is_type.sql rename to tests/adapter/dbt/tests/adapter/column_types/fixtures.py index 2f1ffde2b1e..97a61c2b6f5 100644 --- a/test/integration/056_column_type_tests/macros/test_is_type.sql +++ b/tests/adapter/dbt/tests/adapter/column_types/fixtures.py @@ -1,4 +1,13 @@ +# macros +macro_test_alter_column_type = """ +-- Macro to alter a column type +{% macro 
test_alter_column_type(model_name, column_name, new_column_type) %} + {% set relation = ref(model_name) %} + {{ alter_column_type(relation, column_name, new_column_type) }} +{% endmacro %} +""" +macro_test_is_type_sql = """ {% macro simple_type_check_column(column, check) %} {% if check == 'string' %} {{ return(column.is_string()) }} @@ -70,3 +79,35 @@ {% endfor %} select * from (select 1 limit 0) as nothing {% endtest %} +""" + +# models/schema + +model_sql = """ +select + 1::smallint as smallint_col, + 2::integer as int_col, + 3::bigint as bigint_col, + 4.0::real as real_col, + 5.0::double precision as double_col, + 6.0::numeric as numeric_col, + '7'::text as text_col, + '8'::varchar(20) as varchar_col +""" + +schema_yml = """ +version: 2 +models: + - name: model + tests: + - is_type: + column_map: + smallint_col: ['integer', 'number'] + int_col: ['integer', 'number'] + bigint_col: ['integer', 'number'] + real_col: ['float', 'number'] + double_col: ['float', 'number'] + numeric_col: ['numeric', 'number'] + text_col: ['string', 'not number'] + varchar_col: ['string', 'not number'] +""" diff --git a/tests/adapter/dbt/tests/adapter/column_types/test_column_types.py b/tests/adapter/dbt/tests/adapter/column_types/test_column_types.py new file mode 100644 index 00000000000..cc213d36a4b --- /dev/null +++ b/tests/adapter/dbt/tests/adapter/column_types/test_column_types.py @@ -0,0 +1,24 @@ +import pytest +from dbt.tests.util import run_dbt +from dbt.tests.adapter.column_types.fixtures import macro_test_is_type_sql, model_sql, schema_yml + + +class BaseColumnTypes: + @pytest.fixture(scope="class") + def macros(self): + return {"test_is_type.sql": macro_test_is_type_sql} + + def run_and_test(self): + results = run_dbt(["run"]) + assert len(results) == 1 + results = run_dbt(["test"]) + assert len(results) == 1 + + +class TestPostgresColumnTypes(BaseColumnTypes): + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": model_sql, "schema.yml": schema_yml} + + def test_run_and_test(self, project): + self.run_and_test() From a34521ec0708f55817be7f18eda059794e213814 Mon Sep 17 00:00:00 2001 From: Gerda Shank Date: Wed, 25 Jan 2023 17:47:45 -0500 Subject: [PATCH 136/156] CT 1894 log partial parsing var changes and sort cli vars before hashing (#6713) * Log information about vars_hash, normalize cli_vars before hashing * Changie * Add to test_events.py * Update core/dbt/events/types.py Co-authored-by: Doug Beatty <44704949+dbeatty10@users.noreply.github.com> --- .../unreleased/Fixes-20230124-141943.yaml | 6 +++++ core/dbt/events/proto_types.py | 17 ++++++++++++++ core/dbt/events/types.proto | 16 ++++++++++++- core/dbt/events/types.py | 9 ++++++++ core/dbt/parser/manifest.py | 23 ++++++++++++++++++- tests/unit/test_events.py | 1 + 6 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 .changes/unreleased/Fixes-20230124-141943.yaml diff --git a/.changes/unreleased/Fixes-20230124-141943.yaml b/.changes/unreleased/Fixes-20230124-141943.yaml new file mode 100644 index 00000000000..4b85413de58 --- /dev/null +++ b/.changes/unreleased/Fixes-20230124-141943.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: Sort cli vars before hashing for partial parsing +time: 2023-01-24T14:19:43.333628-05:00 +custom: + Author: gshank + Issue: "6710" diff --git a/core/dbt/events/proto_types.py b/core/dbt/events/proto_types.py index e2a001be763..84c01cac101 100644 --- a/core/dbt/events/proto_types.py +++ b/core/dbt/events/proto_types.py @@ -1055,6 +1055,23 @@ class 
UnableToPartialParseMsg(betterproto.Message): data: "UnableToPartialParse" = betterproto.message_field(2) +@dataclass +class StateCheckVarsHash(betterproto.Message): + """I025""" + + checksum: str = betterproto.string_field(1) + vars: str = betterproto.string_field(2) + profile: str = betterproto.string_field(3) + target: str = betterproto.string_field(4) + version: str = betterproto.string_field(5) + + +@dataclass +class StateCheckVarsHashMsg(betterproto.Message): + info: "EventInfo" = betterproto.message_field(1) + data: "StateCheckVarsHash" = betterproto.message_field(2) + + @dataclass class PartialParsingNotEnabled(betterproto.Message): """I028""" diff --git a/core/dbt/events/types.proto b/core/dbt/events/types.proto index 8f49d5afd8f..1819f357a2b 100644 --- a/core/dbt/events/types.proto +++ b/core/dbt/events/types.proto @@ -839,7 +839,21 @@ message UnableToPartialParseMsg { UnableToPartialParse data = 2; } -// Skipped I025, I026, I027 +// I025 +message StateCheckVarsHash { + string checksum = 1; + string vars = 2; + string profile = 3; + string target = 4; + string version = 5; +} + +message StateCheckVarsHashMsg { + EventInfo info = 1; + StateCheckVarsHash data = 2; +} + +// Skipped I026, I027 // I028 diff --git a/core/dbt/events/types.py b/core/dbt/events/types.py index ab212a6c022..0b005b65b25 100644 --- a/core/dbt/events/types.py +++ b/core/dbt/events/types.py @@ -843,6 +843,15 @@ def message(self) -> str: return f"Unable to do partial parsing because {self.reason}" +@dataclass +class StateCheckVarsHash(DebugLevel, pt.StateCheckVarsHash): + def code(self): + return "I025" + + def message(self) -> str: + return f"checksum: {self.checksum}, vars: {self.vars}, profile: {self.profile}, target: {self.target}, version: {self.version}" + + # Skipped I025, I026, I026, I027 diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index fbfada4fc2a..903852f6ed7 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -8,6 +8,7 @@ from itertools import chain import time from dbt.events.base_types import EventLevel +import pprint import dbt.exceptions import dbt.tracking @@ -29,6 +30,8 @@ ParsedFileLoadFailed, InvalidDisabledTargetInTestNode, NodeNotFoundOrDisabled, + StateCheckVarsHash, + Note, ) from dbt.logger import DbtProcessState from dbt.node_types import NodeType @@ -569,6 +572,12 @@ def is_partial_parsable(self, manifest: Manifest) -> Tuple[bool, Optional[str]]: reason="config vars, config profile, or config target have changed" ) ) + fire_event( + Note( + msg=f"previous checksum: {self.manifest.state_check.vars_hash.checksum}, current checksum: {manifest.state_check.vars_hash.checksum}" + ), + level=EventLevel.DEBUG, + ) valid = False reparse_reason = ReparseReason.vars_changed if self.manifest.state_check.profile_hash != manifest.state_check.profile_hash: @@ -702,16 +711,28 @@ def build_manifest_state_check(self): # arg vars, but since any changes to that file will cause state_check # to not pass, it doesn't matter. If we move to more granular checking # of env_vars, that would need to change. + # We are using the parsed cli_vars instead of config.args.vars, in order + # to sort them and avoid reparsing because of ordering issues. 
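+        # Illustrative note, not part of the original change: pprint.pformat sorts
+        # dict keys, e.g. pprint.pformat({"b": 1, "a": 2}) == "{'a': 2, 'b': 1}",
+        # so equal cli vars hash identically regardless of the order they were passed in.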
+ stringified_cli_vars = pprint.pformat(config.cli_vars) vars_hash = FileHash.from_contents( "\x00".join( [ - getattr(config.args, "vars", "{}") or "{}", + stringified_cli_vars, getattr(config.args, "profile", "") or "", getattr(config.args, "target", "") or "", __version__, ] ) ) + fire_event( + StateCheckVarsHash( + checksum=vars_hash.checksum, + vars=stringified_cli_vars, + profile=config.args.profile, + target=config.args.target, + version=__version__, + ) + ) # Create a FileHash of the env_vars in the project key_list = list(config.project_env_vars.keys()) diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py index b8444184473..c7b9ca24d66 100644 --- a/tests/unit/test_events.py +++ b/tests/unit/test_events.py @@ -185,6 +185,7 @@ def test_event_codes(self): PartialParsingError(exc_info={}), PartialParsingSkipParsing(), UnableToPartialParse(reason="something went wrong"), + StateCheckVarsHash(vars="testing", target="testing", profile="testing"), PartialParsingNotEnabled(), ParsedFileLoadFailed(path="", exc="", exc_info=""), PartialParsingEnabled(deleted=0, added=0, changed=0), From b0651b13b521dc19166ef03ced79ffc639f22cfd Mon Sep 17 00:00:00 2001 From: Matthew Beall Date: Wed, 25 Jan 2023 17:51:34 -0700 Subject: [PATCH 137/156] change `exposure_content` to `source_content` (#6739) * change `exposure_content` to `source_content` * Adding changelog Co-authored-by: Leah Antkiewicz --- .changes/unreleased/Fixes-20230125-191739.yaml | 6 ++++++ core/dbt/contracts/util.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 .changes/unreleased/Fixes-20230125-191739.yaml diff --git a/.changes/unreleased/Fixes-20230125-191739.yaml b/.changes/unreleased/Fixes-20230125-191739.yaml new file mode 100644 index 00000000000..fff39574ed9 --- /dev/null +++ b/.changes/unreleased/Fixes-20230125-191739.yaml @@ -0,0 +1,6 @@ +kind: Fixes +body: '[Regression] exposure_content referenced incorrectly' +time: 2023-01-25T19:17:39.942081-05:00 +custom: + Author: Mathyoub + Issue: "6738" diff --git a/core/dbt/contracts/util.py b/core/dbt/contracts/util.py index fb2af2dac59..d8b166b1d93 100644 --- a/core/dbt/contracts/util.py +++ b/core/dbt/contracts/util.py @@ -283,7 +283,7 @@ def upgrade_manifest_json(manifest: dict) -> dict: if "root_path" in exposure_content: del exposure_content["root_path"] for source_content in manifest.get("sources", {}).values(): - if "root_path" in exposure_content: + if "root_path" in source_content: del source_content["root_path"] for macro_content in manifest.get("macros", {}).values(): if "root_path" in macro_content: From c65ba11ae6dca5203228320a7f6388ce1796f104 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Thu, 26 Jan 2023 00:54:00 -0800 Subject: [PATCH 138/156] Ct 1827/064 column comments tests conversion (#6654) * Convert test and make it a bit more pytest-onic * Ax old integration test. * Run black on test conversion * I didn't like how pytest was running the fixture so wrapped it into a closure. * Merge converted test into persist docs. 
Co-authored-by: Mila Page --- .../models/quote_model.sql | 1 - .../models/schema.yml | 9 -- .../test_column_comments.py | 43 -------- .../functional/persist_docs_tests/fixtures.py | 102 ++++++++++-------- .../persist_docs_tests/test_persist_docs.py | 44 ++++++++ 5 files changed, 103 insertions(+), 96 deletions(-) delete mode 100644 test/integration/064_column_comments_tests/models/quote_model.sql delete mode 100644 test/integration/064_column_comments_tests/models/schema.yml delete mode 100644 test/integration/064_column_comments_tests/test_column_comments.py diff --git a/test/integration/064_column_comments_tests/models/quote_model.sql b/test/integration/064_column_comments_tests/models/quote_model.sql deleted file mode 100644 index 2255b4bd7f0..00000000000 --- a/test/integration/064_column_comments_tests/models/quote_model.sql +++ /dev/null @@ -1 +0,0 @@ -select 1 as {{ adapter.quote("2id") }} diff --git a/test/integration/064_column_comments_tests/models/schema.yml b/test/integration/064_column_comments_tests/models/schema.yml deleted file mode 100644 index 1e82165fabf..00000000000 --- a/test/integration/064_column_comments_tests/models/schema.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 2 -models: - - name: quote_model - description: "model to test column quotes and comments" - columns: - - name: 2id - description: "XXX My description" - quote: true - diff --git a/test/integration/064_column_comments_tests/test_column_comments.py b/test/integration/064_column_comments_tests/test_column_comments.py deleted file mode 100644 index bd94b642cb6..00000000000 --- a/test/integration/064_column_comments_tests/test_column_comments.py +++ /dev/null @@ -1,43 +0,0 @@ -import json - -from test.integration.base import DBTIntegrationTest, use_profile - - -class TestColumnComment(DBTIntegrationTest): - @property - def schema(self): - return "column_comment_060" - - @property - def models(self): - return "models" - - @property - def project_config(self): - return { - 'config-version': 2, - 'models': { - 'test': { - 'materialized': 'table', - '+persist_docs': { - "relation": True, - "columns": True, - }, - } - } - } - - def run_has_comments(self): - self.run_dbt() - self.run_dbt(['docs', 'generate']) - with open('target/catalog.json') as fp: - catalog_data = json.load(fp) - assert 'nodes' in catalog_data - assert len(catalog_data['nodes']) == 1 - column_node = catalog_data['nodes']['model.test.quote_model'] - column_comment = column_node['columns']['2id']['comment'] - assert column_comment.startswith('XXX') - - @use_profile('postgres') - def test_postgres_comments(self): - self.run_has_comments() diff --git a/tests/functional/persist_docs_tests/fixtures.py b/tests/functional/persist_docs_tests/fixtures.py index c596f5219cf..f7179bb1ab5 100644 --- a/tests/functional/persist_docs_tests/fixtures.py +++ b/tests/functional/persist_docs_tests/fixtures.py @@ -1,3 +1,62 @@ +_MODELS__VIEW = """ +{{ config(materialized='view') }} +select 2 as id, 'Bob' as name +""" + +_MODELS__NO_DOCS_MODEL = """ +select 1 as id, 'Alice' as name +""" + +_DOCS__MY_FUN_DOCS = """ +{% docs my_fun_doc %} +name Column description "with double quotes" +and with 'single quotes' as welll as other; +'''abc123''' +reserved -- characters +-- +/* comment */ +Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting + +{% enddocs %} +""" + +_MODELS__TABLE = """ +{{ config(materialized='table') }} +select 1 as id, 'Joe' as name +""" + + +_MODELS__MISSING_COLUMN = """ +{{ config(materialized='table') }} +select 1 as id, 'Ed' as name +""" + 
+_MODELS__MODEL_USING_QUOTE_UTIL = """ +select 1 as {{ adapter.quote("2id") }} +""" + +_PROPERTIES__QUOTE_MODEL = """ +version: 2 +models: + - name: quote_model + description: "model to test column quotes and comments" + columns: + - name: 2id + description: "XXX My description" + quote: true +""" + +_PROPERITES__SCHEMA_MISSING_COL = """ +version: 2 +models: + - name: missing_column + columns: + - name: id + description: "test id column description" + - name: column_that_does_not_exist + description: "comment that cannot be created" +""" + _PROPERTIES__SCHEMA_YML = """ version: 2 @@ -71,49 +130,6 @@ {{ doc('my_fun_doc')}} """ -_MODELS__VIEW = """ -{{ config(materialized='view') }} -select 2 as id, 'Bob' as name -""" - -_MODELS__NO_DOCS_MODEL = """ -select 1 as id, 'Alice' as name -""" - -_DOCS__MY_FUN_DOCS = """ -{% docs my_fun_doc %} -name Column description "with double quotes" -and with 'single quotes' as welll as other; -'''abc123''' -reserved -- characters --- -/* comment */ -Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting - -{% enddocs %} -""" - -_MODELS__TABLE = """ -{{ config(materialized='table') }} -select 1 as id, 'Joe' as name -""" - - -_MODELS__MISSING_COLUMN = """ -{{ config(materialized='table') }} -select 1 as id, 'Ed' as name -""" - -_PROPERITES__SCHEMA_MISSING_COL = """ -version: 2 -models: - - name: missing_column - columns: - - name: id - description: "test id column description" - - name: column_that_does_not_exist - description: "comment that cannot be created" -""" _SEEDS__SEED = """id,name 1,Alice diff --git a/tests/functional/persist_docs_tests/test_persist_docs.py b/tests/functional/persist_docs_tests/test_persist_docs.py index 8c3822b497a..7ca5dcfabe8 100644 --- a/tests/functional/persist_docs_tests/test_persist_docs.py +++ b/tests/functional/persist_docs_tests/test_persist_docs.py @@ -9,9 +9,11 @@ from tests.functional.persist_docs_tests.fixtures import ( _DOCS__MY_FUN_DOCS, _MODELS__MISSING_COLUMN, + _MODELS__MODEL_USING_QUOTE_UTIL, _MODELS__NO_DOCS_MODEL, _MODELS__TABLE, _MODELS__VIEW, + _PROPERTIES__QUOTE_MODEL, _PROPERITES__SCHEMA_MISSING_COL, _PROPERTIES__SCHEMA_YML, _SEEDS__SEED, @@ -148,3 +150,45 @@ def test_postgres_missing_column(self, project): table_node = catalog_data["nodes"]["model.test.missing_column"] table_id_comment = table_node["columns"]["id"]["comment"] assert table_id_comment.startswith("test id column description") + + +class TestPersistDocsColumnComment: + @pytest.fixture(scope="class") + def models(self): + return {"quote_model.sql": _MODELS__MODEL_USING_QUOTE_UTIL} + + @pytest.fixture(scope="class") + def properties(self): + return {"properties.yml": _PROPERTIES__QUOTE_MODEL} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "models": { + "test": { + "materialized": "table", + "+persist_docs": { + "relation": True, + "columns": True, + }, + } + } + } + + @pytest.fixture(scope="class") + def run_has_comments(self, project): + def fixt(): + run_dbt() + run_dbt(["docs", "generate"]) + with open("target/catalog.json") as fp: + catalog_data = json.load(fp) + assert "nodes" in catalog_data + assert len(catalog_data["nodes"]) == 1 + column_node = catalog_data["nodes"]["model.test.quote_model"] + column_comment = column_node["columns"]["2id"]["comment"] + assert column_comment.startswith("XXX") + + return fixt + + def test_postgres_comments(self, run_has_comments): + run_has_comments() From c2c4757a2bc65a62ba85e76056d0eaedf76f455e Mon Sep 17 00:00:00 2001 From: Peter Webb Date: Thu, 26 Jan 2023 
14:27:42 -0500 Subject: [PATCH 139/156] Graph Analysis Optimization for Large Dags (#6720) * Optimization to remove graph analysis bottleneck in large dags. * Add changelog entry. --- .../unreleased/Under the Hood-20230126-135939.yaml | 6 ++++++ core/dbt/graph/queue.py | 11 ++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) create mode 100644 .changes/unreleased/Under the Hood-20230126-135939.yaml diff --git a/.changes/unreleased/Under the Hood-20230126-135939.yaml b/.changes/unreleased/Under the Hood-20230126-135939.yaml new file mode 100644 index 00000000000..091f0a65864 --- /dev/null +++ b/.changes/unreleased/Under the Hood-20230126-135939.yaml @@ -0,0 +1,6 @@ +kind: Under the Hood +body: ' Optimized GraphQueue to remove graph analysis bottleneck in large dags.' +time: 2023-01-26T13:59:39.518345-05:00 +custom: + Author: peterallenwebb + Issue: "6759" diff --git a/core/dbt/graph/queue.py b/core/dbt/graph/queue.py index dd439faf37e..a21a9afc630 100644 --- a/core/dbt/graph/queue.py +++ b/core/dbt/graph/queue.py @@ -40,7 +40,7 @@ def __init__(self, graph: nx.DiGraph, manifest: Manifest, selected: Set[UniqueId # store the 'score' of each node as a number. Lower is higher priority. self._scores = self._get_scores(self.graph) # populate the initial queue - self._find_new_additions() + self._find_new_additions(list(self.graph.nodes())) # awaits after task end self.some_task_done = threading.Condition(self.lock) @@ -156,12 +156,12 @@ def _already_known(self, node: UniqueId) -> bool: """ return node in self.in_progress or node in self.queued - def _find_new_additions(self) -> None: + def _find_new_additions(self, candidates) -> None: """Find any nodes in the graph that need to be added to the internal queue and add them. """ - for node, in_degree in self.graph.in_degree(): - if in_degree == 0 and not self._already_known(node): + for node in candidates: + if self.graph.in_degree(node) == 0 and not self._already_known(node): self.inner.put((self._scores[node], node)) self.queued.add(node) @@ -174,8 +174,9 @@ def mark_done(self, node_id: UniqueId) -> None: """ with self.lock: self.in_progress.remove(node_id) + successors = list(self.graph.successors(node_id)) self.graph.remove_node(node_id) - self._find_new_additions() + self._find_new_additions(successors) self.inner.task_done() self.some_task_done.notify_all() From 3f96fad4f95e44d21834a61b1084b9bf2f34d840 Mon Sep 17 00:00:00 2001 From: Mila Page <67295367+VersusFacit@users.noreply.github.com> Date: Thu, 26 Jan 2023 12:23:02 -0800 Subject: [PATCH 140/156] Ct 1629/052 column quoting tests conversion (#6652) * Test converted and reformatted for pytest. 
* Ax old versions of 052 test * Nix the 'os' import and black format * Change names of models to be more PEP like * cleanup code Co-authored-by: Mila Page --- .../models-unquoted/model.sql | 12 --- .../052_column_quoting_tests/models/model.sql | 12 --- .../052_column_quoting_tests/seeds/seed.csv | 4 - .../test_column_quotes.py | 78 -------------- .../column_quoting/test_column_quotes.py | 100 ++++++++++++++++++ 5 files changed, 100 insertions(+), 106 deletions(-) delete mode 100644 test/integration/052_column_quoting_tests/models-unquoted/model.sql delete mode 100644 test/integration/052_column_quoting_tests/models/model.sql delete mode 100644 test/integration/052_column_quoting_tests/seeds/seed.csv delete mode 100644 test/integration/052_column_quoting_tests/test_column_quotes.py create mode 100644 tests/functional/column_quoting/test_column_quotes.py diff --git a/test/integration/052_column_quoting_tests/models-unquoted/model.sql b/test/integration/052_column_quoting_tests/models-unquoted/model.sql deleted file mode 100644 index 1bdcda38353..00000000000 --- a/test/integration/052_column_quoting_tests/models-unquoted/model.sql +++ /dev/null @@ -1,12 +0,0 @@ -{% set col_a = '"col_a"' %} -{% set col_b = '"col_b"' %} - -{{config( - materialized = 'incremental', - unique_key = col_a, - incremental_strategy = var('strategy') - )}} - -select -{{ col_a }}, {{ col_b }} -from {{ref('seed')}} diff --git a/test/integration/052_column_quoting_tests/models/model.sql b/test/integration/052_column_quoting_tests/models/model.sql deleted file mode 100644 index 3bc61e082d9..00000000000 --- a/test/integration/052_column_quoting_tests/models/model.sql +++ /dev/null @@ -1,12 +0,0 @@ -{% set col_a = '"col_A"' %} -{% set col_b = '"col_B"' %} - -{{config( - materialized = 'incremental', - unique_key = col_a, - incremental_strategy = var('strategy') - )}} - -select -{{ col_a }}, {{ col_b }} -from {{ref('seed')}} diff --git a/test/integration/052_column_quoting_tests/seeds/seed.csv b/test/integration/052_column_quoting_tests/seeds/seed.csv deleted file mode 100644 index d4a1e26eed2..00000000000 --- a/test/integration/052_column_quoting_tests/seeds/seed.csv +++ /dev/null @@ -1,4 +0,0 @@ -col_A,col_B -1,2 -3,4 -5,6 diff --git a/test/integration/052_column_quoting_tests/test_column_quotes.py b/test/integration/052_column_quoting_tests/test_column_quotes.py deleted file mode 100644 index f5aef6fed39..00000000000 --- a/test/integration/052_column_quoting_tests/test_column_quotes.py +++ /dev/null @@ -1,78 +0,0 @@ -from test.integration.base import DBTIntegrationTest, use_profile -import os - - -class BaseColumnQuotingTest(DBTIntegrationTest): - def column_quoting(self): - raise NotImplementedError('column_quoting not implemented') - - @property - def schema(self): - return 'dbt_column_quoting_052' - - @staticmethod - def dir(value): - return os.path.normpath(value) - - def _run_columnn_quotes(self, strategy='delete+insert'): - strategy_vars = '{{"strategy": "{}"}}'.format(strategy) - self.run_dbt(['seed', '--vars', strategy_vars]) - self.run_dbt(['run', '--vars', strategy_vars]) - self.run_dbt(['run', '--vars', strategy_vars]) - - -class TestColumnQuotingDefault(BaseColumnQuotingTest): - @property - def project_config(self): - return { - 'config-version': 2 - } - - @property - def models(self): - return self.dir('models') - - def run_dbt(self, *args, **kwargs): - return super().run_dbt(*args, **kwargs) - - @use_profile('postgres') - def test_postgres_column_quotes(self): - self._run_columnn_quotes() - - -class 
TestColumnQuotingDisabled(BaseColumnQuotingTest): - @property - def models(self): - return self.dir('models-unquoted') - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'quote_columns': False, - }, - } - - @use_profile('postgres') - def test_postgres_column_quotes(self): - self._run_columnn_quotes() - - -class TestColumnQuotingEnabled(BaseColumnQuotingTest): - @property - def models(self): - return self.dir('models') - - @property - def project_config(self): - return { - 'config-version': 2, - 'seeds': { - 'quote_columns': True, - }, - } - - @use_profile('postgres') - def test_postgres_column_quotes(self): - self._run_columnn_quotes() diff --git a/tests/functional/column_quoting/test_column_quotes.py b/tests/functional/column_quoting/test_column_quotes.py new file mode 100644 index 00000000000..362f7b0d4de --- /dev/null +++ b/tests/functional/column_quoting/test_column_quotes.py @@ -0,0 +1,100 @@ +import pytest + +from dbt.tests.util import run_dbt + +_MODELS__COLUMN_QUOTING_DEFAULT = """ +{% set col_a = '"col_A"' %} +{% set col_b = '"col_B"' %} + +{{ + config( + materialized = 'incremental', + unique_key = col_a, + ) +}} + +select + {{ col_a }}, + {{ col_b }} +from {{ref('seed')}} +""" + +_MODELS__COLUMN_QUOTING_NO_QUOTING = """ +{% set col_a = '"col_a"' %} +{% set col_b = '"col_b"' %} + +{{ + config( + materialized = 'incremental', + unique_key = col_a, + ) +}} + +select + {{ col_a }}, + {{ col_b }} +from {{ref('seed')}} +""" + +_SEEDS_BASIC_SEED = """col_A,col_B +1,2 +3,4 +5,6 +""" + + +class BaseColumnQuotingTest: + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": _MODELS__COLUMN_QUOTING_DEFAULT} + + @pytest.fixture(scope="class") + def seeds(self): + return {"seed.csv": _SEEDS_BASIC_SEED} + + @pytest.fixture(scope="function") + def run_column_quotes(self, project): + def fixt(): + results = run_dbt(["seed"]) + assert len(results) == 1 + results = run_dbt(["run"]) + assert len(results) == 1 + results = run_dbt(["run"]) + assert len(results) == 1 + + return fixt + + +class TestColumnQuotingDefault(BaseColumnQuotingTest): + def test_column_quotes(self, run_column_quotes): + run_column_quotes() + + +class TestColumnQuotingEnabled(BaseColumnQuotingTest): + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": True, + }, + } + + def test_column_quotes(self, run_column_quotes): + run_column_quotes() + + +class TestColumnQuotingDisabled(BaseColumnQuotingTest): + @pytest.fixture(scope="class") + def models(self): + return {"model.sql": _MODELS__COLUMN_QUOTING_NO_QUOTING} + + @pytest.fixture(scope="class") + def project_config_update(self): + return { + "seeds": { + "quote_columns": False, + }, + } + + def test_column_quotes(self, run_column_quotes): + run_column_quotes() From 82d9b2fa87e4bc4953b82be9baa2a3d9e951b83e Mon Sep 17 00:00:00 2001 From: Alexander Smolyakov Date: Fri, 27 Jan 2023 19:04:31 +0400 Subject: [PATCH 141/156] [CI/CD] Update release workflow and introduce workflow for nightly releases (#6602) * Add release workflows * Update nightly-release.yml * Set default `test_run` value to `true` * Update .bumpversion.cfg * Resolve review comment - Update workflow docs - Change workflow name - Set `test_run` default value to `true` * Update Slack secret * PyPI --- .bumpversion.cfg | 4 + .github/workflows/nightly-release.yml | 109 ++++++++ .github/workflows/release.yml | 350 ++++++++++++++------------ scripts/env-setup.sh | 6 + 4 files changed, 308 insertions(+), 161 
deletions(-)
 create mode 100644 .github/workflows/nightly-release.yml
 create mode 100644 scripts/env-setup.sh

diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index e55d8b13ece..4db0c9a0c58 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -5,8 +5,10 @@ parse = (?P<major>\d+)
 	\.(?P<minor>\d+)
 	\.(?P<patch>\d+)
 	((?P<prekind>a|b|rc)
 	(?P<pre>\d+)  # pre-release version num
+	)(\.(?P<nightly>[a-z..0-9]+)
     	)?
     serialize =
    +	{major}.{minor}.{patch}{prekind}{pre}.{nightly}
     	{major}.{minor}.{patch}{prekind}{pre}
     	{major}.{minor}.{patch}
     commit = False
    @@ -24,6 +26,8 @@ values =
     [bumpversion:part:pre]
     first_value = 1
     
    +[bumpversion:part:nightly]
    +
     [bumpversion:file:core/setup.py]
     
     [bumpversion:file:core/dbt/version.py]
    diff --git a/.github/workflows/nightly-release.yml b/.github/workflows/nightly-release.yml
    new file mode 100644
    index 00000000000..b668d62eccf
    --- /dev/null
    +++ b/.github/workflows/nightly-release.yml
    @@ -0,0 +1,109 @@
    +# **what?**
+# Nightly releases to GitHub and PyPI. This workflow produces the following outcomes:
+# - generate and validate data for the nightly release (commit SHA, version number, release branch);
+# - pass data to the release workflow;
+# - nightly release will be pushed to GitHub as a draft release;
+# - nightly build will be pushed to test PyPI;
    +#
    +# **why?**
    +# Ensure an automated and tested release process for nightly builds
    +#
    +# **when?**
    +# This workflow runs on schedule or can be run manually on demand.
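+#
+# Flow sketch (values are illustrative): aggregate-release-data resolves the HEAD
+# commit of main, derives a dev/nightly version number from .bumpversion.cfg plus
+# the current date, and hands both off to the reusable release.yml workflow.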
    +
    +name: Nightly Test Release to GitHub and PyPI
    +
    +on:
    +  workflow_dispatch: # for manual triggering
    +  schedule:
    +    - cron: 0 9 * * *
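+    # 0 9 * * * == daily at 09:00 UTC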
    +
    +permissions:
    +  contents: write # this is the permission that allows creating a new release
    +
    +defaults:
    +  run:
    +    shell: bash
    +
    +env:
    +  RELEASE_BRANCH: "main"
    +
    +jobs:
    +  aggregate-release-data:
    +    runs-on: ubuntu-latest
    +
    +    outputs:
    +      commit_sha: ${{ steps.resolve-commit-sha.outputs.release_commit }}
    +      version_number: ${{ steps.nightly-release-version.outputs.number }}
    +      release_branch: ${{ steps.release-branch.outputs.name }}
    +
    +    steps:
    +      - name: "Checkout ${{ github.repository }} Branch ${{ env.RELEASE_BRANCH }}"
    +        uses: actions/checkout@v3
    +        with:
    +          ref: ${{ env.RELEASE_BRANCH }}
    +
    +      - name: "Resolve Commit To Release"
    +        id: resolve-commit-sha
    +        run: |
    +          commit_sha=$(git rev-parse HEAD)
    +          echo "release_commit=$commit_sha" >> $GITHUB_OUTPUT
    +
    +      - name: "Get Current Version Number"
    +        id: version-number-sources
    +        run: |
    +          current_version=`awk -F"current_version = " '{print $2}' .bumpversion.cfg | tr '\n' ' '`
    +          echo "current_version=$current_version" >> $GITHUB_OUTPUT
    +
    +      - name: "Audit Version And Parse Into Parts"
    +        id: semver
    +        uses: dbt-labs/actions/parse-semver@v1.1.0
    +        with:
    +          version: ${{ steps.version-number-sources.outputs.current_version }}
    +
    +      - name: "Get Current Date"
    +        id: current-date
    +        run: echo "date=$(date +'%m%d%Y')" >> $GITHUB_OUTPUT
    +
    +      - name: "Generate Nightly Release Version Number"
    +        id: nightly-release-version
    +        run: |
    +          number="${{ steps.semver.outputs.version }}.dev${{ steps.current-date.outputs.date }}+nightly"
    +          echo "number=$number" >> $GITHUB_OUTPUT
    +
    +      - name: "Audit Nightly Release Version And Parse Into Parts"
    +        uses: dbt-labs/actions/parse-semver@v1.1.0
    +        with:
    +          version: ${{ steps.nightly-release-version.outputs.number }}
    +
    +      - name: "Set Release Branch"
    +        id: release-branch
    +        run: |
    +          echo "name=${{ env.RELEASE_BRANCH }}" >> $GITHUB_OUTPUT
    +
    +  log-outputs-aggregate-release-data:
    +    runs-on: ubuntu-latest
    +    needs: [aggregate-release-data]
    +
    +    steps:
    +      - name: "[DEBUG] Log Outputs"
    +        run: |
    +          echo commit_sha    : ${{ needs.aggregate-release-data.outputs.commit_sha }}
    +          echo version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
    +          echo release_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
    +
    +  release-github-pypi:
    +    needs: [aggregate-release-data]
    +
    +    uses: ./.github/workflows/release.yml
    +    with:
    +      sha: ${{ needs.aggregate-release-data.outputs.commit_sha }}
+      target_branch: ${{ needs.aggregate-release-data.outputs.release_branch }}
    +      version_number: ${{ needs.aggregate-release-data.outputs.version_number }}
    +      build_script_path: "scripts/build-dist.sh"
    +      env_setup_script_path: "scripts/env-setup.sh"
    +      s3_bucket_name: "core-team-artifacts"
    +      package_test_command: "dbt --version"
    +      test_run: true
    +      nightly_release: true
    +    secrets: inherit
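The version number handed to the release workflow is simply the current .bumpversion.cfg version with a date-stamped dev segment appended. A minimal Python sketch of the same computation (the workflow step itself does this in bash with `date +'%m%d%Y'`):

    from datetime import date

    def nightly_version(current_version, today):
        # e.g. "1.5.0a1" on 2023-01-27 -> "1.5.0a1.dev01272023+nightly"
        return f"{current_version}.dev{today.strftime('%m%d%Y')}+nightly"

    assert nightly_version("1.5.0a1", date(2023, 1, 27)) == "1.5.0a1.dev01272023+nightly"
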
    diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
    index ade939b6ee3..043f0a3d520 100644
    --- a/.github/workflows/release.yml
    +++ b/.github/workflows/release.yml
    @@ -1,24 +1,110 @@
     # **what?**
    -# Take the given commit, run unit tests specifically on that sha, build and
    -# package it, and then release to GitHub and PyPi with that specific build
    -
+# The release workflow performs the following steps:
+# - check out the given commit;
+# - validate the version in the sources and the changelog file for the given version;
+# - bump the version and generate a changelog if needed;
+# - merge all changes to the target branch if needed;
+# - run unit and integration tests against the given commit;
+# - build and package that SHA;
+# - release it to GitHub and PyPI with that specific build;
    +#
     # **why?**
     # Ensure an automated and tested release process
    -
    +#
     # **when?**
    -# This will only run manually with a given sha and version
+# This workflow can be run manually on demand or called by other workflows
     
    -name: Release to GitHub and PyPi
    +name: Release to GitHub and PyPI
     
     on:
       workflow_dispatch:
         inputs:
           sha:
    -       description: 'The last commit sha in the release'
    -       required: true
    +        description: "The last commit sha in the release"
    +        type: string
    +        required: true
    +      target_branch:
    +        description: "The branch to release from"
    +        type: string
    +        required: true
    +      version_number:
    +        description: "The release version number (i.e. 1.0.0b1)"
    +        type: string
    +        required: true
    +      build_script_path:
    +        description: "Build script path"
    +        type: string
    +        default: "scripts/build-dist.sh"
    +        required: true
    +      env_setup_script_path:
    +        description: "Environment setup script path"
    +        type: string
    +        default: "scripts/env-setup.sh"
    +        required: false
    +      s3_bucket_name:
    +        description: "AWS S3 bucket name"
    +        type: string
    +        default: "core-team-artifacts"
    +        required: true
    +      package_test_command:
    +        description: "Package test command"
    +        type: string
    +        default: "dbt --version"
    +        required: true
    +      test_run:
    +        description: "Test run (Publish release as draft)"
    +        type: boolean
    +        default: true
    +        required: false
    +      nightly_release:
    +        description: "Nightly release to dev environment"
    +        type: boolean
    +        default: false
    +        required: false
    +  workflow_call:
    +    inputs:
    +      sha:
    +        description: "The last commit sha in the release"
    +        type: string
    +        required: true
    +      target_branch:
    +        description: "The branch to release from"
    +        type: string
    +        required: true
           version_number:
    -       description: 'The release version number (i.e. 1.0.0b1)'
    -       required: true
    +        description: "The release version number (i.e. 1.0.0b1)"
    +        type: string
    +        required: true
    +      build_script_path:
    +        description: "Build script path"
    +        type: string
    +        default: "scripts/build-dist.sh"
    +        required: true
    +      env_setup_script_path:
    +        description: "Environment setup script path"
    +        type: string
    +        default: "scripts/env-setup.sh"
    +        required: false
    +      s3_bucket_name:
    +        description: "AWS S3 bucket name"
    +        type: string
    +        default: "core-team-artifacts"
    +        required: true
    +      package_test_command:
    +        description: "Package test command"
    +        type: string
    +        default: "dbt --version"
    +        required: true
    +      test_run:
    +        description: "Test run (Publish release as draft)"
    +        type: boolean
    +        default: true
    +        required: false
    +      nightly_release:
    +        description: "Nightly release to dev environment"
    +        type: boolean
    +        default: false
    +        required: false
     
     permissions:
       contents: write # this is the permission that allows creating a new release
    @@ -28,175 +114,117 @@ defaults:
         shell: bash
     
     jobs:
    -  unit:
    -    name: Unit test
    -
    +  log-inputs:
    +    name: Log Inputs
         runs-on: ubuntu-latest
    -
    -    env:
    -      TOXENV: "unit"
    -
         steps:
    -      - name: Check out the repository
    -        uses: actions/checkout@v2
    -        with:
    -          persist-credentials: false
    -          ref: ${{ github.event.inputs.sha }}
    -
    -      - name: Set up Python
    -        uses: actions/setup-python@v2
    -        with:
    -          python-version: 3.8
    -
    -      - name: Install python dependencies
    +      - name: "[DEBUG] Print Variables"
             run: |
    -          pip install --user --upgrade pip
    -          pip install tox
    -          pip --version
    -          tox --version
    -
    -      - name: Run tox
    -        run: tox
    -
    -  build:
    -    name: build packages
    +          echo The last commit sha in the release: ${{ inputs.sha }}
    +          echo The branch to release from:         ${{ inputs.target_branch }}
    +          echo The release version number:         ${{ inputs.version_number }}
    +          echo Build script path:                  ${{ inputs.build_script_path }}
    +          echo Environment setup script path:      ${{ inputs.env_setup_script_path }}
    +          echo AWS S3 bucket name:                 ${{ inputs.s3_bucket_name }}
    +          echo Package test command:               ${{ inputs.package_test_command }}
    +          echo Test run:                           ${{ inputs.test_run }}
    +          echo Nightly release:                    ${{ inputs.nightly_release }}
    +
    +  bump-version-generate-changelog:
    +    name: Bump package version, Generate changelog
    +
    +    uses: dbt-labs/dbt-release/.github/workflows/release-prep.yml@main
    +
    +    with:
    +      sha: ${{ inputs.sha }}
    +      version_number: ${{ inputs.version_number }}
    +      target_branch: ${{ inputs.target_branch }}
    +      env_setup_script_path: ${{ inputs.env_setup_script_path }}
    +      test_run: ${{ inputs.test_run }}
    +      nightly_release: ${{ inputs.nightly_release }}
    +
    +    secrets:
    +      FISHTOWN_BOT_PAT: ${{ secrets.FISHTOWN_BOT_PAT }}
    +
    +  log-outputs-bump-version-generate-changelog:
    +    name: "[Log output] Bump package version, Generate changelog"
    +    if: ${{ !failure() && !cancelled() }}
    +
    +    needs: [bump-version-generate-changelog]
     
         runs-on: ubuntu-latest
     
         steps:
    -      - name: Check out the repository
    -        uses: actions/checkout@v2
    -        with:
    -          persist-credentials: false
    -          ref: ${{ github.event.inputs.sha }}
    -
    -      - name: Set up Python
    -        uses: actions/setup-python@v2
    -        with:
    -          python-version: 3.8
    -
    -      - name: Install python dependencies
    +      - name: Print variables
             run: |
    -          pip install --user --upgrade pip
    -          pip install --upgrade setuptools wheel twine check-wheel-contents
    -          pip --version
    -
    -      - name: Build distributions
    -        run: ./scripts/build-dist.sh
    -
    -      - name: Show distributions
    -        run: ls -lh dist/
    -
    -      - name: Check distribution descriptions
    -        run: |
    -          twine check dist/*
    -
    -      - name: Check wheel contents
    -        run: |
    -          check-wheel-contents dist/*.whl --ignore W007,W008
    -
    -      - uses: actions/upload-artifact@v2
    -        with:
    -          name: dist
    -          path: |
    -            dist/
    -            !dist/dbt-${{github.event.inputs.version_number}}.tar.gz
    -
    -  test-build:
    -    name: verify packages
    -
    -    needs: [build, unit]
    -
    -    runs-on: ubuntu-latest
    +          echo Final SHA     : ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
    +          echo Changelog path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
    +
    +  build-test-package:
    +    name: Build, Test, Package
    +    if: ${{ !failure() && !cancelled() }}
    +    needs: [bump-version-generate-changelog]
    +
    +    uses: dbt-labs/dbt-release/.github/workflows/build.yml@main
    +
    +    with:
    +      sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
    +      version_number: ${{ inputs.version_number }}
    +      changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
    +      build_script_path: ${{ inputs.build_script_path }}
    +      s3_bucket_name: ${{ inputs.s3_bucket_name }}
    +      package_test_command: ${{ inputs.package_test_command }}
    +      test_run: ${{ inputs.test_run }}
    +      nightly_release: ${{ inputs.nightly_release }}
    +
    +    secrets:
    +      AWS_ACCESS_KEY_ID: ${{ secrets.PRODUCTION_AWS_ACCESS_KEY_ID }}
    +      AWS_SECRET_ACCESS_KEY: ${{ secrets.PRODUCTION_AWS_SECRET_ACCESS_KEY }}
     
    -    steps:
    -      - name: Set up Python
    -        uses: actions/setup-python@v2
    -        with:
    -          python-version: 3.8
    -
    -      - name: Install python dependencies
    -        run: |
    -          pip install --user --upgrade pip
    -          pip install --upgrade wheel
    -          pip --version
    -
    -      - uses: actions/download-artifact@v2
    -        with:
    -          name: dist
    -          path: dist/
    -
    -      - name: Show distributions
    -        run: ls -lh dist/
    +  github-release:
    +    name: GitHub Release
    +    if: ${{ !failure() && !cancelled() }}
     
    -      - name: Install wheel distributions
    -        run: |
    -          find ./dist/*.whl -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
    +    needs: [bump-version-generate-changelog, build-test-package]
     
    -      - name: Check wheel distributions
    -        run: |
    -          dbt --version
    +    uses: dbt-labs/dbt-release/.github/workflows/github-release.yml@main
     
    -      - name: Install source distributions
    -        run: |
    -          find ./dist/*.gz -maxdepth 1 -type f | xargs pip install --force-reinstall --find-links=dist/
    +    with:
    +      sha: ${{ needs.bump-version-generate-changelog.outputs.final_sha }}
    +      version_number: ${{ inputs.version_number }}
    +      changelog_path: ${{ needs.bump-version-generate-changelog.outputs.changelog_path }}
    +      test_run: ${{ inputs.test_run }}
     
    -      - name: Check source distributions
    -        run: |
    -          dbt --version
    +  pypi-release:
    +    name: PyPI Release
     
    -  github-release:
    -    name: GitHub Release
    +    needs: [github-release]
     
    -    needs: test-build
    +    uses: dbt-labs/dbt-release/.github/workflows/pypi-release.yml@main
     
    -    runs-on: ubuntu-latest
    +    with:
    +      version_number: ${{ inputs.version_number }}
    +      test_run: ${{ inputs.test_run }}
     
    -    steps:
    -      - uses: actions/download-artifact@v2
    -        with:
    -          name: dist
    -          path: '.'
    -
    -      # Need to set an output variable because env variables can't be taken as input
    -      # This is needed for the next step with releasing to GitHub
    -      - name: Find release type
    -        id: release_type
    -        env:
    -          IS_PRERELEASE: ${{ contains(github.event.inputs.version_number, 'rc') ||  contains(github.event.inputs.version_number, 'b') }}
    -        run: |
    -          echo "isPrerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT
    -
    -      - name: Creating GitHub Release
    -        uses: softprops/action-gh-release@v1
    -        with:
    -          name: dbt-core v${{github.event.inputs.version_number}}
    -          tag_name: v${{github.event.inputs.version_number}}
    -          prerelease: ${{ steps.release_type.outputs.isPrerelease }}
    -          target_commitish: ${{github.event.inputs.sha}}
    -          body: |
    -            [Release notes](https://github.com/dbt-labs/dbt-core/blob/main/CHANGELOG.md)
    -          files: |
    -            dbt_postgres-${{github.event.inputs.version_number}}-py3-none-any.whl
    -            dbt_core-${{github.event.inputs.version_number}}-py3-none-any.whl
    -            dbt-postgres-${{github.event.inputs.version_number}}.tar.gz
    -            dbt-core-${{github.event.inputs.version_number}}.tar.gz
    +    secrets:
    +      PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
    +      TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }}
     
    -  pypi-release:
    -    name: Pypi release
    +  slack-notification:
    +    name: Slack Notification
    +    if: ${{ failure() }}
     
    -    runs-on: ubuntu-latest
    +    needs:
    +      [
    +        bump-version-generate-changelog,
    +        build-test-package,
    +        github-release,
    +        pypi-release,
    +      ]
     
    -    needs: github-release
    +    uses: dbt-labs/dbt-release/.github/workflows/slack-post-notification.yml@main
    +    with:
    +      status: "failure"
     
    -    environment: PypiProd
    -    steps:
    -      - uses: actions/download-artifact@v2
    -        with:
    -          name: dist
    -          path: 'dist'
    -
    -      - name: Publish distribution to PyPI
    -        uses: pypa/gh-action-pypi-publish@v1.4.2
    -        with:
    -          password: ${{ secrets.PYPI_API_TOKEN }}
    +    secrets:
    +      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_DEV_CORE_ALERTS }}
    diff --git a/scripts/env-setup.sh b/scripts/env-setup.sh
    new file mode 100644
    index 00000000000..42968b79eb1
    --- /dev/null
    +++ b/scripts/env-setup.sh
    @@ -0,0 +1,6 @@
    +#!/bin/bash
    +# Set environment variables required for integration tests
    +echo "DBT_INVOCATION_ENV=github-actions" >> $GITHUB_ENV
    +echo "DBT_TEST_USER_1=dbt_test_user_1" >> $GITHUB_ENV
    +echo "DBT_TEST_USER_2=dbt_test_user_2" >> $GITHUB_ENV
    +echo "DBT_TEST_USER_3=dbt_test_user_3" >> $GITHUB_ENV
    
    From c653330911ccbfdee331e56f5c2f8ced1aea5681 Mon Sep 17 00:00:00 2001
    From: Neelesh Salian 
    Date: Fri, 27 Jan 2023 11:16:59 -0800
    Subject: [PATCH 142/156] Adding nssalian to committers list (#6769)
    
    ---
     .changie.yaml | 2 +-
     1 file changed, 1 insertion(+), 1 deletion(-)
    
    diff --git a/.changie.yaml b/.changie.yaml
    index e417244506b..dbbb43daf31 100644
    --- a/.changie.yaml
    +++ b/.changie.yaml
    @@ -88,7 +88,7 @@ custom:
     footerFormat: |
       {{- $contributorDict := dict }}
       {{- /* any names added to this list should be all lowercase for later matching purposes */}}
    -  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }}
    +  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" "nssalian" }}
       {{- range $change := .Changes }}
         {{- $authorList := splitList " " $change.Custom.Author }}
         {{- /* loop through all authors for a single changelog */}}
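The footerFormat template credits community contributors by splitting each change's space-separated Author field and skipping any handle on the lowercased core-team list, which nssalian now joins. A rough Python sketch of that matching logic (function and variable names are invented for illustration):

    CORE_TEAM = {"michelleark", "emmyoop", "aranke", "nssalian"}  # abridged

    def external_contributors(changes):
        # Each change may credit several space-separated GitHub handles.
        contributors = set()
        for change in changes:
            for author in change["Custom"]["Author"].split(" "):
                if author.lower() not in CORE_TEAM:
                    contributors.add(author)
        return contributors

    assert external_contributors(
        [{"Custom": {"Author": "eve-johns nssalian"}}]
    ) == {"eve-johns"}
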
    
    From a8abc496323f741d3218d298d5d2bb118fa01017 Mon Sep 17 00:00:00 2001
    From: Neelesh Salian 
    Date: Fri, 27 Jan 2023 17:00:39 -0800
    Subject: [PATCH 143/156] [CT-1940] Stand-alone Python module for
     PostgresColumn (#6773)
    
    ---
     .changes/unreleased/Features-20230127-162812.yaml  |  6 ++++++
     plugins/postgres/dbt/adapters/postgres/__init__.py |  2 +-
     plugins/postgres/dbt/adapters/postgres/column.py   | 12 ++++++++++++
     plugins/postgres/dbt/adapters/postgres/impl.py     |  2 +-
     plugins/postgres/dbt/adapters/postgres/relation.py | 12 ------------
     5 files changed, 20 insertions(+), 14 deletions(-)
     create mode 100644 .changes/unreleased/Features-20230127-162812.yaml
     create mode 100644 plugins/postgres/dbt/adapters/postgres/column.py
    
    diff --git a/.changes/unreleased/Features-20230127-162812.yaml b/.changes/unreleased/Features-20230127-162812.yaml
    new file mode 100644
    index 00000000000..8076cf3b18d
    --- /dev/null
    +++ b/.changes/unreleased/Features-20230127-162812.yaml
    @@ -0,0 +1,6 @@
    +kind: Features
    +body: Stand-alone Python module for PostgresColumn
    +time: 2023-01-27T16:28:12.212427-08:00
    +custom:
    +  Author: nssalian
    +  Issue: "6772"
    diff --git a/plugins/postgres/dbt/adapters/postgres/__init__.py b/plugins/postgres/dbt/adapters/postgres/__init__.py
    index b5b3b7b7a09..38dce8bdb22 100644
    --- a/plugins/postgres/dbt/adapters/postgres/__init__.py
    +++ b/plugins/postgres/dbt/adapters/postgres/__init__.py
    @@ -1,7 +1,7 @@
     # these are mostly just exports, #noqa them so flake8 will be happy
     from dbt.adapters.postgres.connections import PostgresConnectionManager  # noqa
     from dbt.adapters.postgres.connections import PostgresCredentials
    -from dbt.adapters.postgres.relation import PostgresColumn  # noqa
    +from dbt.adapters.postgres.column import PostgresColumn  # noqa
     from dbt.adapters.postgres.relation import PostgresRelation  # noqa: F401
     from dbt.adapters.postgres.impl import PostgresAdapter
     
    diff --git a/plugins/postgres/dbt/adapters/postgres/column.py b/plugins/postgres/dbt/adapters/postgres/column.py
    new file mode 100644
    index 00000000000..686ec0cb8a4
    --- /dev/null
    +++ b/plugins/postgres/dbt/adapters/postgres/column.py
    @@ -0,0 +1,12 @@
    +from dbt.adapters.base import Column
    +
    +
    +class PostgresColumn(Column):
    +    @property
    +    def data_type(self):
    +        # on postgres, do not convert 'text' or 'varchar' to 'varchar()'
    +        if self.dtype.lower() == "text" or (
    +            self.dtype.lower() == "character varying" and self.char_size is None
    +        ):
    +            return self.dtype
    +        return super().data_type
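The override's effect, shown with a minimal stand-in base class (not dbt's actual Column, whose sizing logic is richer): unsized text-like types pass through unchanged, while a sized varchar still renders with its length.

    from typing import Optional

    class StandInBaseColumn:
        # Stand-in for dbt.adapters.base.Column, for illustration only.
        def __init__(self, dtype: str, char_size: Optional[int] = None):
            self.dtype = dtype
            self.char_size = char_size

        @property
        def data_type(self) -> str:
            if self.dtype.lower() == "character varying" and self.char_size:
                return f"character varying({self.char_size})"
            return self.dtype

    class StandInPostgresColumn(StandInBaseColumn):
        @property
        def data_type(self) -> str:
            # same rule as PostgresColumn above
            if self.dtype.lower() == "text" or (
                self.dtype.lower() == "character varying" and self.char_size is None
            ):
                return self.dtype
            return super().data_type

    assert StandInPostgresColumn("text").data_type == "text"
    assert StandInPostgresColumn("character varying").data_type == "character varying"
    assert StandInPostgresColumn("character varying", 80).data_type == "character varying(80)"
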
    diff --git a/plugins/postgres/dbt/adapters/postgres/impl.py b/plugins/postgres/dbt/adapters/postgres/impl.py
    index 9a5d5d3f8f6..9d729b5148e 100644
    --- a/plugins/postgres/dbt/adapters/postgres/impl.py
    +++ b/plugins/postgres/dbt/adapters/postgres/impl.py
    @@ -5,7 +5,7 @@
     from dbt.adapters.base.impl import AdapterConfig
     from dbt.adapters.sql import SQLAdapter
     from dbt.adapters.postgres import PostgresConnectionManager
    -from dbt.adapters.postgres import PostgresColumn
    +from dbt.adapters.postgres.column import PostgresColumn
     from dbt.adapters.postgres import PostgresRelation
     from dbt.dataclass_schema import dbtClassMixin, ValidationError
     from dbt.exceptions import (
    diff --git a/plugins/postgres/dbt/adapters/postgres/relation.py b/plugins/postgres/dbt/adapters/postgres/relation.py
    index 43c8c724a74..820a69b0c64 100644
    --- a/plugins/postgres/dbt/adapters/postgres/relation.py
    +++ b/plugins/postgres/dbt/adapters/postgres/relation.py
    @@ -1,4 +1,3 @@
    -from dbt.adapters.base import Column
     from dataclasses import dataclass
     from dbt.adapters.base.relation import BaseRelation
     from dbt.exceptions import DbtRuntimeError
    @@ -21,14 +20,3 @@ def __post_init__(self):
     
         def relation_max_name_length(self):
             return 63
    -
    -
    -class PostgresColumn(Column):
    -    @property
    -    def data_type(self):
    -        # on postgres, do not convert 'text' or 'varchar' to 'varchar()'
    -        if self.dtype.lower() == "text" or (
    -            self.dtype.lower() == "character varying" and self.char_size is None
    -        ):
    -            return self.dtype
    -        return super().data_type
    
    From 92d1ef84825b72f8a43a5b97c198fbe0c3db12de Mon Sep 17 00:00:00 2001
    From: Alexander Smolyakov 
    Date: Mon, 30 Jan 2023 19:19:10 +0400
    Subject: [PATCH 144/156] Update release workflow (#6778)
    
    - Update AWS secrets
    - Rework condition for Slack notification
    ---
     .github/workflows/release.yml | 6 +++---
     1 file changed, 3 insertions(+), 3 deletions(-)
    
    diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
    index 043f0a3d520..1eaf60521ba 100644
    --- a/.github/workflows/release.yml
    +++ b/.github/workflows/release.yml
    @@ -178,8 +178,8 @@ jobs:
           nightly_release: ${{ inputs.nightly_release }}
     
         secrets:
    -      AWS_ACCESS_KEY_ID: ${{ secrets.PRODUCTION_AWS_ACCESS_KEY_ID }}
    -      AWS_SECRET_ACCESS_KEY: ${{ secrets.PRODUCTION_AWS_SECRET_ACCESS_KEY }}
    +      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
    +      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
     
       github-release:
         name: GitHub Release
    @@ -212,7 +212,7 @@ jobs:
     
       slack-notification:
         name: Slack Notification
    -    if: ${{ failure() }}
    +    if: ${{ failure() && (!inputs.test_run || inputs.nightly_release) }}
     
         needs:
           [
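With this rework, a failure only pings Slack for real releases and nightly builds, never for ad-hoc test runs. The gate, restated as a Python predicate for clarity:

    def should_notify(failure, test_run, nightly_release):
        # failure() && (!inputs.test_run || inputs.nightly_release)
        return failure and (not test_run or nightly_release)

    assert should_notify(True, False, False)      # real release failure -> notify
    assert should_notify(True, True, True)        # nightly (test_run=true) -> notify
    assert not should_notify(True, True, False)   # ad-hoc test run -> stay quiet
    assert not should_notify(False, False, False) # success -> no notification
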
    
    From d2f3cdd6de4aad85aa9d7523eb1179f03a2f1666 Mon Sep 17 00:00:00 2001
    From: Kshitij Aranke 
    Date: Mon, 30 Jan 2023 07:55:28 -0800
    Subject: [PATCH 145/156] [CT-1841] Convert custom target test to Pytest
     (#6765)
    
    ---
     .../Under the Hood-20230126-164741.yaml       |    6 +
     pytest.ini                                    |    1 -
     .../create_incremental__dbt_tmp.sql           |    4 -
     .../035_docs_blocks_tests/docs/docs.md        |   17 -
     .../duplicate_docs/docs.md                    |    7 -
     .../duplicate_docs/model.sql                  |    1 -
     .../duplicate_docs/schema.yml                 |    5 -
     .../invalid_name_models/docs.md               |   12 -
     .../invalid_name_models/model.sql             |    1 -
     .../invalid_name_models/schema.yml            |   10 -
     .../missing_docs_models/docs.md               |    7 -
     .../missing_docs_models/model.sql             |    1 -
     .../missing_docs_models/schema.yml            |   11 -
     .../035_docs_blocks_tests/models/docs.md      |   17 -
     .../035_docs_blocks_tests/models/model.sql    |    1 -
     .../035_docs_blocks_tests/models/schema.yml   |   12 -
     .../038_caching_tests/models/model.sql        |    6 -
     .../another_schema_model.sql                  |    7 -
     .../models_multi_schemas/model.sql            |    6 -
     .../shouting_models/MODEL.sql                 |    6 -
     .../models/do_nothing_1.sql                   |    1 -
     .../models/do_nothing_2.sql                   |    1 -
     .../models/do_nothing_3.sql                   |    1 -
     .../test_target_path.py                       |   44 -
     test/integration/README.md                    |    1 -
     test/integration/__init__.py                  |    0
     test/integration/base.py                      | 1158 -----------------
     .../test_custom_target_path.py                |   35 +
     tox.ini                                       |    1 -
     29 files changed, 41 insertions(+), 1339 deletions(-)
     create mode 100644 .changes/unreleased/Under the Hood-20230126-164741.yaml
     delete mode 100644 test/integration/017_runtime_materialization_tests/create_incremental__dbt_tmp.sql
     delete mode 100644 test/integration/035_docs_blocks_tests/docs/docs.md
     delete mode 100644 test/integration/035_docs_blocks_tests/duplicate_docs/docs.md
     delete mode 100644 test/integration/035_docs_blocks_tests/duplicate_docs/model.sql
     delete mode 100644 test/integration/035_docs_blocks_tests/duplicate_docs/schema.yml
     delete mode 100644 test/integration/035_docs_blocks_tests/invalid_name_models/docs.md
     delete mode 100644 test/integration/035_docs_blocks_tests/invalid_name_models/model.sql
     delete mode 100644 test/integration/035_docs_blocks_tests/invalid_name_models/schema.yml
     delete mode 100644 test/integration/035_docs_blocks_tests/missing_docs_models/docs.md
     delete mode 100644 test/integration/035_docs_blocks_tests/missing_docs_models/model.sql
     delete mode 100644 test/integration/035_docs_blocks_tests/missing_docs_models/schema.yml
     delete mode 100644 test/integration/035_docs_blocks_tests/models/docs.md
     delete mode 100644 test/integration/035_docs_blocks_tests/models/model.sql
     delete mode 100644 test/integration/035_docs_blocks_tests/models/schema.yml
     delete mode 100644 test/integration/038_caching_tests/models/model.sql
     delete mode 100644 test/integration/038_caching_tests/models_multi_schemas/another_schema_model.sql
     delete mode 100644 test/integration/038_caching_tests/models_multi_schemas/model.sql
     delete mode 100644 test/integration/038_caching_tests/shouting_models/MODEL.sql
     delete mode 100644 test/integration/075_custom_target_path/models/do_nothing_1.sql
     delete mode 100644 test/integration/075_custom_target_path/models/do_nothing_2.sql
     delete mode 100644 test/integration/075_custom_target_path/models/do_nothing_3.sql
     delete mode 100644 test/integration/075_custom_target_path/test_target_path.py
     delete mode 100644 test/integration/README.md
     delete mode 100644 test/integration/__init__.py
     delete mode 100644 test/integration/base.py
     create mode 100644 tests/functional/custom_target_path/test_custom_target_path.py
    
    diff --git a/.changes/unreleased/Under the Hood-20230126-164741.yaml b/.changes/unreleased/Under the Hood-20230126-164741.yaml
    new file mode 100644
    index 00000000000..803768d9e3a
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230126-164741.yaml	
    @@ -0,0 +1,6 @@
    +kind: Under the Hood
    +body: '[CT-1841] Convert custom target test to Pytest'
    +time: 2023-01-26T16:47:41.198714-08:00
    +custom:
    +  Author: aranke
    +  Issue: "6638"
    diff --git a/pytest.ini b/pytest.ini
    index 1761d5ae157..26ae97594fe 100644
    --- a/pytest.ini
    +++ b/pytest.ini
    @@ -6,5 +6,4 @@ env_files =
         test.env
     testpaths =
         test/unit
    -    test/integration
         tests/functional
    diff --git a/test/integration/017_runtime_materialization_tests/create_incremental__dbt_tmp.sql b/test/integration/017_runtime_materialization_tests/create_incremental__dbt_tmp.sql
    deleted file mode 100644
    index ee4bebffa85..00000000000
    --- a/test/integration/017_runtime_materialization_tests/create_incremental__dbt_tmp.sql
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -
    -create table {schema}.incremental__dbt_tmp as (	
    -    select 1 as id	
    -);	
    diff --git a/test/integration/035_docs_blocks_tests/docs/docs.md b/test/integration/035_docs_blocks_tests/docs/docs.md
    deleted file mode 100644
    index 9420840d311..00000000000
    --- a/test/integration/035_docs_blocks_tests/docs/docs.md
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -{% docs my_model_doc %}
    -Alt text about the model
    -{% enddocs %}
    -
    -{% docs my_model_doc__id %}
    -The user ID number with alternative text
    -{% enddocs %}
    -
    -The following doc is never used, which should be fine.
    -{% docs my_model_doc__first_name %}
    -The user's first name - don't show this text!
    -{% enddocs %}
    -
    -This doc is referenced by its full name
    -{% docs my_model_doc__last_name %}
    -The user's last name in this other file
    -{% enddocs %}
    diff --git a/test/integration/035_docs_blocks_tests/duplicate_docs/docs.md b/test/integration/035_docs_blocks_tests/duplicate_docs/docs.md
    deleted file mode 100644
    index 8499e541d56..00000000000
    --- a/test/integration/035_docs_blocks_tests/duplicate_docs/docs.md
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -{% docs my_model_doc %}
    -    a doc string
    -{% enddocs %}
    -
    -{% docs my_model_doc %}
    -    duplicate doc string
    -{% enddocs %}
    diff --git a/test/integration/035_docs_blocks_tests/duplicate_docs/model.sql b/test/integration/035_docs_blocks_tests/duplicate_docs/model.sql
    deleted file mode 100644
    index 3397a804a20..00000000000
    --- a/test/integration/035_docs_blocks_tests/duplicate_docs/model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as id, 'joe' as first_name
    \ No newline at end of file
    diff --git a/test/integration/035_docs_blocks_tests/duplicate_docs/schema.yml b/test/integration/035_docs_blocks_tests/duplicate_docs/schema.yml
    deleted file mode 100644
    index a04a43e4407..00000000000
    --- a/test/integration/035_docs_blocks_tests/duplicate_docs/schema.yml
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model
    -    description: "{{ doc('my_model_doc') }}"
    diff --git a/test/integration/035_docs_blocks_tests/invalid_name_models/docs.md b/test/integration/035_docs_blocks_tests/invalid_name_models/docs.md
    deleted file mode 100644
    index 57e58698a30..00000000000
    --- a/test/integration/035_docs_blocks_tests/invalid_name_models/docs.md
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -{% docs my_model_doc %}
    -My model is just a copy of the seed
    -{% enddocs %}
    -
    -{% docs my_model_doc__id %}
    -The user ID number
    -{% enddocs %}
    -
    -The following doc is never used, which should be fine.
    -{% docs my_model_doc__first_name %}
    -The user's first name
    -{% enddocs %}
    diff --git a/test/integration/035_docs_blocks_tests/invalid_name_models/model.sql b/test/integration/035_docs_blocks_tests/invalid_name_models/model.sql
    deleted file mode 100644
    index 5205bf286d5..00000000000
    --- a/test/integration/035_docs_blocks_tests/invalid_name_models/model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as id, 'joe' as first_name
    diff --git a/test/integration/035_docs_blocks_tests/invalid_name_models/schema.yml b/test/integration/035_docs_blocks_tests/invalid_name_models/schema.yml
    deleted file mode 100644
    index 0676d404d05..00000000000
    --- a/test/integration/035_docs_blocks_tests/invalid_name_models/schema.yml
    +++ /dev/null
    @@ -1,10 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model
    -    description: "{{ doc('my_model_doc') }}"
    -    columns:
    -      - name: id
    -        description: "{{ doc('my_model_doc__id') }}"
    -      - name: first_name
    -        description: "{{ doc('foo.bar.my_model_doc__id') }}"
    diff --git a/test/integration/035_docs_blocks_tests/missing_docs_models/docs.md b/test/integration/035_docs_blocks_tests/missing_docs_models/docs.md
    deleted file mode 100644
    index 84ceb700c8d..00000000000
    --- a/test/integration/035_docs_blocks_tests/missing_docs_models/docs.md
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -{% docs my_model_doc %}
    -My model is just a copy of the seed
    -{% enddocs %}
    -
    -{% docs my_model_doc__id %}
    -The user ID number
    -{% enddocs %}
    diff --git a/test/integration/035_docs_blocks_tests/missing_docs_models/model.sql b/test/integration/035_docs_blocks_tests/missing_docs_models/model.sql
    deleted file mode 100644
    index 5205bf286d5..00000000000
    --- a/test/integration/035_docs_blocks_tests/missing_docs_models/model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as id, 'joe' as first_name
    diff --git a/test/integration/035_docs_blocks_tests/missing_docs_models/schema.yml b/test/integration/035_docs_blocks_tests/missing_docs_models/schema.yml
    deleted file mode 100644
    index 5186951dbc8..00000000000
    --- a/test/integration/035_docs_blocks_tests/missing_docs_models/schema.yml
    +++ /dev/null
    @@ -1,11 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model
    -    description: "{{ doc('my_model_doc') }}"
    -    columns:
    -      - name: id
    -        description: "{{ doc('my_model_doc__id') }}"
    -      - name: first_name
    -      # invalid reference
    -        description: "{{ doc('my_model_doc__first_name') }}"
    diff --git a/test/integration/035_docs_blocks_tests/models/docs.md b/test/integration/035_docs_blocks_tests/models/docs.md
    deleted file mode 100644
    index 76ef8f051b7..00000000000
    --- a/test/integration/035_docs_blocks_tests/models/docs.md
    +++ /dev/null
    @@ -1,17 +0,0 @@
    -{% docs my_model_doc %}
    -My model is just a copy of the seed
    -{% enddocs %}
    -
    -{% docs my_model_doc__id %}
    -The user ID number
    -{% enddocs %}
    -
    -The following doc is never used, which should be fine.
    -{% docs my_model_doc__first_name %}
    -The user's first name (should not be shown!)
    -{% enddocs %}
    -
    -This doc is referenced by its full name
    -{% docs my_model_doc__last_name %}
    -The user's last name
    -{% enddocs %}
    diff --git a/test/integration/035_docs_blocks_tests/models/model.sql b/test/integration/035_docs_blocks_tests/models/model.sql
    deleted file mode 100644
    index c7bbcd398cc..00000000000
    --- a/test/integration/035_docs_blocks_tests/models/model.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as id, 'joe' as first_name, 'smith' as last_name
    diff --git a/test/integration/035_docs_blocks_tests/models/schema.yml b/test/integration/035_docs_blocks_tests/models/schema.yml
    deleted file mode 100644
    index bffa5d74629..00000000000
    --- a/test/integration/035_docs_blocks_tests/models/schema.yml
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -version: 2
    -
    -models:
    -  - name: model
    -    description: "{{ doc('my_model_doc') }}"
    -    columns:
    -      - name: id
    -        description: "{{ doc('my_model_doc__id') }}"
    -      - name: first_name
    -        description: The user's first name
    -      - name: last_name
    -        description: "{{ doc('test', 'my_model_doc__last_name') }}"
    diff --git a/test/integration/038_caching_tests/models/model.sql b/test/integration/038_caching_tests/models/model.sql
    deleted file mode 100644
    index 620d4b3dd94..00000000000
    --- a/test/integration/038_caching_tests/models/model.sql
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -{{
    -    config(
    -        materialized='table'
    -    )
    -}}
    -select 1 as id
    diff --git a/test/integration/038_caching_tests/models_multi_schemas/another_schema_model.sql b/test/integration/038_caching_tests/models_multi_schemas/another_schema_model.sql
    deleted file mode 100644
    index d27df03b0f7..00000000000
    --- a/test/integration/038_caching_tests/models_multi_schemas/another_schema_model.sql
    +++ /dev/null
    @@ -1,7 +0,0 @@
    -{{
    -    config(
    -        materialized='table',
    -        schema='another_schema'
    -    )
    -}}
    -select 1 as id
    diff --git a/test/integration/038_caching_tests/models_multi_schemas/model.sql b/test/integration/038_caching_tests/models_multi_schemas/model.sql
    deleted file mode 100644
    index 620d4b3dd94..00000000000
    --- a/test/integration/038_caching_tests/models_multi_schemas/model.sql
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -{{
    -    config(
    -        materialized='table'
    -    )
    -}}
    -select 1 as id
    diff --git a/test/integration/038_caching_tests/shouting_models/MODEL.sql b/test/integration/038_caching_tests/shouting_models/MODEL.sql
    deleted file mode 100644
    index 620d4b3dd94..00000000000
    --- a/test/integration/038_caching_tests/shouting_models/MODEL.sql
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -{{
    -    config(
    -        materialized='table'
    -    )
    -}}
    -select 1 as id
    diff --git a/test/integration/075_custom_target_path/models/do_nothing_1.sql b/test/integration/075_custom_target_path/models/do_nothing_1.sql
    deleted file mode 100644
    index f951e920316..00000000000
    --- a/test/integration/075_custom_target_path/models/do_nothing_1.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as x
    diff --git a/test/integration/075_custom_target_path/models/do_nothing_2.sql b/test/integration/075_custom_target_path/models/do_nothing_2.sql
    deleted file mode 100644
    index 1b2fc1bbdbf..00000000000
    --- a/test/integration/075_custom_target_path/models/do_nothing_2.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as y
    diff --git a/test/integration/075_custom_target_path/models/do_nothing_3.sql b/test/integration/075_custom_target_path/models/do_nothing_3.sql
    deleted file mode 100644
    index 4ebffeadc79..00000000000
    --- a/test/integration/075_custom_target_path/models/do_nothing_3.sql
    +++ /dev/null
    @@ -1 +0,0 @@
    -select 1 as z
    diff --git a/test/integration/075_custom_target_path/test_target_path.py b/test/integration/075_custom_target_path/test_target_path.py
    deleted file mode 100644
    index d87f358363f..00000000000
    --- a/test/integration/075_custom_target_path/test_target_path.py
    +++ /dev/null
    @@ -1,44 +0,0 @@
    -import os
    -from unittest import mock
    -from test.integration.base import DBTIntegrationTest, use_profile
    -
    -
    -class TestTargetPathFromProjectConfig(DBTIntegrationTest):
    -    @property
    -    def project_config(self):
    -        return {"config-version": 2, "target-path": "project_target"}
    -
    -    @property
    -    def schema(self):
    -        return "target_path_tests_075"
    -
    -    @property
    -    def models(self):
    -        return "models"
    -
    -    @use_profile("postgres")
    -    def test_postgres_overriden_target_path(self):
    -        results = self.run_dbt(args=["run"])
    -        self.assertFalse(os.path.exists("./target"))
    -        self.assertTrue(os.path.exists("./project_target"))
    -
    -
    -class TestTargetPathOverridenEnv(TestTargetPathFromProjectConfig):
    -    @use_profile("postgres")
    -    def test_postgres_overriden_target_path(self):
    -        with mock.patch.dict(os.environ, {"DBT_TARGET_PATH": "env_target"}):
    -            results = self.run_dbt(args=["run"])
    -        self.assertFalse(os.path.exists("./target"))
    -        self.assertFalse(os.path.exists("./project_target"))
    -        self.assertTrue(os.path.exists("./env_target"))
    -
    -
    -class TestTargetPathOverridenEnvironment(TestTargetPathFromProjectConfig):
    -    @use_profile("postgres")
    -    def test_postgres_overriden_target_path(self):
    -        with mock.patch.dict(os.environ, {"DBT_TARGET_PATH": "env_target"}):
    -            results = self.run_dbt(args=["run", "--target-path", "cli_target"])
    -        self.assertFalse(os.path.exists("./target"))
    -        self.assertFalse(os.path.exists("./project_target"))
    -        self.assertFalse(os.path.exists("./env_target"))
    -        self.assertTrue(os.path.exists("./cli_target"))
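The three deleted classes encode a precedence order that the new pytest module keeps exercising: the --target-path CLI flag beats the DBT_TARGET_PATH environment variable, which beats the project config, which beats the default target directory. A hypothetical sketch of that precedence (not dbt's actual resolution code):

    import os

    def resolve_target_path(cli_flag=None, project_config=None):
        # First non-empty value wins, mirroring what the tests assert.
        return (
            cli_flag
            or os.environ.get("DBT_TARGET_PATH")
            or project_config
            or "target"
        )
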
    diff --git a/test/integration/README.md b/test/integration/README.md
    deleted file mode 100644
    index bec5bc9d068..00000000000
    --- a/test/integration/README.md
    +++ /dev/null
    @@ -1 +0,0 @@
    -# Integration test README
    diff --git a/test/integration/__init__.py b/test/integration/__init__.py
    deleted file mode 100644
    index e69de29bb2d..00000000000
    diff --git a/test/integration/base.py b/test/integration/base.py
    deleted file mode 100644
    index ae1dfc6480e..00000000000
    --- a/test/integration/base.py
    +++ /dev/null
    @@ -1,1158 +0,0 @@
    -from io import StringIO
    -import json
    -import os
    -import random
    -import shutil
    -import sys
    -import tempfile
    -import time
    -import traceback
    -import unittest
    -import warnings
    -from contextlib import contextmanager
    -from datetime import datetime
    -from functools import wraps
    -
    -import pytest
    -import yaml
    -from unittest.mock import patch
    -
    -import dbt.main as dbt
    -from dbt import flags
    -from dbt.deprecations import reset_deprecations
    -from dbt.adapters.factory import get_adapter, reset_adapters, register_adapter
    -from dbt.clients.jinja import template_cache
    -from dbt.config import RuntimeConfig
    -from dbt.context import providers
    -from dbt.logger import log_manager
    -from dbt.events.functions import (
    -    capture_stdout_logs, fire_event, setup_event_logger, cleanup_event_logger, stop_capture_stdout_logs
    -)
    -from dbt.events.test_types import (
    -    IntegrationTestInfo,
    -    IntegrationTestDebug,
    -    IntegrationTestException
    -)
    -from dbt.contracts.graph.manifest import Manifest
    -
    -
    -INITIAL_ROOT = os.getcwd()
    -
    -# Ensure dbt interal flags have been set from env vars.
    -flags.set_from_args({}, {})
    -
    -
    -def normalize(path):
    -    """On windows, neither is enough on its own:
    -
    -    >>> normcase('C:\\documents/ALL CAPS/subdir\\..')
    -    'c:\\documents\\all caps\\subdir\\..'
    -    >>> normpath('C:\\documents/ALL CAPS/subdir\\..')
    -    'C:\\documents\\ALL CAPS'
    -    >>> normpath(normcase('C:\\documents/ALL CAPS/subdir\\..'))
    -    'c:\\documents\\all caps'
    -    """
    -    return os.path.normcase(os.path.normpath(path))
    -
    -
    -class Normalized:
    -    def __init__(self, value):
    -        self.value = value
    -
    -    def __repr__(self):
    -        return f'Normalized({self.value!r})'
    -
    -    def __str__(self):
    -        return f'Normalized({self.value!s})'
    -
    -    def __eq__(self, other):
    -        return normalize(self.value) == normalize(other)
    -
    -
    -class TestArgs:
    -    def __init__(self, kwargs):
    -        self.which = 'run'
    -        self.single_threaded = False
    -        self.profiles_dir = None
    -        self.project_dir = None
    -        self.__dict__.update(kwargs)
    -
    -
    -def _profile_from_test_name(test_name):
    -    adapter_names = ('postgres', 'presto')
    -    adapters_in_name = sum(x in test_name for x in adapter_names)
    -    if adapters_in_name != 1:
    -        raise ValueError(
    -            'test names must have exactly 1 profile choice embedded, {} has {}'
    -            .format(test_name, adapters_in_name)
    -        )
    -
    -    for adapter_name in adapter_names:
    -        if adapter_name in test_name:
    -            return adapter_name
    -
    -    raise ValueError(
    -        'could not find adapter name in test name {}'.format(test_name)
    -    )
    -
    -
    -def _pytest_test_name():
    -    return os.environ['PYTEST_CURRENT_TEST'].split()[0]
    -
    -
    -def _pytest_get_test_root():
    -    test_path = _pytest_test_name().split('::')[0]
    -    relative_to = INITIAL_ROOT
    -    head = os.path.relpath(test_path, relative_to)
    -
    -    path_parts = []
    -    while head:
    -        head, tail = os.path.split(head)
    -        path_parts.append(tail)
    -    path_parts.reverse()
    -    # dbt tests are all of the form 'test/integration/XXX_suite_name'
    -    target = os.path.join(*path_parts[:3])
    -    return os.path.join(relative_to, target)
    -
    -
    -def _really_makedirs(path):
    -    while not os.path.exists(path):
    -        try:
    -            os.makedirs(path)
    -        except EnvironmentError:
    -            raise
    -
    -
    -class DBTIntegrationTest(unittest.TestCase):
    -    CREATE_SCHEMA_STATEMENT = 'CREATE SCHEMA {}'
    -    DROP_SCHEMA_STATEMENT = 'DROP SCHEMA IF EXISTS {} CASCADE'
    -
    -    _randint = random.randint(0, 9999)
    -    _runtime_timedelta = (datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0))
    -    _runtime = (
    -        (int(_runtime_timedelta.total_seconds() * 1e6)) +
    -        _runtime_timedelta.microseconds
    -    )
    -
    -    prefix = f'test{_runtime}{_randint:04}'
    -    setup_alternate_db = False
    -
    -    @property
    -    def database_host(self):
    -        return os.getenv('POSTGRES_TEST_HOST', 'localhost')
    -
    -    def postgres_profile(self):
    -        return {
    -            'config': {
    -                'send_anonymous_usage_stats': False
    -            },
    -            'test': {
    -                'outputs': {
    -                    'default2': {
    -                        'type': 'postgres',
    -                        'threads': 4,
    -                        'host': self.database_host,
    -                        'port': int(os.getenv('POSTGRES_TEST_PORT', 5432)),
    -                        'user': os.getenv('POSTGRES_TEST_USER', 'root'),
    -                        'pass': os.getenv('POSTGRES_TEST_PASS', 'password'),
    -                        'dbname': os.getenv('POSTGRES_TEST_DATABASE', 'dbt'),
    -                        'schema': self.unique_schema()
    -                    },
    -                    'noaccess': {
    -                        'type': 'postgres',
    -                        'threads': 4,
    -                        'host': self.database_host,
    -                        'port': int(os.getenv('POSTGRES_TEST_PORT', 5432)),
    -                        'user': 'noaccess',
    -                        'pass': 'password',
    -                        'dbname': os.getenv('POSTGRES_TEST_DATABASE', 'dbt'),
    -                        'schema': self.unique_schema()
    -                    }
    -                },
    -                'target': 'default2'
    -            }
    -        }
    -
    -    def snowflake_profile(self):
    -        return {
    -            'config': {
    -                'send_anonymous_usage_stats': False
    -            },
    -            'test': {
    -                'outputs': {
    -                    'default2': {
    -                        'type': 'snowflake',
    -                        'threads': 4,
    -                        'account': os.getenv('SNOWFLAKE_TEST_ACCOUNT'),
    -                        'user': os.getenv('SNOWFLAKE_TEST_USER'),
    -                        'password': os.getenv('SNOWFLAKE_TEST_PASSWORD'),
    -                        'database': os.getenv('SNOWFLAKE_TEST_DATABASE'),
    -                        'schema': self.unique_schema(),
    -                        'warehouse': os.getenv('SNOWFLAKE_TEST_WAREHOUSE'),
    -                    },
    -                    'noaccess': {
    -                        'type': 'snowflake',
    -                        'threads': 4,
    -                        'account': os.getenv('SNOWFLAKE_TEST_ACCOUNT'),
    -                        'user': 'noaccess',
    -                        'password': 'password',
    -                        'database': os.getenv('SNOWFLAKE_TEST_DATABASE'),
    -                        'schema': self.unique_schema(),
    -                        'warehouse': os.getenv('SNOWFLAKE_TEST_WAREHOUSE'),
    -                    },
    -                    'oauth': {
    -                        'type': 'snowflake',
    -                        'threads': 4,
    -                        'account': os.getenv('SNOWFLAKE_TEST_ACCOUNT'),
    -                        'user': os.getenv('SNOWFLAKE_TEST_USER'),
    -                        'oauth_client_id': os.getenv('SNOWFLAKE_TEST_OAUTH_CLIENT_ID'),
    -                        'oauth_client_secret': os.getenv('SNOWFLAKE_TEST_OAUTH_CLIENT_SECRET'),
    -                        'token': os.getenv('SNOWFLAKE_TEST_OAUTH_REFRESH_TOKEN'),
    -                        'database': os.getenv('SNOWFLAKE_TEST_DATABASE'),
    -                        'schema': self.unique_schema(),
    -                        'warehouse': os.getenv('SNOWFLAKE_TEST_WAREHOUSE'),
    -                        'authenticator': 'oauth',
    -                    },
    -                },
    -                'target': 'default2'
    -            }
    -        }
    -
    -    def presto_profile(self):
    -        return {
    -            'config': {
    -                'send_anonymous_usage_stats': False
    -            },
    -            'test': {
    -                'outputs': {
    -                    'default2': {
    -                        'type': 'presto',
    -                        'method': 'none',
    -                        'threads': 1,
    -                        'schema': self.unique_schema(),
    -                        'database': 'hive',
    -                        'host': 'presto',
    -                        'port': 8080,
    -                    },
    -                },
    -                'target': 'default2'
    -            }
    -        }
    -
    -    @property
    -    def packages_config(self):
    -        return None
    -
    -    @property
    -    def selectors_config(self):
    -        return None
    -
    -    def unique_schema(self):
    -        schema = self.schema
    -
    -        to_return = "{}_{}".format(self.prefix, schema)
    -
    -        if self.adapter_type == 'snowflake':
    -            return to_return.upper()
    -
    -        return to_return.lower()
    -
    -    @property
    -    def default_database(self):
    -        database = self.config.credentials.database
    -        if self.adapter_type == 'snowflake':
    -            return database.upper()
    -        return database
    -
    -    @property
    -    def alternative_database(self):
    -        return None
    -
    -    def get_profile(self, adapter_type):
    -        if adapter_type == 'postgres':
    -            return self.postgres_profile()
    -        elif adapter_type == 'presto':
    -            return self.presto_profile()
    -        else:
    -            raise ValueError('invalid adapter type {}'.format(adapter_type))
    -
    -    def _pick_profile(self):
    -        test_name = self.id().split('.')[-1]
    -        return _profile_from_test_name(test_name)
    -
    -    def _symlink_test_folders(self):
    -        for entry in os.listdir(self.test_original_source_path):
    -            src = os.path.join(self.test_original_source_path, entry)
    -            tst = os.path.join(self.test_root_dir, entry)
    -            if os.path.isdir(src) or src.endswith('.sql'):
    -                # symlink all sql files and all directories.
    -                os.symlink(src, tst)
    -        os.symlink(self._logs_dir, os.path.join(self.test_root_dir, 'logs'))
    -
    -    @property
    -    def test_root_realpath(self):
    -        if sys.platform == 'darwin':
    -            return os.path.realpath(self.test_root_dir)
    -        else:
    -            return self.test_root_dir
    -
    -    def _generate_test_root_dir(self):
    -        return normalize(tempfile.mkdtemp(prefix='dbt-int-test-'))
    -
    -    def setUp(self):
    -        # Logbook warnings are ignored so we don't have to fork logbook to support python 3.10.
    -        # This _only_ works for tests in `test/integration`.
    -        warnings.filterwarnings(
    -            "ignore",
    -            category=DeprecationWarning,
    -            module="logbook"
    -        )
    -        self.dbt_core_install_root = os.path.dirname(dbt.__file__)
    -        log_manager.reset_handlers()
    -        self.initial_dir = INITIAL_ROOT
    -        os.chdir(self.initial_dir)
    -        # before we go anywhere, collect the initial path info
    -        self._logs_dir = os.path.join(self.initial_dir, 'logs', self.prefix)
    -        setup_event_logger(self._logs_dir)
    -        _really_makedirs(self._logs_dir)
    -        self.test_original_source_path = _pytest_get_test_root()
    -        self.test_root_dir = self._generate_test_root_dir()
    -
    -        os.chdir(self.test_root_dir)
    -        try:
    -            self._symlink_test_folders()
    -        except Exception as exc:
    -            msg = '\n\t'.join((
    -                'Failed to symlink test folders!',
    -                'initial_dir={0.initial_dir}',
    -                'test_original_source_path={0.test_original_source_path}',
    -                'test_root_dir={0.test_root_dir}'
    -            )).format(self)
    -            fire_event(IntegrationTestException(msg=msg))
    -
    -            # if logging isn't set up, I still really want this message.
    -            print(msg)
    -            traceback.print_exc()
    -
    -            raise
    -
    -        self._created_schemas = set()
    -        reset_deprecations()
    -        template_cache.clear()
    -
    -        self.use_profile(self._pick_profile())
    -        self.use_default_project()
    -        self.set_packages()
    -        self.set_selectors()
    -        self.load_config()
    -
    -    def use_default_project(self, overrides=None):
    -        # create a dbt_project.yml
    -        base_project_config = {
    -            'name': 'test',
    -            'version': '1.0',
    -            'config-version': 2,
    -            'test-paths': [],
    -            'model-paths': [self.models],
    -            'profile': 'test',
    -        }
    -
    -        project_config = {}
    -        project_config.update(base_project_config)
    -        project_config.update(self.project_config)
    -        project_config.update(overrides or {})
    -
    -        with open("dbt_project.yml", 'w') as f:
    -            yaml.safe_dump(project_config, f, default_flow_style=True)
    -
    -    def use_profile(self, adapter_type):
    -        self.adapter_type = adapter_type
    -
    -        profile_config = {}
    -        default_profile_config = self.get_profile(adapter_type)
    -
    -        profile_config.update(default_profile_config)
    -        profile_config.update(self.profile_config)
    -
    -        if not os.path.exists(self.test_root_dir):
    -            os.makedirs(self.test_root_dir)
    -
    -        flags.PROFILES_DIR = self.test_root_dir
    -        profiles_path = os.path.join(self.test_root_dir, 'profiles.yml')
    -        with open(profiles_path, 'w') as f:
    -            yaml.safe_dump(profile_config, f, default_flow_style=True)
    -        self._profile_config = profile_config
    -
    -    def set_packages(self):
    -        if self.packages_config is not None:
    -            with open('packages.yml', 'w') as f:
    -                yaml.safe_dump(self.packages_config, f, default_flow_style=True)
    -
    -    def set_selectors(self):
    -        if self.selectors_config is not None:
    -            with open('selectors.yml', 'w') as f:
    -                yaml.safe_dump(self.selectors_config, f, default_flow_style=True)
    -
    -    def load_config(self):
    -        # we've written our profile and project. Now we want to instantiate a
    -        # fresh adapter for the tests.
    -        # it's important to use a different connection handle here so
    -        # we don't look into an incomplete transaction
    -        kwargs = {
    -            'profile': None,
    -            'profiles_dir': self.test_root_dir,
    -            'target': None,
    -        }
    -
    -        config = RuntimeConfig.from_args(TestArgs(kwargs))
    -
    -        register_adapter(config)
    -        adapter = get_adapter(config)
    -        adapter.cleanup_connections()
    -        self.adapter_type = adapter.type()
    -        self.adapter = adapter
    -        self.config = config
    -
    -        self._drop_schemas()
    -        self._create_schemas()
    -
    -    def quote_as_configured(self, value, quote_key):
    -        return self.adapter.quote_as_configured(value, quote_key)
    -
    -    def tearDown(self):
    -        # get any current run adapter and clean up its connections before we
    -        # reset them. It'll probably be different from ours because
    -        # handle_and_check() calls reset_adapters().
    -        register_adapter(self.config)
    -        adapter = get_adapter(self.config)
    -        if adapter is not self.adapter:
    -            adapter.cleanup_connections()
    -        if not hasattr(self, 'adapter'):
    -            self.adapter = adapter
    -
    -        self._drop_schemas()
    -
    -        self.adapter.cleanup_connections()
    -        reset_adapters()
    -        os.chdir(INITIAL_ROOT)
    -        try:
    -            shutil.rmtree(self.test_root_dir)
    -        except EnvironmentError:
    -            msg = f"Could not clean up after test - {self.test_root_dir} not removable"
    -            fire_event(IntegrationTestException(msg=msg))
    -        
    -        cleanup_event_logger()
    -
    -    def _get_schema_fqn(self, database, schema):
    -        schema_fqn = self.quote_as_configured(schema, 'schema')
    -        if self.adapter_type == 'snowflake':
    -            database = self.quote_as_configured(database, 'database')
    -            schema_fqn = '{}.{}'.format(database, schema_fqn)
    -        return schema_fqn
    -
    -    def _create_schema_named(self, database, schema):
    -        schema_fqn = self._get_schema_fqn(database, schema)
    -        self.run_sql(self.CREATE_SCHEMA_STATEMENT.format(schema_fqn))
    -        self._created_schemas.add(schema_fqn)
    -
    -    def _drop_schema_named(self, database, schema):
    -        if self.adapter_type == 'presto':
    -            relation = self.adapter.Relation.create(database=database, schema=schema)
    -            self.adapter.drop_schema(relation)
    -        else:
    -            schema_fqn = self._get_schema_fqn(database, schema)
    -            self.run_sql(self.DROP_SCHEMA_STATEMENT.format(schema_fqn))
    -
    -    def _create_schemas(self):
    -        schema = self.unique_schema()
    -        with self.adapter.connection_named('__test'):
    -            self._create_schema_named(self.default_database, schema)
    -            if self.setup_alternate_db and self.adapter_type == 'snowflake':
    -                self._create_schema_named(self.alternative_database, schema)
    -
    -    def _drop_schemas_adapter(self):
    -        schema = self.unique_schema()
    -        if self.adapter_type == 'presto':
    -            self._drop_schema_named(self.default_database, schema)
    -            if self.setup_alternate_db and self.alternative_database:
    -                self._drop_schema_named(self.alternative_database, schema)
    -
    -    def _drop_schemas_sql(self):
    -        schema = self.unique_schema()
    -        # we always want to drop these if necessary, we'll clear it soon.
    -        self._created_schemas.add(
    -            self._get_schema_fqn(self.default_database, schema)
    -        )
    -        # on postgres, this will make you sad
    -        drop_alternative = (
    -            self.setup_alternate_db and
    -            self.adapter_type not in {'postgres'} and
    -            self.alternative_database
    -        )
    -        if drop_alternative:
    -            self._created_schemas.add(
    -                self._get_schema_fqn(self.alternative_database, schema)
    -            )
    -
    -        for schema_fqn in self._created_schemas:
    -            self.run_sql(self.DROP_SCHEMA_STATEMENT.format(schema_fqn))
    -
    -        self._created_schemas.clear()
    -
    -    def _drop_schemas(self):
    -        with self.adapter.connection_named('__test'):
    -            if self.adapter_type == 'presto':
    -                self._drop_schemas_adapter()
    -            else:
    -                self._drop_schemas_sql()
    -
    -    @property
    -    def project_config(self):
    -        return {
    -            'config-version': 2,
    -        }
    -
    -    @property
    -    def profile_config(self):
    -        return {}
    -
    -    def run_dbt(self, args=None, expect_pass=True, profiles_dir=True):
    -        res, success = self.run_dbt_and_check(args=args, profiles_dir=profiles_dir)
    -        self.assertEqual(
    -            success, expect_pass,
    -            "dbt exit state did not match expected")
    -
    -        return res
    -
    -    def run_dbt_and_capture(self, *args, **kwargs):
    -        try:
    -            stringbuf = StringIO()
    -            capture_stdout_logs(stringbuf)
    -            res = self.run_dbt(*args, **kwargs)
    -            stdout = stringbuf.getvalue()
    -
    -        finally:
    -            stop_capture_stdout_logs()
    -
    -        return res, stdout
    -
    -    def run_dbt_and_check(self, args=None, profiles_dir=True):
    -        log_manager.reset_handlers()
    -        if args is None:
    -            args = ["run"]
    -
    -        final_args = []
    -
    -        if os.getenv('DBT_TEST_SINGLE_THREADED') in ('y', 'Y', '1'):
    -            final_args.append('--single-threaded')
    -
    -        final_args.extend(args)
    -
    -        if profiles_dir:
    -            final_args.extend(['--profiles-dir', self.test_root_dir])
    -        final_args.append('--log-cache-events')
    -        # msg = f"Invoking dbt with {final_args}"
    -        # fire_event(IntegrationTestInfo(msg=msg))
    -        return dbt.handle_and_check(final_args)
    -
    -    def run_sql_file(self, path, kwargs=None):
    -        with open(path, 'r') as f:
    -            statements = f.read().split(";")
    -            for statement in statements:
    -                self.run_sql(statement, kwargs=kwargs)
    -
    -    # horrible hack to support snowflake for right now
    -    def transform_sql(self, query, kwargs=None):
    -        to_return = query
    -
    -        if self.adapter_type == 'snowflake':
    -            to_return = to_return.replace("BIGSERIAL", "BIGINT AUTOINCREMENT")
    -
    -        base_kwargs = {
    -            'schema': self.unique_schema(),
    -            'database': self.adapter.quote(self.default_database),
    -        }
    -        if kwargs is None:
    -            kwargs = {}
    -        base_kwargs.update(kwargs)
    -
    -        to_return = to_return.format(**base_kwargs)
    -
    -        return to_return
    -
    -    def run_sql_presto(self, sql, fetch, conn):
    -        cursor = conn.handle.cursor()
    -        try:
    -            cursor.execute(sql)
    -            if fetch == 'one':
    -                return cursor.fetchall()[0]
    -            elif fetch == 'all':
    -                return cursor.fetchall()
    -            else:
    -                # we have to fetch.
    -                cursor.fetchall()
    -        except Exception as e:
    -            conn.handle.rollback()
    -            conn.transaction_open = False
    -            print(sql)
    -            print(e)
    -            raise
    -        else:
    -            conn.handle.commit()
    -            conn.transaction_open = False
    -
    -    def run_sql_common(self, sql, fetch, conn):
    -        with conn.handle.cursor() as cursor:
    -            try:
    -                cursor.execute(sql)
    -                conn.handle.commit()
    -                if fetch == 'one':
    -                    return cursor.fetchone()
    -                elif fetch == 'all':
    -                    return cursor.fetchall()
    -                else:
    -                    return
    -            except BaseException as e:
    -                if conn.handle and not getattr(conn.handle, 'closed', True):
    -                    conn.handle.rollback()
    -                print(sql)
    -                print(e)
    -                raise
    -            finally:
    -                conn.transaction_open = False
    -
    -    def run_sql(self, query, fetch='None', kwargs=None, connection_name=None):
    -        if connection_name is None:
    -            connection_name = '__test'
    -
    -        if query.strip() == "":
    -            return
    -
    -        sql = self.transform_sql(query, kwargs=kwargs)
    -
    -        with self.get_connection(connection_name) as conn:
    -            msg = f'test connection "{conn.name}" executing: {sql}'
    -            fire_event(IntegrationTestDebug(msg=msg))
    -            if self.adapter_type == 'presto':
    -                return self.run_sql_presto(sql, fetch, conn)
    -            else:
    -                return self.run_sql_common(sql, fetch, conn)
    -
    -    def _ilike(self, target, value):
    -        # presto has this regex substitution monstrosity instead of 'ilike'
    -        if self.adapter_type == 'presto':
    -            return r"regexp_like({}, '(?i)\A{}\Z')".format(target, value)
    -        else:
    -            return "{} ilike '{}'".format(target, value)
    -
    -    def get_many_table_columns_snowflake(self, tables, schema, database=None):
    -        tables = set(tables)
    -        if database is None:
    -            database = self.default_database
    -        sql = 'show columns in schema {database}.{schema}'.format(
    -            database=self.quote_as_configured(database, 'database'),
    -            schema=self.quote_as_configured(schema, 'schema')
    -        )
    -        # assumption: this will be much faster than doing one query/table
    -        # because in tests, we'll want most of our tables most of the time.
    -        columns = self.run_sql(sql, fetch='all')
    -        results = []
    -        for column in columns:
    -            table_name, _, column_name, json_data_type = column[:4]
    -            character_maximum_length = None
    -            if table_name in tables:
    -                typeinfo = json.loads(json_data_type)
    -                data_type = typeinfo['type']
    -                if data_type == 'TEXT':
    -                    character_maximum_length = max(typeinfo['length'], 16777216)
    -                results.append((table_name, column_name, data_type, character_maximum_length))
    -        return results
    -
    -    def get_many_table_columns_information_schema(self, tables, schema, database=None):
    -        if self.adapter_type == 'presto':
    -            columns = 'table_name, column_name, data_type'
    -        else:
    -            columns = 'table_name, column_name, data_type, character_maximum_length'
    -
    -        sql = """
    -                select {columns}
    -                from {db_string}information_schema.columns
    -                where {schema_filter}
    -                  and ({table_filter})
    -                order by column_name asc"""
    -
    -        db_string = ''
    -        if database:
    -            db_string = self.quote_as_configured(database, 'database') + '.'
    -
    -        table_filters_s = " OR ".join(
    -            self._ilike('table_name', table.replace('"', ''))
    -            for table in tables
    -        )
    -        schema_filter = self._ilike('table_schema', schema)
    -
    -        sql = sql.format(
    -                columns=columns,
    -                schema_filter=schema_filter,
    -                table_filter=table_filters_s,
    -                db_string=db_string)
    -
    -        columns = self.run_sql(sql, fetch='all')
    -        return list(map(self.filter_many_columns, columns))
    -
    -    def get_many_table_columns(self, tables, schema, database=None):
    -        if self.adapter_type == 'snowflake':
    -            result = self.get_many_table_columns_snowflake(tables, schema, database)
    -        else:
    -            result = self.get_many_table_columns_information_schema(tables, schema, database)
    -        result.sort(key=lambda x: '{}.{}'.format(x[0], x[1]))
    -        return result
    -
    -    def filter_many_columns(self, column):
    -        if len(column) == 3:
    -            table_name, column_name, data_type = column
    -            char_size = None
    -        else:
    -            table_name, column_name, data_type, char_size = column
    -        # in snowflake, all varchar widths are created equal
    -        if self.adapter_type == 'snowflake':
    -            if char_size and char_size < 16777216:
    -                char_size = 16777216
    -        return (table_name, column_name, data_type, char_size)
    -
    -    @contextmanager
    -    def get_connection(self, name=None):
    -        """Create a test connection context where all executed macros, etc will
    -        get self.adapter as the adapter.
    -
    -        This allows tests to run normal adapter macros as if reset_adapters()
    -        were not called by handle_and_check (for asserts, etc)
    -        """
    -        if name is None:
    -            name = '__test'
    -        with patch.object(providers, 'get_adapter', return_value=self.adapter):
    -            with self.adapter.connection_named(name):
    -                conn = self.adapter.connections.get_thread_connection()
    -                yield conn
    -
    -    def get_relation_columns(self, relation):
    -        with self.get_connection():
    -            columns = self.adapter.get_columns_in_relation(relation)
    -
    -        return sorted(((c.name, c.dtype, c.char_size) for c in columns),
    -                      key=lambda x: x[0])
    -
    -    def get_table_columns(self, table, schema=None, database=None):
    -        schema = self.unique_schema() if schema is None else schema
    -        database = self.default_database if database is None else database
    -        relation = self.adapter.Relation.create(
    -            database=database,
    -            schema=schema,
    -            identifier=table,
    -            type='table',
    -            quote_policy=self.config.quoting
    -        )
    -        return self.get_relation_columns(relation)
    -
    -    def get_table_columns_as_dict(self, tables, schema=None):
    -        col_matrix = self.get_many_table_columns(tables, schema)
    -        res = {}
    -        for row in col_matrix:
    -            table_name = row[0]
    -            col_def = row[1:]
    -            if table_name not in res:
    -                res[table_name] = []
    -            res[table_name].append(col_def)
    -        return res
    -
    -    def get_models_in_schema_snowflake(self, schema):
    -        sql = 'show objects in schema {}.{}'.format(
    -            self.quote_as_configured(self.default_database, 'database'),
    -            self.quote_as_configured(schema, 'schema')
    -        )
    -        results = {}
    -        for row in self.run_sql(sql, fetch='all'):
    -            # I sure hope these never change!
    -            name = row[1]
    -            kind = row[4]
    -
    -            if kind == 'TABLE':
    -                kind = 'table'
    -            elif kind == 'VIEW':
    -                kind = 'view'
    -
    -            results[name] = kind
    -        return results
    -
    -    def get_models_in_schema(self, schema=None):
    -        schema = self.unique_schema() if schema is None else schema
    -        if self.adapter_type == 'snowflake':
    -            return self.get_models_in_schema_snowflake(schema)
    -
    -        sql = """
    -                select table_name,
    -                        case when table_type = 'BASE TABLE' then 'table'
    -                             when table_type = 'VIEW' then 'view'
    -                             else table_type
    -                        end as materialization
    -                from information_schema.tables
    -                where {}
    -                order by table_name
    -                """
    -
    -        sql = sql.format(self._ilike('table_schema', schema))
    -        result = self.run_sql(sql, fetch='all')
    -
    -        return {model_name: materialization for (model_name, materialization) in result}
    -
    -    def _assertTablesEqualSql(self, relation_a, relation_b, columns=None):
    -        if columns is None:
    -            columns = self.get_relation_columns(relation_a)
    -        column_names = [c[0] for c in columns]
    -
    -        sql = self.adapter.get_rows_different_sql(
    -            relation_a, relation_b, column_names
    -        )
    -
    -        return sql
    -
    -    def assertTablesEqual(self, table_a, table_b,
    -                          table_a_schema=None, table_b_schema=None,
    -                          table_a_db=None, table_b_db=None):
    -        if table_a_schema is None:
    -            table_a_schema = self.unique_schema()
    -
    -        if table_b_schema is None:
    -            table_b_schema = self.unique_schema()
    -
    -        if table_a_db is None:
    -            table_a_db = self.default_database
    -
    -        if table_b_db is None:
    -            table_b_db = self.default_database
    -
    -        relation_a = self._make_relation(table_a, table_a_schema, table_a_db)
    -        relation_b = self._make_relation(table_b, table_b_schema, table_b_db)
    -
    -        self._assertTableColumnsEqual(relation_a, relation_b)
    -
    -        sql = self._assertTablesEqualSql(relation_a, relation_b)
    -        result = self.run_sql(sql, fetch='one')
    -
    -        self.assertEqual(
    -            result[0],
    -            0,
    -            'row_count_difference nonzero: ' + sql
    -        )
    -        self.assertEqual(
    -            result[1],
    -            0,
    -            'num_mismatched nonzero: ' + sql
    -        )
    -
    -    def _make_relation(self, identifier, schema=None, database=None):
    -        if schema is None:
    -            schema = self.unique_schema()
    -        if database is None:
    -            database = self.default_database
    -        return self.adapter.Relation.create(
    -            database=database,
    -            schema=schema,
    -            identifier=identifier,
    -            quote_policy=self.config.quoting
    -        )
    -
    -    def get_many_relation_columns(self, relations):
    -        """Returns a dict of (datbase, schema) -> (dict of (table_name -> list of columns))
    -        """
    -        schema_fqns = {}
    -        for rel in relations:
    -            this_schema = schema_fqns.setdefault((rel.database, rel.schema), [])
    -            this_schema.append(rel.identifier)
    -
    -        column_specs = {}
    -        for key, tables in schema_fqns.items():
    -            database, schema = key
    -            columns = self.get_many_table_columns(tables, schema, database=database)
    -            table_columns = {}
    -            for col in columns:
    -                table_columns.setdefault(col[0], []).append(col[1:])
    -            for rel_name, columns in table_columns.items():
    -                key = (database, schema, rel_name)
    -                column_specs[key] = columns
    -
    -        return column_specs
    -
    -    def assertManyRelationsEqual(self, relations, default_schema=None, default_database=None):
    -        if default_schema is None:
    -            default_schema = self.unique_schema()
    -        if default_database is None:
    -            default_database = self.default_database
    -
    -        specs = []
    -        for relation in relations:
    -            if not isinstance(relation, (tuple, list)):
    -                relation = [relation]
    -
    -            assert len(relation) <= 3
    -
    -            if len(relation) == 3:
    -                relation = self._make_relation(*relation)
    -            elif len(relation) == 2:
    -                relation = self._make_relation(relation[0], relation[1], default_database)
    -            elif len(relation) == 1:
    -                relation = self._make_relation(relation[0], default_schema, default_database)
    -            else:
    -                raise ValueError('relation must be a sequence of 1, 2, or 3 values')
    -
    -            specs.append(relation)
    -
    -        with self.get_connection():
    -            column_specs = self.get_many_relation_columns(specs)
    -
    -        # make sure everyone has equal column definitions
    -        first_columns = None
    -        for relation in specs:
    -            key = (relation.database, relation.schema, relation.identifier)
    -            # get a good error here instead of a hard-to-diagnose KeyError
    -            self.assertIn(key, column_specs, f'No columns found for {key}')
    -            columns = column_specs[key]
    -            if first_columns is None:
    -                first_columns = columns
    -            else:
    -                self.assertEqual(
    -                    first_columns, columns,
    -                    '{} did not match {}'.format(str(specs[0]), str(relation))
    -                )
    -
    -        # make sure everyone has the same data. if we got here, everyone had
    -        # the same column specs!
    -        first_relation = None
    -        for relation in specs:
    -            if first_relation is None:
    -                first_relation = relation
    -            else:
    -                sql = self._assertTablesEqualSql(first_relation, relation,
    -                                                 columns=first_columns)
    -                result = self.run_sql(sql, fetch='one')
    -
    -                self.assertEqual(
    -                    result[0],
    -                    0,
    -                    'row_count_difference nonzero: ' + sql
    -                )
    -                self.assertEqual(
    -                    result[1],
    -                    0,
    -                    'num_mismatched nonzero: ' + sql
    -                )
    -
    -    def assertManyTablesEqual(self, *args):
    -        schema = self.unique_schema()
    -
    -        all_tables = []
    -        for table_equivalencies in args:
    -            all_tables += list(table_equivalencies)
    -
    -        all_cols = self.get_table_columns_as_dict(all_tables, schema)
    -
    -        for table_equivalencies in args:
    -            first_table = table_equivalencies[0]
    -            first_relation = self._make_relation(first_table)
    -
    -            # assert that all tables have the same columns
    -            base_result = all_cols[first_table]
    -            self.assertTrue(len(base_result) > 0)
    -
    -            for other_table in table_equivalencies[1:]:
    -                other_result = all_cols[other_table]
    -                self.assertTrue(len(other_result) > 0)
    -                self.assertEqual(base_result, other_result)
    -
    -                other_relation = self._make_relation(other_table)
    -                sql = self._assertTablesEqualSql(first_relation,
    -                                                 other_relation,
    -                                                 columns=base_result)
    -                result = self.run_sql(sql, fetch='one')
    -
    -                self.assertEqual(
    -                    result[0],
    -                    0,
    -                    'row_count_difference nonzero: ' + sql
    -                )
    -                self.assertEqual(
    -                    result[1],
    -                    0,
    -                    'num_mismatched nonzero: ' + sql
    -                )
    -
    -    def _assertTableRowCountsEqual(self, relation_a, relation_b):
    -        cmp_query = """
    -            with table_a as (
    -
    -                select count(*) as num_rows from {}
    -
    -            ), table_b as (
    -
    -                select count(*) as num_rows from {}
    -
    -            )
    -
    -            select table_a.num_rows - table_b.num_rows as difference
    -            from table_a, table_b
    -
    -        """.format(str(relation_a), str(relation_b))
    -
    -        res = self.run_sql(cmp_query, fetch='one')
    -
    -        self.assertEqual(int(res[0]), 0, "Row count of table {} doesn't match row count of table {}. ({} rows different)".format(
    -                relation_a.identifier,
    -                relation_b.identifier,
    -                res[0]
    -            )
    -        )
    -
    -    def assertTableDoesNotExist(self, table, schema=None, database=None):
    -        columns = self.get_table_columns(table, schema, database)
    -
    -        self.assertEqual(
    -            len(columns),
    -            0
    -        )
    -
    -    def assertTableDoesExist(self, table, schema=None, database=None):
    -        columns = self.get_table_columns(table, schema, database)
    -
    -        self.assertGreater(
    -            len(columns),
    -            0
    -        )
    -
    -    def _assertTableColumnsEqual(self, relation_a, relation_b):
    -        table_a_result = self.get_relation_columns(relation_a)
    -        table_b_result = self.get_relation_columns(relation_b)
    -
    -        text_types = {'text', 'character varying', 'character', 'varchar'}
    -
    -        self.assertEqual(len(table_a_result), len(table_b_result))
    -        for a_column, b_column in zip(table_a_result, table_b_result):
    -            a_name, a_type, a_size = a_column
    -            b_name, b_type, b_size = b_column
    -            self.assertEqual(a_name, b_name,
    -                '{} vs {}: column "{}" != "{}"'.format(
    -                    relation_a, relation_b, a_name, b_name
    -                ))
    -
    -            self.assertEqual(a_type, b_type,
    -                '{} vs {}: column "{}" has type "{}" != "{}"'.format(
    -                    relation_a, relation_b, a_name, a_type, b_type
    -                ))
    -
    -            if self.adapter_type == 'presto' and None in (a_size, b_size):
    -                # None is compatible with any size
    -                continue
    -
    -            self.assertEqual(a_size, b_size,
    -                '{} vs {}: column "{}" has size "{}" != "{}"'.format(
    -                    relation_a, relation_b, a_name, a_size, b_size
    -                ))
    -
    -    def assertEquals(self, *args, **kwargs):
    -        # assertEquals is deprecated. This makes the warnings less chatty
    -        self.assertEqual(*args, **kwargs)
    -
    -    def assertBetween(self, timestr, start, end=None):
    -        datefmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    -        if end is None:
    -            end = datetime.utcnow()
    -
    -        parsed = datetime.strptime(timestr, datefmt)
    -
    -        self.assertLessEqual(start, parsed,
    -            'parsed date {} happened before {}'.format(
    -                parsed,
    -                start.strftime(datefmt))
    -        )
    -        self.assertGreaterEqual(end, parsed,
    -            'parsed date {} happened after {}'.format(
    -                parsed,
    -                end.strftime(datefmt))
    -        )
    -
    -    def copy_file(self, src, dest) -> None:
    -        # move files in the temp testing dir created
    -        shutil.copyfile(
    -            os.path.join(self.test_root_dir, src),
    -            os.path.join(self.test_root_dir, dest),
    -        )
    -
    -    def rm_file(self, src) -> None:
    -        os.remove(os.path.join(self.test_root_dir, src))
    -
    -
    -def use_profile(profile_name):
    -    """A decorator to declare a test method as using a particular profile.
    -    Handles both setting the nose attr and calling self.use_profile.
    -
    -    Use like this:
    -
    -    class TestSomething(DBIntegrationTest):
    -        @use_profile('postgres')
    -        def test_postgres_thing(self):
    -            self.assertEqual(self.adapter_type, 'postgres')
    -
    -        @use_profile('snowflake')
    -        def test_snowflake_thing(self):
    -            self.assertEqual(self.adapter_type, 'snowflake')
    -    """
    -    def outer(wrapped):
    -        @getattr(pytest.mark, 'profile_'+profile_name)
    -        @wraps(wrapped)
    -        def func(self, *args, **kwargs):
    -            return wrapped(self, *args, **kwargs)
    -        # sanity check at import time
    -        assert _profile_from_test_name(wrapped.__name__) == profile_name
    -        return func
    -    return outer
    -
    -
    -class AnyFloat:
    -    """Any float. Use this in assertEqual() calls to assert that it is a float.
    -    """
    -    def __eq__(self, other):
    -        return isinstance(other, float)
    -
    -
    -class AnyString:
    -    """Any string. Use this in assertEqual() calls to assert that it is a string.
    -    """
    -    def __eq__(self, other):
    -        return isinstance(other, str)
    -
    -
    -class AnyStringWith:
    -    def __init__(self, contains=None):
    -        self.contains = contains
    -
    -    def __eq__(self, other):
    -        if not isinstance(other, str):
    -            return False
    -
    -        if self.contains is None:
    -            return True
    -
    -        return self.contains in other
    -
    -    def __repr__(self):
    -        return 'AnyStringWith<{!r}>'.format(self.contains)
    -
    -
    -def get_manifest():
    -    path = './target/partial_parse.msgpack'
    -    if os.path.exists(path):
    -        with open(path, 'rb') as fp:
    -            manifest_mp = fp.read()
    -        manifest: Manifest = Manifest.from_msgpack(manifest_mp)
    -        return manifest
    -    else:
    -        return None
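    The file removed above was the old unittest-style `DBTIntegrationTest` harness: it hand-built
    `dbt_project.yml` and `profiles.yml`, managed test schemas, and exposed assertion helpers like
    `assertTablesEqual`. The pytest framework that replaces it drives the same setup through
    fixtures. A minimal sketch of the new style, assuming only the `run_dbt` helper and `project`
    fixture used elsewhere in this patch (the model body and test name are illustrative):

        import pytest

        from dbt.tests.util import run_dbt

        class TestSmoke:
            @pytest.fixture(scope="class")
            def models(self):
                # replaces use_default_project() / model-paths bookkeeping
                return {"my_model.sql": "select 1 as id"}

            def test_run(self, project):
                # the `project` fixture writes dbt_project.yml and profiles.yml
                # and creates/drops the test schema, as setUp()/tearDown() did
                results = run_dbt(["run"])
                assert len(results) == 1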
    diff --git a/tests/functional/custom_target_path/test_custom_target_path.py b/tests/functional/custom_target_path/test_custom_target_path.py
    new file mode 100644
    index 00000000000..4c1085f4468
    --- /dev/null
    +++ b/tests/functional/custom_target_path/test_custom_target_path.py
    @@ -0,0 +1,35 @@
    +from pathlib import Path
    +
    +import pytest
    +
    +from dbt.tests.util import run_dbt
    +
    +
    +class TestTargetPathConfig:
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {"config-version": 2, "target-path": "project_target"}
    +
    +    def test_target_path(self, project):
    +        run_dbt(["run"])
    +        assert Path("project_target").is_dir()
    +        assert not Path("target").is_dir()
    +
    +
    +class TestTargetPathEnvVar:
    +    def test_target_path(self, project, monkeypatch):
    +        monkeypatch.setenv("DBT_TARGET_PATH", "env_target")
    +        run_dbt(["run"])
    +        assert Path("env_target").is_dir()
    +        assert not Path("project_target").is_dir()
    +        assert not Path("target").is_dir()
    +
    +
    +class TestTargetPathCliArg:
    +    def test_target_path(self, project, monkeypatch):
    +        monkeypatch.setenv("DBT_TARGET_PATH", "env_target")
    +        run_dbt(["run", "--target-path", "cli_target"])
    +        assert Path("cli_target").is_dir()
    +        assert not Path("env_target").is_dir()
    +        assert not Path("project_target").is_dir()
    +        assert not Path("target").is_dir()
    diff --git a/tox.ini b/tox.ini
    index 53187161c7f..e0f96bf3eda 100644
    --- a/tox.ini
    +++ b/tox.ini
    @@ -25,7 +25,6 @@ passenv =
       POSTGRES_TEST_*
       PYTEST_ADDOPTS
     commands =
    -  {envpython} -m pytest --cov=core -m profile_postgres {posargs} test/integration
       {envpython} -m pytest --cov=core {posargs} tests/functional
       {envpython} -m pytest --cov=core {posargs} tests/adapter
     
    
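    The three new test classes above pin down the precedence for `target-path`: the
    `--target-path` CLI flag beats the `DBT_TARGET_PATH` environment variable, which beats the
    project config. A sketch of that resolution order using a hypothetical helper, not dbt's
    actual implementation:

        import os
        from typing import Optional

        def resolve_target_path(cli_value: Optional[str], project_value: Optional[str]) -> str:
            # precedence mirrored from the tests: CLI flag, then env var,
            # then dbt_project.yml, then the built-in default
            if cli_value:
                return cli_value
            env_value = os.getenv("DBT_TARGET_PATH")
            if env_value:
                return env_value
            return project_value or "target"

        assert resolve_target_path("cli_target", "project_target") == "cli_target"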
    From 622e5fd71deed6bf71f2db37f3dd18eb10575110 Mon Sep 17 00:00:00 2001
    From: Emily Rockman 
    Date: Tue, 31 Jan 2023 13:05:13 -0600
    Subject: [PATCH 146/156] fix contributor list generation (#6799)
    
    ---
     .changie.yaml | 22 ++++++++++++++--------
     1 file changed, 14 insertions(+), 8 deletions(-)
    
    diff --git a/.changie.yaml b/.changie.yaml
    index dbbb43daf31..99227bbbf0e 100644
    --- a/.changie.yaml
    +++ b/.changie.yaml
    @@ -88,7 +88,7 @@ custom:
     footerFormat: |
       {{- $contributorDict := dict }}
       {{- /* any names added to this list should be all lowercase for later matching purposes */}}
    -  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" "nssalian" }}
    +  {{- $core_team := list "michelleark" "peterallenwebb" "emmyoop" "nathaniel-may" "gshank" "leahwicz" "chenyulinx" "stu-k" "iknox-fa" "versusfacit" "mcknight-42" "jtcohen6" "aranke" "dependabot[bot]" "snyk-bot" "colin-rogers-dbt" }}
       {{- range $change := .Changes }}
         {{- $authorList := splitList " " $change.Custom.Author }}
         {{- /* loop through all authors for a single changelog */}}
    @@ -97,22 +97,28 @@ footerFormat: |
           {{- /* we only want to include non-core team contributors */}}
           {{- if not (has $authorLower $core_team)}}
             {{- $changeList := splitList " " $change.Custom.Author }}
    -          {{- /* Docs kind link back to dbt-docs instead of dbt-core issues */}}
    +          {{- $IssueList := list }}
               {{- $changeLink := $change.Kind }}
               {{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }}
    -            {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $change.Custom.PR }}
    -          {{- else if eq $change.Kind "Docs"}}
    -            {{- $changeLink = "[dbt-docs/#nbr](https://github.com/dbt-labs/dbt-docs/issues/nbr)" | replace "nbr" $change.Custom.Issue }}
    +            {{- $changes := splitList " " $change.Custom.PR }}
    +            {{- range $issueNbr := $changes }}
    +              {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/pull/nbr)" | replace "nbr" $issueNbr }}
    +              {{- $IssueList = append $IssueList $changeLink  }}
    +            {{- end -}}
               {{- else }}
    -            {{- $changeLink = "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $change.Custom.Issue }}
    +            {{- $changes := splitList " " $change.Custom.Issue }}
    +            {{- range $issueNbr := $changes }}
    +              {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-core/issues/nbr)" | replace "nbr" $issueNbr }}
    +              {{- $IssueList = append $IssueList $changeLink  }}
    +            {{- end -}}
               {{- end }}
               {{- /* check if this contributor has other changes associated with them already */}}
               {{- if hasKey $contributorDict $author }}
                 {{- $contributionList := get $contributorDict $author }}
    -            {{- $contributionList = append $contributionList $changeLink  }}
    +            {{- $contributionList = concat $contributionList $IssueList  }}
                 {{- $contributorDict := set $contributorDict $author $contributionList }}
               {{- else }}
    -            {{- $contributionList := list $changeLink }}
    +            {{- $contributionList := $IssueList }}
                 {{- $contributorDict := set $contributorDict $author $contributionList }}
               {{- end }}
             {{- end}}
    
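    The template change above makes the changelog footer handle multi-valued `Author`, `Issue`,
    and `PR` fields: each space-separated number becomes its own link, and all links accumulate
    per non-core contributor. The same grouping, sketched in Python for readability (the real
    logic is the Go template in `.changie.yaml`; names and numbers here are illustrative):

        CORE_TEAM = {"michelleark", "emmyoop"}  # abbreviated

        def add_contributions(contributors, author_field, issue_field):
            links = [
                f"[#{nbr}](https://github.com/dbt-labs/dbt-core/issues/{nbr})"
                for nbr in issue_field.split(" ")
            ]
            for author in author_field.split(" "):
                if author.lower() not in CORE_TEAM:
                    contributors.setdefault(author, []).extend(links)

        contributors = {}
        add_contributions(contributors, "eve-johns", "6068 6069")
        assert contributors["eve-johns"] == [
            "[#6068](https://github.com/dbt-labs/dbt-core/issues/6068)",
            "[#6069](https://github.com/dbt-labs/dbt-core/issues/6069)",
        ]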
    From 42b7caae1977511e4dff1570cf69adb5c1ff5468 Mon Sep 17 00:00:00 2001
    From: Mila Page <67295367+VersusFacit@users.noreply.github.com>
    Date: Tue, 31 Jan 2023 12:58:19 -0800
    Subject: [PATCH 147/156] Ct 1827/064 column comments tests conversion (#6766)
    
    * Convert test and make it a bit more pytest-onic
    
    * Ax old integration test.
    
    * Run black on test conversion
    
    * I didn't like how pytest was running the fixture, so I wrapped it into a closure.
    
    * Merge converted test into persist docs.
    
    * Move persist docs tests to the adapter zone. Prep for adapter tests.
    
    * Fix up test names
    
    * Fix name to be less confusing.
    
    ---------
    
    Co-authored-by: Mila Page 
    ---
     .../tests/adapter/persist_docs}/fixtures.py   |  0
     .../persist_docs}/test_persist_docs.py        | 33 +++++++++++++------
     2 files changed, 23 insertions(+), 10 deletions(-)
     rename tests/{functional/persist_docs_tests => adapter/dbt/tests/adapter/persist_docs}/fixtures.py (100%)
     rename tests/{functional/persist_docs_tests => adapter/dbt/tests/adapter/persist_docs}/test_persist_docs.py (89%)
    
    diff --git a/tests/functional/persist_docs_tests/fixtures.py b/tests/adapter/dbt/tests/adapter/persist_docs/fixtures.py
    similarity index 100%
    rename from tests/functional/persist_docs_tests/fixtures.py
    rename to tests/adapter/dbt/tests/adapter/persist_docs/fixtures.py
    diff --git a/tests/functional/persist_docs_tests/test_persist_docs.py b/tests/adapter/dbt/tests/adapter/persist_docs/test_persist_docs.py
    similarity index 89%
    rename from tests/functional/persist_docs_tests/test_persist_docs.py
    rename to tests/adapter/dbt/tests/adapter/persist_docs/test_persist_docs.py
    index 7ca5dcfabe8..99c0ef746f9 100644
    --- a/tests/functional/persist_docs_tests/test_persist_docs.py
    +++ b/tests/adapter/dbt/tests/adapter/persist_docs/test_persist_docs.py
    @@ -2,11 +2,9 @@
     import os
     import pytest
     
    -from dbt.tests.util import (
    -    run_dbt,
    -)
    +from dbt.tests.util import run_dbt
     
    -from tests.functional.persist_docs_tests.fixtures import (
    +from dbt.tests.adapter.persist_docs.fixtures import (
         _DOCS__MY_FUN_DOCS,
         _MODELS__MISSING_COLUMN,
         _MODELS__MODEL_USING_QUOTE_UTIL,
    @@ -20,7 +18,7 @@
     )
     
     
    -class BasePersistDocsTest:
    +class BasePersistDocsBase:
         @pytest.fixture(scope="class", autouse=True)
         def setUp(self, project):
             run_dbt(["seed"])
    @@ -90,7 +88,7 @@ def _assert_has_view_comments(
             assert view_name_comment is None
     
     
    -class TestPersistDocs(BasePersistDocsTest):
    +class BasePersistDocs(BasePersistDocsBase):
         @pytest.fixture(scope="class")
         def project_config_update(self):
             return {
    @@ -120,7 +118,7 @@ def test_has_comments_pglike(self, project):
             self._assert_has_view_comments(no_docs_node, False, False)
     
     
    -class TestPersistDocsColumnMissing(BasePersistDocsTest):
    +class BasePersistDocsColumnMissing(BasePersistDocsBase):
         @pytest.fixture(scope="class")
         def project_config_update(self):
             return {
    @@ -141,7 +139,7 @@ def models(self):
         def properties(self):
             return {"schema.yml": _PROPERITES__SCHEMA_MISSING_COL}
     
    -    def test_postgres_missing_column(self, project):
    +    def test_missing_column(self, project):
             run_dbt(["docs", "generate"])
             with open("target/catalog.json") as fp:
                 catalog_data = json.load(fp)
    @@ -152,7 +150,10 @@ def test_postgres_missing_column(self, project):
             assert table_id_comment.startswith("test id column description")
     
     
    -class TestPersistDocsColumnComment:
    +class BasePersistDocsCommentOnQuotedColumn:
    +    """Covers edge case where column with comment must be quoted.
    +    We set this using the `quote:` tag in the property file."""
    +
         @pytest.fixture(scope="class")
         def models(self):
             return {"quote_model.sql": _MODELS__MODEL_USING_QUOTE_UTIL}
    @@ -190,5 +191,17 @@ def fixt():
     
             return fixt
     
    -    def test_postgres_comments(self, run_has_comments):
    +    def test_quoted_column_comments(self, run_has_comments):
             run_has_comments()
    +
    +
    +class TestPersistDocs(BasePersistDocs):
    +    pass
    +
    +
    +class TestPersistDocsColumnMissing(BasePersistDocsColumnMissing):
    +    pass
    +
    +
    +class TestPersistDocsCommentOnQuotedColumn(BasePersistDocsCommentOnQuotedColumn):
    +    pass
    
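    Splitting each test into a `Base*` class plus an empty `Test*` subclass is what makes the
    move into `tests/adapter/dbt/tests/adapter/` useful: adapter plugins can import the base
    classes and opt in with one-line subclasses, exactly like the trailing classes above. A
    sketch of how a hypothetical adapter repo might reuse them:

        # tests/functional/adapter/test_persist_docs.py in dbt-myadapter (illustrative)
        from dbt.tests.adapter.persist_docs.test_persist_docs import (
            BasePersistDocs,
            BasePersistDocsColumnMissing,
        )

        class TestPersistDocsMyAdapter(BasePersistDocs):
            pass

        class TestPersistDocsColumnMissingMyAdapter(BasePersistDocsColumnMissing):
            pass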
    From 1a6e4a00c7be2ea88492192de51d190a46c19561 Mon Sep 17 00:00:00 2001
    From: Mila Page <67295367+VersusFacit@users.noreply.github.com>
    Date: Tue, 31 Jan 2023 13:53:55 -0800
    Subject: [PATCH 148/156] Add clearer directions for custom test suite vars in
     Makefile. (#6764)
    
    * Add clearer directions for custom test suite vars in Makefile.
    
    * Fix up PR for review
    
    * Fix erroneous whitespace.
    
    * Fix a spelling error.
    
    * Add documentation to discourage makefile edits but provide override tooling.
    
    * Fix quotation marks. Very strange behavior
    
    * Compact code and verify quotations are happy inside bash and python.
    
    * Fold comments into Makefile.
    
    ---------
    
    Co-authored-by: Mila Page 
    ---
     .../unreleased/Features-20230126-154716.yaml  |  6 ++++
     .gitignore                                    |  1 +
     Makefile                                      | 34 ++++++++++++-------
     3 files changed, 28 insertions(+), 13 deletions(-)
     create mode 100644 .changes/unreleased/Features-20230126-154716.yaml
    
    diff --git a/.changes/unreleased/Features-20230126-154716.yaml b/.changes/unreleased/Features-20230126-154716.yaml
    new file mode 100644
    index 00000000000..4b0bbea2be8
    --- /dev/null
    +++ b/.changes/unreleased/Features-20230126-154716.yaml
    @@ -0,0 +1,6 @@
    +kind: Features
    +body: Adjust makefile to have clearer instructions for CI env var changes.
    +time: 2023-01-26T15:47:16.887327-08:00
    +custom:
    +  Author: versusfacit
    +  Issue: "6689"
    diff --git a/.gitignore b/.gitignore
    index dc9996305d3..8360802f09b 100644
    --- a/.gitignore
    +++ b/.gitignore
    @@ -51,6 +51,7 @@ coverage.xml
     *,cover
     .hypothesis/
     test.env
    +makefile.test.env
     *.pytest_cache/
     
     
    diff --git a/Makefile b/Makefile
    index 566c4de9e4d..62ee6f66e8c 100644
    --- a/Makefile
    +++ b/Makefile
    @@ -6,18 +6,26 @@ ifeq ($(USE_DOCKER),true)
     	DOCKER_CMD := docker-compose run --rm test
     endif
     
    -LOGS_DIR := ./logs
    +#
    +# To override CI_FLAGS, create a file at this repo's root dir named `makefile.test.env`. Fill it
    +# with any ENV_VAR overrides required by your test environment, e.g.
    +#    DBT_TEST_USER_1=user
    +#    LOG_DIR="dir with a space in it"
    +#
    +# Warn: Restrict each line to one variable only.
    +#
    +ifeq (./makefile.test.env,$(wildcard ./makefile.test.env))
    +	include ./makefile.test.env
    +endif
     
    -# Optional flag to invoke tests using our CI env.
    -# But we always want these active for structured
    -# log testing.
     CI_FLAGS =\
    -	DBT_TEST_USER_1=dbt_test_user_1\
    -	DBT_TEST_USER_2=dbt_test_user_2\
    -	DBT_TEST_USER_3=dbt_test_user_3\
    -	RUSTFLAGS="-D warnings"\
    -	LOG_DIR=./logs\
    -	DBT_LOG_FORMAT=json
    +	DBT_TEST_USER_1=$(if $(DBT_TEST_USER_1),$(DBT_TEST_USER_1),dbt_test_user_1)\
    +	DBT_TEST_USER_2=$(if $(DBT_TEST_USER_2),$(DBT_TEST_USER_2),dbt_test_user_2)\
    +	DBT_TEST_USER_3=$(if $(DBT_TEST_USER_3),$(DBT_TEST_USER_3),dbt_test_user_3)\
    +	RUSTFLAGS=$(if $(RUSTFLAGS),$(RUSTFLAGS),"-D warnings")\
    +	LOG_DIR=$(if $(LOG_DIR),$(LOG_DIR),./logs)\
    +	DBT_LOG_FORMAT=$(if $(DBT_LOG_FORMAT),$(DBT_LOG_FORMAT),json)
    +
     
     .PHONY: dev_req
     dev_req: ## Installs dbt-* packages in develop mode along with only development dependencies.
    @@ -66,7 +74,7 @@ test: .env ## Runs unit tests with py and code checks against staged changes.
     .PHONY: integration
     integration: .env ## Runs postgres integration tests with py-integration
     	@\
    -	$(if $(USE_CI_FLAGS), $(CI_FLAGS)) $(DOCKER_CMD) tox -e py-integration -- -nauto
    +	$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto
     
     .PHONY: integration-fail-fast
     integration-fail-fast: .env ## Runs postgres integration tests with py-integration in "fail fast" mode.
    @@ -76,9 +84,9 @@ integration-fail-fast: .env ## Runs postgres integration tests with py-integrati
     .PHONY: interop
     interop: clean
     	@\
    -	mkdir $(LOGS_DIR) && \
    +	mkdir $(LOG_DIR) && \
     	$(CI_FLAGS) $(DOCKER_CMD) tox -e py-integration -- -nauto && \
    -	LOG_DIR=$(LOGS_DIR) cargo run --manifest-path test/interop/log_parsing/Cargo.toml
    +	LOG_DIR=$(LOG_DIR) cargo run --manifest-path test/interop/log_parsing/Cargo.toml
     
     .PHONY: setup-db
     setup-db: ## Setup Postgres database with docker-compose for system testing.
    
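    The Makefile now resolves each CI variable as "use the caller's value if set, otherwise the
    historical default", with `makefile.test.env` included first so its assignments win. The same
    conditional-default idiom expressed in Python, for readers less fluent in Make (a sketch,
    not part of the build):

        import os

        # Make's $(if $(DBT_TEST_USER_1),$(DBT_TEST_USER_1),dbt_test_user_1),
        # in Python: keep an override from the environment / makefile.test.env
        # if present, otherwise fall back to the CI default.
        CI_DEFAULTS = {
            "DBT_TEST_USER_1": "dbt_test_user_1",
            "LOG_DIR": "./logs",
            "DBT_LOG_FORMAT": "json",
        }

        ci_flags = {name: os.environ.get(name) or default
                    for name, default in CI_DEFAULTS.items()}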
    From d9424cc7100cb3fcf92155c7d4fb4f7e030f19fe Mon Sep 17 00:00:00 2001
    From: Gerda Shank 
    Date: Thu, 2 Feb 2023 12:22:48 -0500
    Subject: [PATCH 149/156] CT 2000 fix semver prerelease comparisons (#6838)
    
    * Modify semver.py to not use packaging.version.parse
    
    * Changie
    ---
     .../unreleased/Fixes-20230201-154418.yaml     |  6 +++
     core/dbt/semver.py                            | 47 +++++++++++++++----
     core/setup.py                                 |  2 +-
     test/unit/test_semver.py                      | 12 +++--
     4 files changed, 54 insertions(+), 13 deletions(-)
     create mode 100644 .changes/unreleased/Fixes-20230201-154418.yaml
    
    diff --git a/.changes/unreleased/Fixes-20230201-154418.yaml b/.changes/unreleased/Fixes-20230201-154418.yaml
    new file mode 100644
    index 00000000000..dc2099f94b1
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230201-154418.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: Remove pin on packaging and stop using it for prerelease comparisons
    +time: 2023-02-01T15:44:18.279158-05:00
    +custom:
    +  Author: gshank
    +  Issue: "6834"
    diff --git a/core/dbt/semver.py b/core/dbt/semver.py
    index 24f00b333a1..f31840f39b7 100644
    --- a/core/dbt/semver.py
    +++ b/core/dbt/semver.py
    @@ -1,10 +1,7 @@
     from dataclasses import dataclass
     import re
    -import warnings
     from typing import List
     
    -from packaging import version as packaging_version
    -
     from dbt.exceptions import VersionsNotCompatibleError
     import dbt.utils
     
    @@ -70,6 +67,11 @@ class VersionSpecification(dbtClassMixin):
     _VERSION_REGEX = re.compile(_VERSION_REGEX_PAT_STR, re.VERBOSE)
     
     
    +def _cmp(a, b):
    +    """Return negative if ab."""
    +    return (a > b) - (a < b)
    +
    +
     @dataclass
     class VersionSpecifier(VersionSpecification):
         def to_version_string(self, skip_matcher=False):
    @@ -142,13 +144,19 @@ def compare(self, other):
                         return 1
                     if b is None:
                         return -1
    -            # This suppresses the LegacyVersion deprecation warning
    -            with warnings.catch_warnings():
    -                warnings.simplefilter("ignore", category=DeprecationWarning)
    -                if packaging_version.parse(a) > packaging_version.parse(b):
    +
    +                # Check the prerelease component only
    +                prcmp = self._nat_cmp(a, b)
    +                if prcmp != 0:  # either -1 or 1
    +                    return prcmp
    +                # else is equal and will fall through
    +
    +            else:  # major/minor/patch, should all be numbers
    +                if a > b:
                         return 1
    -                elif packaging_version.parse(a) < packaging_version.parse(b):
    +                elif a < b:
                         return -1
    +                # else is equal and will fall through
     
             equal = (
                 self.matcher == Matchers.GREATER_THAN_OR_EQUAL
    @@ -212,6 +220,29 @@ def is_upper_bound(self):
         def is_exact(self):
             return self.matcher == Matchers.EXACT
     
    +    @classmethod
    +    def _nat_cmp(cls, a, b):
    +        def cmp_prerelease_tag(a, b):
    +            if isinstance(a, int) and isinstance(b, int):
    +                return _cmp(a, b)
    +            elif isinstance(a, int):
    +                return -1
    +            elif isinstance(b, int):
    +                return 1
    +            else:
    +                return _cmp(a, b)
    +
    +        a, b = a or "", b or ""
    +        a_parts, b_parts = a.split("."), b.split(".")
    +        a_parts = [int(x) if re.match(r"^\d+$", x) else x for x in a_parts]
    +        b_parts = [int(x) if re.match(r"^\d+$", x) else x for x in b_parts]
    +        for sub_a, sub_b in zip(a_parts, b_parts):
    +            cmp_result = cmp_prerelease_tag(sub_a, sub_b)
    +            if cmp_result != 0:
    +                return cmp_result
    +        else:
    +            return _cmp(len(a), len(b))
    +
     
     @dataclass
     class VersionRange:
    diff --git a/core/setup.py b/core/setup.py
    index b5c43cc184a..b1e304d03dc 100644
    --- a/core/setup.py
    +++ b/core/setup.py
    @@ -58,7 +58,7 @@
             "minimal-snowplow-tracker==0.0.2",
             "networkx>=2.3,<2.8.1;python_version<'3.8'",
             "networkx>=2.3,<3;python_version>='3.8'",
    -        "packaging>=20.9,<22.0",
    +        "packaging>20.9",
             "sqlparse>=0.2.3,<0.5",
             "dbt-extractor~=0.4.1",
             "typing-extensions>=3.7.4",
    diff --git a/test/unit/test_semver.py b/test/unit/test_semver.py
    index b36c403e3a7..45e56c18809 100644
    --- a/test/unit/test_semver.py
    +++ b/test/unit/test_semver.py
    @@ -201,12 +201,16 @@ def test__resolve_to_specific_version(self):
                 '1.1.0')
     
         def test__filter_installable(self):
    -        assert filter_installable(
    +        installable = filter_installable(
                 ['1.1.0',  '1.2.0a1', '1.0.0','2.1.0-alpha','2.2.0asdf','2.1.0','2.2.0','2.2.0-fishtown-beta','2.2.0-2'],
                 install_prerelease=True
    -        ) == ['1.0.0', '1.1.0', '1.2.0a1','2.1.0-alpha','2.1.0','2.2.0asdf','2.2.0-fishtown-beta','2.2.0-2','2.2.0']
    +        )
    +        expected = ['1.0.0', '1.1.0', '1.2.0a1','2.1.0-alpha','2.1.0','2.2.0-2','2.2.0asdf','2.2.0-fishtown-beta','2.2.0']
    +        assert installable == expected
     
    -        assert filter_installable(
    +        installable = filter_installable(
                 ['1.1.0',  '1.2.0a1', '1.0.0','2.1.0-alpha','2.2.0asdf','2.1.0','2.2.0','2.2.0-fishtown-beta'],
                 install_prerelease=False
    -        ) == ['1.0.0', '1.1.0','2.1.0','2.2.0']
    +        )
    +        expected = ['1.0.0', '1.1.0','2.1.0','2.2.0']
    +        assert installable == expected
    
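    The replacement comparison walks prerelease tags one dot-separated segment at a time:
    segments that are both numeric compare as integers, a numeric segment sorts before an
    alphanumeric one, and a length check breaks ties, which is what reorders prerelease versions
    like `2.2.0-2` and `2.2.0asdf` in the updated test expectations. A standalone sketch of that
    ordering rule (just the segment logic, not dbt's class):

        import re

        def cmp_prerelease(a: str, b: str) -> int:
            def parts(tag):
                return [int(p) if re.match(r"^\d+$", p) else p for p in tag.split(".")]
            for x, y in zip(parts(a), parts(b)):
                if isinstance(x, int) and isinstance(y, int):
                    if x != y:
                        return (x > y) - (x < y)
                elif isinstance(x, int):
                    return -1  # numeric segments sort before alphanumeric ones
                elif isinstance(y, int):
                    return 1
                elif x != y:
                    return (x > y) - (x < y)
            return (len(a) > len(b)) - (len(a) < len(b))

        assert cmp_prerelease("2", "fishtown-beta") == -1  # numeric first
        assert cmp_prerelease("alpha.1", "alpha.2") == -1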
    From 2245d8d71035be0fcc632ec8546cdadcf088c583 Mon Sep 17 00:00:00 2001
    From: Emily Rockman 
    Date: Thu, 2 Feb 2023 12:16:06 -0600
    Subject: [PATCH 150/156] update regex to match all iterations (#6839)
    
    * update regex to match all iterations
    
    * convert to num to match all adapters
    
    * add comments, remove extra .
    
    * clarify with more comments
    
    * Update .bumpversion.cfg
    
    Co-authored-by: Nathaniel May 
    
    ---------
    
    Co-authored-by: Nathaniel May 
    ---
     .bumpversion.cfg | 24 +++++++++++++++---------
     1 file changed, 15 insertions(+), 9 deletions(-)
    
    diff --git a/.bumpversion.cfg b/.bumpversion.cfg
    index 4db0c9a0c58..ad2a19955f1 100644
    --- a/.bumpversion.cfg
    +++ b/.bumpversion.cfg
    @@ -1,15 +1,21 @@
     [bumpversion]
     current_version = 1.5.0a1
    -parse = (?P<major>\d+)
    -	\.(?P<minor>\d+)
    -	\.(?P<patch>\d+)
    -	((?P<prekind>a|b|rc)
    -	(?P<pre>\d+)  # pre-release version num
    -	)(\.(?P<nightly>[a-z..0-9]+)
    +
    +# `parse` allows parsing the version into the parts we need to check.  There are some
    +# unnamed groups and that's okay because they do not need to be audited.  If any part
    +# of the version passed in does not match the regex, it will fail.
    +# expected matches: `1.5.0`, `1.5.0a1`, `1.5.0a1.dev123457+nightly`
    +# expected failures: `1`, `1.5`, `1.5.2-a1`, `text1.5.0`
    +parse = (?P<major>[\d]+) # major version number
    +	\.(?P<minor>[\d]+) # minor version number
    +	\.(?P<patch>[\d]+) # patch version number
    +	(((?P<prekind>a|b|rc) # optional pre-release type
    +	?(?P<num>[\d]+?)) # optional pre-release version number
    +	\.?(?P<nightly>[a-z0-9]+\+[a-z]+)? # optional nightly release indicator
     	)?
     serialize =
    -	{major}.{minor}.{patch}{prekind}{pre}.{nightly}
    -	{major}.{minor}.{patch}{prekind}{pre}
    +	{major}.{minor}.{patch}{prekind}{num}.{nightly}
    +	{major}.{minor}.{patch}{prekind}{num}
     	{major}.{minor}.{patch}
     commit = False
     tag = False
    @@ -23,7 +29,7 @@ values =
     	rc
     	final
     
    -[bumpversion:part:pre]
    +[bumpversion:part:num]
     first_value = 1
     
     [bumpversion:part:nightly]
    
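    Because the new `parse` value is a Python regex spread across commented lines, it can be
    sanity-checked directly against the expected matches and failures listed in the comments.
    A quick check, assuming `re.VERBOSE` semantics for the multi-line, commented layout (an
    assumption about how bumpversion compiles multi-line parse values):

        import re

        PARSE = re.compile(
            r"""(?P<major>[\d]+) # major version number
            \.(?P<minor>[\d]+) # minor version number
            \.(?P<patch>[\d]+) # patch version number
            (((?P<prekind>a|b|rc) # optional pre-release type
            ?(?P<num>[\d]+?)) # optional pre-release version number
            \.?(?P<nightly>[a-z0-9]+\+[a-z]+)? # optional nightly release indicator
            )?""",
            re.VERBOSE,
        )

        m = PARSE.match("1.5.0a1.dev123457+nightly")
        assert m and m.group("prekind") == "a" and m.group("nightly") == "dev123457+nightly"
        assert PARSE.match("1.5") is None        # too few components
        assert PARSE.match("text1.5.0") is None  # leading text is rejected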
    From b2ea2b8b256e5db1da0b712dfedd7973e1e50a37 Mon Sep 17 00:00:00 2001
    From: colin-rogers-dbt <111200756+colin-rogers-dbt@users.noreply.github.com>
    Date: Thu, 2 Feb 2023 10:55:09 -0800
    Subject: [PATCH 151/156] move test_store_test_failures.py to adapter zone
     (#6816)
    
    ---
     .../dbt/tests/adapter}/store_test_failures_tests/fixtures.py    | 0
     .../store_test_failures_tests/test_store_test_failures.py       | 2 +-
     2 files changed, 1 insertion(+), 1 deletion(-)
     rename tests/{functional => adapter/dbt/tests/adapter}/store_test_failures_tests/fixtures.py (100%)
     rename tests/{functional => adapter/dbt/tests/adapter}/store_test_failures_tests/test_store_test_failures.py (98%)
    
    diff --git a/tests/functional/store_test_failures_tests/fixtures.py b/tests/adapter/dbt/tests/adapter/store_test_failures_tests/fixtures.py
    similarity index 100%
    rename from tests/functional/store_test_failures_tests/fixtures.py
    rename to tests/adapter/dbt/tests/adapter/store_test_failures_tests/fixtures.py
    diff --git a/tests/functional/store_test_failures_tests/test_store_test_failures.py b/tests/adapter/dbt/tests/adapter/store_test_failures_tests/test_store_test_failures.py
    similarity index 98%
    rename from tests/functional/store_test_failures_tests/test_store_test_failures.py
    rename to tests/adapter/dbt/tests/adapter/store_test_failures_tests/test_store_test_failures.py
    index 15527c86bd3..4074ffd7965 100644
    --- a/tests/functional/store_test_failures_tests/test_store_test_failures.py
    +++ b/tests/adapter/dbt/tests/adapter/store_test_failures_tests/test_store_test_failures.py
    @@ -5,7 +5,7 @@
         run_dbt,
     )
     
    -from tests.functional.store_test_failures_tests.fixtures import (
    +from dbt.tests.adapter.store_test_failures_tests.fixtures import (
         seeds__people,
         seeds__expected_accepted_values,
         seeds__expected_failing_test,
    
    From 4c63b630de1e9cf3c19d48dcb3ca728458ace5f6 Mon Sep 17 00:00:00 2001
    From: Neelesh Salian 
    Date: Mon, 6 Feb 2023 19:51:32 -0800
    Subject: [PATCH 152/156] [CT-1959]: moving simple_seed tests to adapter zone
     (#6859)
    
    * Formatting
    
    * Changelog entry
    
    * Rename to BaseSimpleSeedColumnOverride
    
    * Better error handling
    
    * Update test to include the BOM test
    
    * Cleanup and formatting
    
    * Unused import remove
    
    * nit line
    
    * PR comments
    ---
     .../Under the Hood-20230203-143551.yaml       |   6 +
     core/dbt/tests/util.py                        |  18 +
     .../dbt/tests/adapter/simple_seed/fixtures.py |  95 +++
     .../tests/adapter/simple_seed}/seed_bom.csv   |   0
     .../dbt/tests/adapter/simple_seed/seeds.py}   | 591 +++++++++++++++++-
     .../tests/adapter}/simple_seed/test_seed.py   |  93 +--
     .../simple_seed/test_seed_type_override.py    |  35 +-
     .../simple_seed/data/seed.with.dots.csv       |   2 -
     .../simple_seed/data/seed_actual.csv          | 501 ---------------
     .../simple_seed/data/seed_unicode.csv         |   2 -
     tests/functional/simple_seed/fixtures.py      | 167 -----
     11 files changed, 774 insertions(+), 736 deletions(-)
     create mode 100644 .changes/unreleased/Under the Hood-20230203-143551.yaml
     create mode 100644 tests/adapter/dbt/tests/adapter/simple_seed/fixtures.py
     rename tests/{functional/simple_seed/data => adapter/dbt/tests/adapter/simple_seed}/seed_bom.csv (100%)
     rename tests/{functional/simple_seed/data/seed_expected.sql => adapter/dbt/tests/adapter/simple_seed/seeds.py} (52%)
     rename tests/{functional => adapter/dbt/tests/adapter}/simple_seed/test_seed.py (82%)
     rename tests/{functional => adapter/dbt/tests/adapter}/simple_seed/test_seed_type_override.py (63%)
     delete mode 100644 tests/functional/simple_seed/data/seed.with.dots.csv
     delete mode 100644 tests/functional/simple_seed/data/seed_actual.csv
     delete mode 100644 tests/functional/simple_seed/data/seed_unicode.csv
     delete mode 100644 tests/functional/simple_seed/fixtures.py
    
    diff --git a/.changes/unreleased/Under the Hood-20230203-143551.yaml b/.changes/unreleased/Under the Hood-20230203-143551.yaml
    new file mode 100644
    index 00000000000..a18f754664a
    --- /dev/null
    +++ b/.changes/unreleased/Under the Hood-20230203-143551.yaml	
    @@ -0,0 +1,6 @@
    +kind: Under the Hood
    +body: Moving simple_seed to adapter zone to help adapter test conversions
    +time: 2023-02-03T14:35:51.481856-08:00
    +custom:
    +  Author: nssalian
    +  Issue: CT-1959
    diff --git a/core/dbt/tests/util.py b/core/dbt/tests/util.py
    index 245648ceb48..f73dbad3190 100644
    --- a/core/dbt/tests/util.py
    +++ b/core/dbt/tests/util.py
    @@ -29,6 +29,8 @@
     #   rm_file
     #   write_file
     #   read_file
    +#   mkdir
    +#   rm_dir
     #   get_artifact
     #   update_config_file
     #   write_config_file
    @@ -156,6 +158,22 @@ def read_file(*paths):
         return contents
     
     
    +# Create a directory, raising FileExistsError with a clear message if it already exists
    +def mkdir(directory_path):
    +    try:
    +        os.makedirs(directory_path)
    +    except FileExistsError:
    +        raise FileExistsError(f"{directory_path} already exists.")
    +
    +
    +# Remove a directory, raising FileNotFoundError with a clear message if it does not exist
    +def rm_dir(directory_path):
    +    try:
    +        shutil.rmtree(directory_path)
    +    except FileNotFoundError:
    +        raise FileNotFoundError(f"{directory_path} does not exist.")
    +
    +
     # Get an artifact (usually from the target directory) such as
     # manifest.json or catalog.json to use in a test
     def get_artifact(*paths):
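
    A short sketch of how a test might use the two helpers added above (the scratch
    directory name is hypothetical; mkdir and rm_dir are the functions from this patch):

        from pathlib import Path

        from dbt.tests.util import mkdir, rm_dir

        def build_and_clean_scratch(test_data_dir: Path) -> None:
            scratch = test_data_dir / "scratch"  # hypothetical subdirectory
            mkdir(scratch)  # raises FileExistsError if it is already present
            try:
                (scratch / "tmp.csv").write_text("seed_id\n1\n")
            finally:
                rm_dir(scratch)  # raises FileNotFoundError if already removed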
    diff --git a/tests/adapter/dbt/tests/adapter/simple_seed/fixtures.py b/tests/adapter/dbt/tests/adapter/simple_seed/fixtures.py
    new file mode 100644
    index 00000000000..d32caa5b7d6
    --- /dev/null
    +++ b/tests/adapter/dbt/tests/adapter/simple_seed/fixtures.py
    @@ -0,0 +1,95 @@
    +#
    +# Macros
    +#
    +
    +macros__schema_test = """
    +{% test column_type(model, column_name, type) %}
    +
    +    {% set cols = adapter.get_columns_in_relation(model) %}
    +
    +    {% set col_types = {} %}
    +    {% for col in cols %}
    +        {% do col_types.update({col.name: col.data_type}) %}
    +    {% endfor %}
    +
    +    {% set validation_message = 'Got a column type of ' ~ col_types.get(column_name) ~ ', expected ' ~ type %}
    +
    +    {% set val = 0 if col_types.get(column_name) == type else 1 %}
    +    {% if val == 1 and execute %}
    +        {{ log(validation_message, info=True) }}
    +    {% endif %}
    +
    +    select '{{ validation_message }}' as validation_error
    +    from (select true) as nothing
    +    where {{ val }} = 1
    +
    +{% endtest %}
    +
    +"""
    +
    +#
    +# Models
    +#
    +
    +models__downstream_from_seed_actual = """
    +select * from {{ ref('seed_actual') }}
    +
    +"""
    +models__from_basic_seed = """
    +select * from {{ this.schema }}.seed_expected
    +
    +"""
    +
    +#
    +# Properties
    +#
    +
    +properties__schema_yml = """
    +version: 2
    +seeds:
    +- name: seed_enabled
    +  columns:
    +  - name: birthday
    +    tests:
    +    - column_type:
    +        type: date
    +  - name: seed_id
    +    tests:
    +    - column_type:
    +        type: text
    +
    +- name: seed_tricky
    +  columns:
    +  - name: seed_id
    +    tests:
    +    - column_type:
    +        type: integer
    +  - name: seed_id_str
    +    tests:
    +    - column_type:
    +        type: text
    +  - name: a_bool
    +    tests:
    +    - column_type:
    +        type: boolean
    +  - name: looks_like_a_bool
    +    tests:
    +    - column_type:
    +        type: text
    +  - name: a_date
    +    tests:
    +    - column_type:
    +        type: timestamp without time zone
    +  - name: looks_like_a_date
    +    tests:
    +    - column_type:
    +        type: text
    +  - name: relative
    +    tests:
    +    - column_type:
    +        type: text
    +  - name: weekday
    +    tests:
    +    - column_type:
    +        type: text
    +"""
    diff --git a/tests/functional/simple_seed/data/seed_bom.csv b/tests/adapter/dbt/tests/adapter/simple_seed/seed_bom.csv
    similarity index 100%
    rename from tests/functional/simple_seed/data/seed_bom.csv
    rename to tests/adapter/dbt/tests/adapter/simple_seed/seed_bom.csv
    diff --git a/tests/functional/simple_seed/data/seed_expected.sql b/tests/adapter/dbt/tests/adapter/simple_seed/seeds.py
    similarity index 52%
    rename from tests/functional/simple_seed/data/seed_expected.sql
    rename to tests/adapter/dbt/tests/adapter/simple_seed/seeds.py
    index 6f672b86e08..35ccb0bb7a6 100644
    --- a/tests/functional/simple_seed/data/seed_expected.sql
    +++ b/tests/adapter/dbt/tests/adapter/simple_seed/seeds.py
    @@ -1,9 +1,521 @@
    +# Seed data for tests
    +
    +seed__with_dots_csv = """
    +seed_id
    +1
    +""".lstrip()
    +
    +seed__actual_csv = """
    +seed_id,first_name,email,ip_address,birthday
    +1,Larry,lking0@miitbeian.gov.cn,69.135.206.194,2008-09-12 19:08:31
    +2,Larry,lperkins1@toplist.cz,64.210.133.162,1978-05-09 04:15:14
    +3,Anna,amontgomery2@miitbeian.gov.cn,168.104.64.114,2011-10-16 04:07:57
    +4,Sandra,sgeorge3@livejournal.com,229.235.252.98,1973-07-19 10:52:43
    +5,Fred,fwoods4@google.cn,78.229.170.124,2012-09-30 16:38:29
    +6,Stephen,shanson5@livejournal.com,182.227.157.105,1995-11-07 21:40:50
    +7,William,wmartinez6@upenn.edu,135.139.249.50,1982-09-05 03:11:59
    +8,Jessica,jlong7@hao123.com,203.62.178.210,1991-10-16 11:03:15
    +9,Douglas,dwhite8@tamu.edu,178.187.247.1,1979-10-01 09:49:48
    +10,Lisa,lcoleman9@nydailynews.com,168.234.128.249,2011-05-26 07:45:49
    +11,Ralph,rfieldsa@home.pl,55.152.163.149,1972-11-18 19:06:11
    +12,Louise,lnicholsb@samsung.com,141.116.153.154,2014-11-25 20:56:14
    +13,Clarence,cduncanc@sfgate.com,81.171.31.133,2011-11-17 07:02:36
    +14,Daniel,dfranklind@omniture.com,8.204.211.37,1980-09-13 00:09:04
    +15,Katherine,klanee@auda.org.au,176.96.134.59,1997-08-22 19:36:56
    +16,Billy,bwardf@wikia.com,214.108.78.85,2003-10-19 02:14:47
    +17,Annie,agarzag@ocn.ne.jp,190.108.42.70,1988-10-28 15:12:35
    +18,Shirley,scolemanh@fastcompany.com,109.251.164.84,1988-08-24 10:50:57
    +19,Roger,rfrazieri@scribd.com,38.145.218.108,1985-12-31 15:17:15
    +20,Lillian,lstanleyj@goodreads.com,47.57.236.17,1970-06-08 02:09:05
    +21,Aaron,arodriguezk@nps.gov,205.245.118.221,1985-10-11 23:07:49
    +22,Patrick,pparkerl@techcrunch.com,19.8.100.182,2006-03-29 12:53:56
    +23,Phillip,pmorenom@intel.com,41.38.254.103,2011-11-07 15:35:43
    +24,Henry,hgarcian@newsvine.com,1.191.216.252,2008-08-28 08:30:44
    +25,Irene,iturnero@opera.com,50.17.60.190,1994-04-01 07:15:02
    +26,Andrew,adunnp@pen.io,123.52.253.176,2000-11-01 06:03:25
    +27,David,dgutierrezq@wp.com,238.23.203.42,1988-01-25 07:29:18
    +28,Henry,hsanchezr@cyberchimps.com,248.102.2.185,1983-01-01 13:36:37
    +29,Evelyn,epetersons@gizmodo.com,32.80.46.119,1979-07-16 17:24:12
    +30,Tammy,tmitchellt@purevolume.com,249.246.167.88,2001-04-03 10:00:23
    +31,Jacqueline,jlittleu@domainmarket.com,127.181.97.47,1986-02-11 21:35:50
    +32,Earl,eortizv@opera.com,166.47.248.240,1996-07-06 08:16:27
    +33,Juan,jgordonw@sciencedirect.com,71.77.2.200,1987-01-31 03:46:44
    +34,Diane,dhowellx@nyu.edu,140.94.133.12,1994-06-11 02:30:05
    +35,Randy,rkennedyy@microsoft.com,73.255.34.196,2005-05-26 20:28:39
    +36,Janice,jriveraz@time.com,22.214.227.32,1990-02-09 04:16:52
    +37,Laura,lperry10@diigo.com,159.148.145.73,2015-03-17 05:59:25
    +38,Gary,gray11@statcounter.com,40.193.124.56,1970-01-27 10:04:51
    +39,Jesse,jmcdonald12@typepad.com,31.7.86.103,2009-03-14 08:14:29
    +40,Sandra,sgonzalez13@goodreads.com,223.80.168.239,1993-05-21 14:08:54
    +41,Scott,smoore14@archive.org,38.238.46.83,1980-08-30 11:16:56
    +42,Phillip,pevans15@cisco.com,158.234.59.34,2011-12-15 23:26:31
    +43,Steven,sriley16@google.ca,90.247.57.68,2011-10-29 19:03:28
    +44,Deborah,dbrown17@hexun.com,179.125.143.240,1995-04-10 14:36:07
    +45,Lori,lross18@ow.ly,64.80.162.180,1980-12-27 16:49:15
    +46,Sean,sjackson19@tumblr.com,240.116.183.69,1988-06-12 21:24:45
    +47,Terry,tbarnes1a@163.com,118.38.213.137,1997-09-22 16:43:19
    +48,Dorothy,dross1b@ebay.com,116.81.76.49,2005-02-28 13:33:24
    +49,Samuel,swashington1c@house.gov,38.191.253.40,1989-01-19 21:15:48
    +50,Ralph,rcarter1d@tinyurl.com,104.84.60.174,2007-08-11 10:21:49
    +51,Wayne,whudson1e@princeton.edu,90.61.24.102,1983-07-03 16:58:12
    +52,Rose,rjames1f@plala.or.jp,240.83.81.10,1995-06-08 11:46:23
    +53,Louise,lcox1g@theglobeandmail.com,105.11.82.145,2016-09-19 14:45:51
    +54,Kenneth,kjohnson1h@independent.co.uk,139.5.45.94,1976-08-17 11:26:19
    +55,Donna,dbrown1i@amazon.co.uk,19.45.169.45,2006-05-27 16:51:40
    +56,Johnny,jvasquez1j@trellian.com,118.202.238.23,1975-11-17 08:42:32
    +57,Patrick,pramirez1k@tamu.edu,231.25.153.198,1997-08-06 11:51:09
    +58,Helen,hlarson1l@prweb.com,8.40.21.39,1993-08-04 19:53:40
    +59,Patricia,pspencer1m@gmpg.org,212.198.40.15,1977-08-03 16:37:27
    +60,Joseph,jspencer1n@marriott.com,13.15.63.238,2005-07-23 20:22:06
    +61,Phillip,pschmidt1o@blogtalkradio.com,177.98.201.190,1976-05-19 21:47:44
    +62,Joan,jwebb1p@google.ru,105.229.170.71,1972-09-07 17:53:47
    +63,Phyllis,pkennedy1q@imgur.com,35.145.8.244,2000-01-01 22:33:37
    +64,Katherine,khunter1r@smh.com.au,248.168.205.32,1991-01-09 06:40:24
    +65,Laura,lvasquez1s@wiley.com,128.129.115.152,1997-10-23 12:04:56
    +66,Juan,jdunn1t@state.gov,44.228.124.51,2004-11-10 05:07:35
    +67,Judith,jholmes1u@wiley.com,40.227.179.115,1977-08-02 17:01:45
    +68,Beverly,bbaker1v@wufoo.com,208.34.84.59,2016-03-06 20:07:23
    +69,Lawrence,lcarr1w@flickr.com,59.158.212.223,1988-09-13 06:07:21
    +70,Gloria,gwilliams1x@mtv.com,245.231.88.33,1995-03-18 22:32:46
    +71,Steven,ssims1y@cbslocal.com,104.50.58.255,2001-08-05 21:26:20
    +72,Betty,bmills1z@arstechnica.com,103.177.214.220,1981-12-14 21:26:54
    +73,Mildred,mfuller20@prnewswire.com,151.158.8.130,2000-04-19 10:13:55
    +74,Donald,dday21@icq.com,9.178.102.255,1972-12-03 00:58:24
    +75,Eric,ethomas22@addtoany.com,85.2.241.227,1992-11-01 05:59:30
    +76,Joyce,jarmstrong23@sitemeter.com,169.224.20.36,1985-10-24 06:50:01
    +77,Maria,mmartinez24@amazonaws.com,143.189.167.135,2005-10-05 05:17:42
    +78,Harry,hburton25@youtube.com,156.47.176.237,1978-03-26 05:53:33
    +79,Kevin,klawrence26@hao123.com,79.136.183.83,1994-10-12 04:38:52
    +80,David,dhall27@prweb.com,133.149.172.153,1976-12-15 16:24:24
    +81,Kathy,kperry28@twitter.com,229.242.72.228,1979-03-04 02:58:56
    +82,Adam,aprice29@elegantthemes.com,13.145.21.10,1982-11-07 11:46:59
    +83,Brandon,bgriffin2a@va.gov,73.249.128.212,2013-10-30 05:30:36
    +84,Henry,hnguyen2b@discovery.com,211.36.214.242,1985-01-09 06:37:27
    +85,Eric,esanchez2c@edublogs.org,191.166.188.251,2004-05-01 23:21:42
    +86,Jason,jlee2d@jimdo.com,193.92.16.182,1973-01-08 09:05:39
    +87,Diana,drichards2e@istockphoto.com,19.130.175.245,1994-10-05 22:50:49
    +88,Andrea,awelch2f@abc.net.au,94.155.233.96,2002-04-26 08:41:44
    +89,Louis,lwagner2g@miitbeian.gov.cn,26.217.34.111,2003-08-25 07:56:39
    +90,Jane,jsims2h@seesaa.net,43.4.220.135,1987-03-20 20:39:04
    +91,Larry,lgrant2i@si.edu,97.126.79.34,2000-09-07 20:26:19
    +92,Louis,ldean2j@prnewswire.com,37.148.40.127,2011-09-16 20:12:14
    +93,Jennifer,jcampbell2k@xing.com,38.106.254.142,1988-07-15 05:06:49
    +94,Wayne,wcunningham2l@google.com.hk,223.28.26.187,2009-12-15 06:16:54
    +95,Lori,lstevens2m@icq.com,181.250.181.58,1984-10-28 03:29:19
    +96,Judy,jsimpson2n@marriott.com,180.121.239.219,1986-02-07 15:18:10
    +97,Phillip,phoward2o@usa.gov,255.247.0.175,2002-12-26 08:44:45
    +98,Gloria,gwalker2p@usa.gov,156.140.7.128,1997-10-04 07:58:58
    +99,Paul,pjohnson2q@umn.edu,183.59.198.197,1991-11-14 12:33:55
    +100,Frank,fgreene2r@blogspot.com,150.143.68.121,2010-06-12 23:55:39
    +101,Deborah,dknight2s@reverbnation.com,222.131.211.191,1970-07-08 08:54:23
    +102,Sandra,sblack2t@tripadvisor.com,254.183.128.254,2000-04-12 02:39:36
    +103,Edward,eburns2u@dailymotion.com,253.89.118.18,1993-10-10 10:54:01
    +104,Anthony,ayoung2v@ustream.tv,118.4.193.176,1978-08-26 17:07:29
    +105,Donald,dlawrence2w@wp.com,139.200.159.227,2007-07-21 20:56:20
    +106,Matthew,mfreeman2x@google.fr,205.26.239.92,2014-12-05 17:05:39
    +107,Sean,ssanders2y@trellian.com,143.89.82.108,1993-07-14 21:45:02
    +108,Sharon,srobinson2z@soundcloud.com,66.234.247.54,1977-04-06 19:07:03
    +109,Jennifer,jwatson30@t-online.de,196.102.127.7,1998-03-07 05:12:23
    +110,Clarence,cbrooks31@si.edu,218.93.234.73,2002-11-06 17:22:25
    +111,Jose,jflores32@goo.gl,185.105.244.231,1995-01-05 06:32:21
    +112,George,glee33@adobe.com,173.82.249.196,2015-01-04 02:47:46
    +113,Larry,lhill34@linkedin.com,66.5.206.195,2010-11-02 10:21:17
    +114,Marie,mmeyer35@mysql.com,151.152.88.107,1990-05-22 20:52:51
    +115,Clarence,cwebb36@skype.com,130.198.55.217,1972-10-27 07:38:54
    +116,Sarah,scarter37@answers.com,80.89.18.153,1971-08-24 19:29:30
    +117,Henry,hhughes38@webeden.co.uk,152.60.114.174,1973-01-27 09:00:42
    +118,Teresa,thenry39@hao123.com,32.187.239.106,2015-11-06 01:48:44
    +119,Billy,bgutierrez3a@sun.com,52.37.70.134,2002-03-19 03:20:19
    +120,Anthony,agibson3b@github.io,154.251.232.213,1991-04-19 01:08:15
    +121,Sandra,sromero3c@wikia.com,44.124.171.2,1998-09-06 20:30:34
    +122,Paula,pandrews3d@blogs.com,153.142.118.226,2003-06-24 16:31:24
    +123,Terry,tbaker3e@csmonitor.com,99.120.45.219,1970-12-09 23:57:21
    +124,Lois,lwilson3f@reuters.com,147.44.171.83,1971-01-09 22:28:51
    +125,Sara,smorgan3g@nature.com,197.67.192.230,1992-01-28 20:33:24
    +126,Charles,ctorres3h@china.com.cn,156.115.216.2,1993-10-02 19:36:34
    +127,Richard,ralexander3i@marriott.com,248.235.180.59,1999-02-03 18:40:55
    +128,Christina,charper3j@cocolog-nifty.com,152.114.116.129,1978-09-13 00:37:32
    +129,Steve,sadams3k@economist.com,112.248.91.98,2004-03-21 09:07:43
    +130,Katherine,krobertson3l@ow.ly,37.220.107.28,1977-03-18 19:28:50
    +131,Donna,dgibson3m@state.gov,222.218.76.221,1999-02-01 06:46:16
    +132,Christina,cwest3n@mlb.com,152.114.6.160,1979-12-24 15:30:35
    +133,Sandra,swillis3o@meetup.com,180.71.49.34,1984-09-27 08:05:54
    +134,Clarence,cedwards3p@smugmug.com,10.64.180.186,1979-04-16 16:52:10
    +135,Ruby,rjames3q@wp.com,98.61.54.20,2007-01-13 14:25:52
    +136,Sarah,smontgomery3r@tripod.com,91.45.164.172,2009-07-25 04:34:30
    +137,Sarah,soliver3s@eventbrite.com,30.106.39.146,2012-05-09 22:12:33
    +138,Deborah,dwheeler3t@biblegateway.com,59.105.213.173,1999-11-09 08:08:44
    +139,Deborah,dray3u@i2i.jp,11.108.186.217,2014-02-04 03:15:19
    +140,Paul,parmstrong3v@alexa.com,6.250.59.43,2009-12-21 10:08:53
    +141,Aaron,abishop3w@opera.com,207.145.249.62,1996-04-25 23:20:23
    +142,Henry,hsanders3x@google.ru,140.215.203.171,2012-01-29 11:52:32
    +143,Anne,aanderson3y@1688.com,74.150.102.118,1982-04-03 13:46:17
    +144,Victor,vmurphy3z@hugedomains.com,222.155.99.152,1987-11-03 19:58:41
    +145,Evelyn,ereid40@pbs.org,249.122.33.117,1977-12-14 17:09:57
    +146,Brian,bgonzalez41@wikia.com,246.254.235.141,1991-02-24 00:45:58
    +147,Sandra,sgray42@squarespace.com,150.73.28.159,1972-07-28 17:26:32
    +148,Alice,ajones43@a8.net,78.253.12.177,2002-12-05 16:57:46
    +149,Jessica,jhanson44@mapquest.com,87.229.30.160,1994-01-30 11:40:04
    +150,Louise,lbailey45@reuters.com,191.219.31.101,2011-09-07 21:11:45
    +151,Christopher,cgonzalez46@printfriendly.com,83.137.213.239,1984-10-24 14:58:04
    +152,Gregory,gcollins47@yandex.ru,28.176.10.115,1998-07-25 17:17:10
    +153,Jane,jperkins48@usnews.com,46.53.164.159,1979-08-19 15:25:00
    +154,Phyllis,plong49@yahoo.co.jp,208.140.88.2,1985-07-06 02:16:36
    +155,Adam,acarter4a@scribd.com,78.48.148.204,2005-07-20 03:31:09
    +156,Frank,fweaver4b@angelfire.com,199.180.255.224,2011-03-04 23:07:54
    +157,Ronald,rmurphy4c@cloudflare.com,73.42.97.231,1991-01-11 10:39:41
    +158,Richard,rmorris4d@e-recht24.de,91.9.97.223,2009-01-17 21:05:15
    +159,Rose,rfoster4e@woothemes.com,203.169.53.16,1991-04-21 02:09:38
    +160,George,ggarrett4f@uiuc.edu,186.61.5.167,1989-11-11 11:29:42
    +161,Victor,vhamilton4g@biblegateway.com,121.229.138.38,2012-06-22 18:01:23
    +162,Mark,mbennett4h@businessinsider.com,209.184.29.203,1980-04-16 15:26:34
    +163,Martin,mwells4i@ifeng.com,97.223.55.105,2010-05-26 14:08:18
    +164,Diana,dstone4j@google.ru,90.155.52.47,2013-02-11 00:14:54
    +165,Walter,wferguson4k@blogger.com,30.63.212.44,1986-02-20 17:46:46
    +166,Denise,dcoleman4l@vistaprint.com,10.209.153.77,1992-05-13 20:14:14
    +167,Philip,pknight4m@xing.com,15.28.135.167,2000-09-11 18:41:13
    +168,Russell,rcarr4n@youtube.com,113.55.165.50,2008-07-10 17:49:27
    +169,Donna,dburke4o@dion.ne.jp,70.0.105.111,1992-02-10 17:24:58
    +170,Anne,along4p@squidoo.com,36.154.58.107,2012-08-19 23:35:31
    +171,Clarence,cbanks4q@webeden.co.uk,94.57.53.114,1972-03-11 21:46:44
    +172,Betty,bbowman4r@cyberchimps.com,178.115.209.69,2013-01-13 21:34:51
    +173,Andrew,ahudson4s@nytimes.com,84.32.252.144,1998-09-15 14:20:04
    +174,Keith,kgordon4t@cam.ac.uk,189.237.211.102,2009-01-22 05:34:38
    +175,Patrick,pwheeler4u@mysql.com,47.22.117.226,1984-09-05 22:33:15
    +176,Jesse,jfoster4v@mapquest.com,229.95.131.46,1990-01-20 12:19:15
    +177,Arthur,afisher4w@jugem.jp,107.255.244.98,1983-10-13 11:08:46
    +178,Nicole,nryan4x@wsj.com,243.211.33.221,1974-05-30 23:19:14
    +179,Bruce,bjohnson4y@sfgate.com,17.41.200.101,1992-09-23 02:02:19
    +180,Terry,tcox4z@reference.com,20.189.120.106,1982-02-13 12:43:14
    +181,Ashley,astanley50@kickstarter.com,86.3.56.98,1976-05-09 01:27:16
    +182,Michael,mrivera51@about.me,72.118.249.0,1971-11-11 17:28:37
    +183,Steven,sgonzalez52@mozilla.org,169.112.247.47,2002-08-24 14:59:25
    +184,Kathleen,kfuller53@bloglovin.com,80.93.59.30,2002-03-11 13:41:29
    +185,Nicole,nhenderson54@usda.gov,39.253.60.30,1995-04-24 05:55:07
    +186,Ralph,rharper55@purevolume.com,167.147.142.189,1980-02-10 18:35:45
    +187,Heather,hcunningham56@photobucket.com,96.222.196.229,2007-06-15 05:37:50
    +188,Nancy,nlittle57@cbc.ca,241.53.255.175,2007-07-12 23:42:48
    +189,Juan,jramirez58@pinterest.com,190.128.84.27,1978-11-07 23:37:37
    +190,Beverly,bfowler59@chronoengine.com,54.144.230.49,1979-03-31 23:27:28
    +191,Shirley,sstevens5a@prlog.org,200.97.231.248,2011-12-06 07:08:50
    +192,Annie,areyes5b@squidoo.com,223.32.182.101,2011-05-28 02:42:09
    +193,Jack,jkelley5c@tiny.cc,47.34.118.150,1981-12-05 17:31:40
    +194,Keith,krobinson5d@1und1.de,170.210.209.31,1999-03-09 11:05:43
    +195,Joseph,jmiller5e@google.com.au,136.74.212.139,1984-10-08 13:18:20
    +196,Annie,aday5f@blogspot.com,71.99.186.69,1986-02-18 12:27:34
    +197,Nancy,nperez5g@liveinternet.ru,28.160.6.107,1983-10-20 17:51:20
    +198,Tammy,tward5h@ucoz.ru,141.43.164.70,1980-03-31 04:45:29
    +199,Doris,dryan5i@ted.com,239.117.202.188,1985-07-03 03:17:53
    +200,Rose,rmendoza5j@photobucket.com,150.200.206.79,1973-04-21 21:36:40
    +201,Cynthia,cbutler5k@hubpages.com,80.153.174.161,2001-01-20 01:42:26
    +202,Samuel,soliver5l@people.com.cn,86.127.246.140,1970-09-02 02:19:00
    +203,Carl,csanchez5m@mysql.com,50.149.237.107,1993-12-01 07:02:09
    +204,Kathryn,kowens5n@geocities.jp,145.166.205.201,2004-07-06 18:39:33
    +205,Nicholas,nnichols5o@parallels.com,190.240.66.170,2014-11-11 18:52:19
    +206,Keith,kwillis5p@youtube.com,181.43.206.100,1998-06-13 06:30:51
    +207,Justin,jwebb5q@intel.com,211.54.245.74,2000-11-04 16:58:26
    +208,Gary,ghicks5r@wikipedia.org,196.154.213.104,1992-12-01 19:48:28
    +209,Martin,mpowell5s@flickr.com,153.67.12.241,1983-06-30 06:24:32
    +210,Brenda,bkelley5t@xinhuanet.com,113.100.5.172,2005-01-08 20:50:22
    +211,Edward,eray5u@a8.net,205.187.246.65,2011-09-26 08:04:44
    +212,Steven,slawson5v@senate.gov,238.150.250.36,1978-11-22 02:48:09
    +213,Robert,rthompson5w@furl.net,70.7.89.236,2001-09-12 08:52:07
    +214,Jack,jporter5x@diigo.com,220.172.29.99,1976-07-26 14:29:21
    +215,Lisa,ljenkins5y@oakley.com,150.151.170.180,2010-03-20 19:21:16
    +216,Theresa,tbell5z@mayoclinic.com,247.25.53.173,2001-03-11 05:36:40
    +217,Jimmy,jstephens60@weather.com,145.101.93.235,1983-04-12 09:35:30
    +218,Louis,lhunt61@amazon.co.jp,78.137.6.253,1997-08-29 19:34:34
    +219,Lawrence,lgilbert62@ted.com,243.132.8.78,2015-04-08 22:06:56
    +220,David,dgardner63@4shared.com,204.40.46.136,1971-07-09 03:29:11
    +221,Charles,ckennedy64@gmpg.org,211.83.233.2,2011-02-26 11:55:04
    +222,Lillian,lbanks65@msu.edu,124.233.12.80,2010-05-16 20:29:02
    +223,Ernest,enguyen66@baidu.com,82.45.128.148,1996-07-04 10:07:04
    +224,Ryan,rrussell67@cloudflare.com,202.53.240.223,1983-08-05 12:36:29
    +225,Donald,ddavis68@ustream.tv,47.39.218.137,1989-05-27 02:30:56
    +226,Joe,jscott69@blogspot.com,140.23.131.75,1973-03-16 12:21:31
    +227,Anne,amarshall6a@google.ca,113.162.200.197,1988-12-09 03:38:29
    +228,Willie,wturner6b@constantcontact.com,85.83.182.249,1991-10-06 01:51:10
    +229,Nicole,nwilson6c@sogou.com,30.223.51.135,1977-05-29 19:54:56
    +230,Janet,jwheeler6d@stumbleupon.com,153.194.27.144,2011-03-13 12:48:47
    +231,Lois,lcarr6e@statcounter.com,0.41.36.53,1993-02-06 04:52:01
    +232,Shirley,scruz6f@tmall.com,37.156.39.223,2007-02-18 17:47:01
    +233,Patrick,pford6g@reverbnation.com,36.198.200.89,1977-03-06 15:47:24
    +234,Lisa,lhudson6h@usatoday.com,134.213.58.137,2014-10-28 01:56:56
    +235,Pamela,pmartinez6i@opensource.org,5.151.127.202,1987-11-30 16:44:47
    +236,Larry,lperez6j@infoseek.co.jp,235.122.96.148,1979-01-18 06:33:45
    +237,Pamela,pramirez6k@census.gov,138.233.34.163,2012-01-29 10:35:20
    +238,Daniel,dcarr6l@php.net,146.21.152.242,1984-11-17 08:22:59
    +239,Patrick,psmith6m@indiegogo.com,136.222.199.36,2001-05-30 22:16:44
    +240,Raymond,rhenderson6n@hc360.com,116.31.112.38,2000-01-05 20:35:41
    +241,Teresa,treynolds6o@miitbeian.gov.cn,198.126.205.220,1996-11-08 01:27:31
    +242,Johnny,jmason6p@flickr.com,192.8.232.114,2013-05-14 05:35:50
    +243,Angela,akelly6q@guardian.co.uk,234.116.60.197,1977-08-20 02:05:17
    +244,Douglas,dcole6r@cmu.edu,128.135.212.69,2016-10-26 17:40:36
    +245,Frances,fcampbell6s@twitpic.com,94.22.243.235,1987-04-26 07:07:13
    +246,Donna,dgreen6t@chron.com,227.116.46.107,2011-07-25 12:59:54
    +247,Benjamin,bfranklin6u@redcross.org,89.141.142.89,1974-05-03 20:28:18
    +248,Randy,rpalmer6v@rambler.ru,70.173.63.178,2011-12-20 17:40:18
    +249,Melissa,mmurray6w@bbb.org,114.234.118.137,1991-02-26 12:45:44
    +250,Jean,jlittle6x@epa.gov,141.21.163.254,1991-08-16 04:57:09
    +251,Daniel,dolson6y@nature.com,125.75.104.97,2010-04-23 06:25:54
    +252,Kathryn,kwells6z@eventbrite.com,225.104.28.249,2015-01-31 02:21:50
    +253,Theresa,tgonzalez70@ox.ac.uk,91.93.156.26,1971-12-11 10:31:31
    +254,Beverly,broberts71@bluehost.com,244.40.158.89,2013-09-21 13:02:31
    +255,Pamela,pmurray72@netscape.com,218.54.95.216,1985-04-16 00:34:00
    +256,Timothy,trichardson73@amazonaws.com,235.49.24.229,2000-11-11 09:48:28
    +257,Mildred,mpalmer74@is.gd,234.125.95.132,1992-05-25 02:25:02
    +258,Jessica,jcampbell75@google.it,55.98.30.140,2014-08-26 00:26:34
    +259,Beverly,bthomas76@cpanel.net,48.78.228.176,1970-08-18 10:40:05
    +260,Eugene,eward77@cargocollective.com,139.226.204.2,1996-12-04 23:17:00
    +261,Andrea,aallen78@webnode.com,160.31.214.38,2009-07-06 07:22:37
    +262,Justin,jruiz79@merriam-webster.com,150.149.246.122,2005-06-06 11:44:19
    +263,Kenneth,kedwards7a@networksolutions.com,98.82.193.128,2001-07-03 02:00:10
    +264,Rachel,rday7b@miibeian.gov.cn,114.15.247.221,1994-08-18 19:45:40
    +265,Russell,rmiller7c@instagram.com,184.130.152.253,1977-11-06 01:58:12
    +266,Bonnie,bhudson7d@cornell.edu,235.180.186.206,1990-12-03 22:45:24
    +267,Raymond,rknight7e@yandex.ru,161.2.44.252,1995-08-25 04:31:19
    +268,Bonnie,brussell7f@elpais.com,199.237.57.207,1991-03-29 08:32:06
    +269,Marie,mhenderson7g@elpais.com,52.203.131.144,2004-06-04 21:50:28
    +270,Alan,acarr7h@trellian.com,147.51.205.72,2005-03-03 10:51:31
    +271,Barbara,bturner7i@hugedomains.com,103.160.110.226,2004-08-04 13:42:40
    +272,Christina,cdaniels7j@census.gov,0.238.61.251,1972-10-18 12:47:33
    +273,Jeremy,jgomez7k@reuters.com,111.26.65.56,2013-01-13 10:41:35
    +274,Laura,lwood7l@icio.us,149.153.38.205,2011-06-25 09:33:59
    +275,Matthew,mbowman7m@auda.org.au,182.138.206.172,1999-03-05 03:25:36
    +276,Denise,dparker7n@icq.com,0.213.88.138,2011-11-04 09:43:06
    +277,Phillip,pparker7o@discuz.net,219.242.165.240,1973-10-19 04:22:29
    +278,Joan,jpierce7p@salon.com,63.31.213.202,1989-04-09 22:06:24
    +279,Irene,ibaker7q@cbc.ca,102.33.235.114,1992-09-04 13:00:57
    +280,Betty,bbowman7r@ted.com,170.91.249.242,2015-09-28 08:14:22
    +281,Teresa,truiz7s@boston.com,82.108.158.207,1999-07-18 05:17:09
    +282,Helen,hbrooks7t@slideshare.net,102.87.162.187,2003-01-06 15:45:29
    +283,Karen,kgriffin7u@wunderground.com,43.82.44.184,2010-05-28 01:56:37
    +284,Lisa,lfernandez7v@mtv.com,200.238.218.220,1993-04-03 20:33:51
    +285,Jesse,jlawrence7w@timesonline.co.uk,95.122.105.78,1990-01-05 17:28:43
    +286,Terry,tross7x@macromedia.com,29.112.114.133,2009-08-29 21:32:17
    +287,Angela,abradley7y@icq.com,177.44.27.72,1989-10-04 21:46:06
    +288,Maria,mhart7z@dailymotion.com,55.27.55.202,1975-01-21 01:22:57
    +289,Raymond,randrews80@pinterest.com,88.90.78.67,1992-03-16 21:37:40
    +290,Kathy,krice81@bluehost.com,212.63.196.102,2000-12-14 03:06:44
    +291,Cynthia,cramos82@nymag.com,107.89.190.6,2005-06-28 02:02:33
    +292,Kimberly,kjones83@mysql.com,86.169.101.101,2007-06-13 22:56:49
    +293,Timothy,thansen84@microsoft.com,108.100.254.90,2003-04-04 10:31:57
    +294,Carol,cspencer85@berkeley.edu,75.118.144.187,1999-03-30 14:53:21
    +295,Louis,lmedina86@latimes.com,141.147.163.24,1991-04-11 17:53:13
    +296,Margaret,mcole87@google.fr,53.184.26.83,1991-12-19 01:54:10
    +297,Mary,mgomez88@yellowpages.com,208.56.57.99,1976-05-21 18:05:08
    +298,Amanda,aanderson89@geocities.com,147.73.15.252,1987-08-22 15:05:28
    +299,Kathryn,kgarrett8a@nature.com,27.29.177.220,1976-07-15 04:25:04
    +300,Dorothy,dmason8b@shareasale.com,106.210.99.193,1990-09-03 21:39:31
    +301,Lois,lkennedy8c@amazon.de,194.169.29.187,2007-07-29 14:09:31
    +302,Irene,iburton8d@washingtonpost.com,196.143.110.249,2013-09-05 11:32:46
    +303,Betty,belliott8e@wired.com,183.105.222.199,1979-09-19 19:29:13
    +304,Bobby,bmeyer8f@census.gov,36.13.161.145,2014-05-24 14:34:39
    +305,Ann,amorrison8g@sfgate.com,72.154.54.137,1978-10-05 14:22:34
    +306,Daniel,djackson8h@wunderground.com,144.95.32.34,1990-07-27 13:23:05
    +307,Joe,jboyd8i@alibaba.com,187.105.86.178,2011-09-28 16:46:32
    +308,Ralph,rdunn8j@fc2.com,3.19.87.255,1984-10-18 08:00:40
    +309,Craig,ccarter8k@gizmodo.com,235.152.76.215,1998-07-04 12:15:21
    +310,Paula,pdean8l@hhs.gov,161.100.173.197,1973-02-13 09:38:55
    +311,Andrew,agarrett8m@behance.net,199.253.123.218,1991-02-14 13:36:32
    +312,Janet,jhowell8n@alexa.com,39.189.139.79,2012-11-24 20:17:33
    +313,Keith,khansen8o@godaddy.com,116.186.223.196,1987-08-23 21:22:05
    +314,Nicholas,nedwards8p@state.gov,142.175.142.11,1977-03-28 18:27:27
    +315,Jacqueline,jallen8q@oaic.gov.au,189.66.135.192,1994-10-26 11:44:26
    +316,Frank,fgardner8r@mapy.cz,154.77.119.169,1983-01-29 19:19:51
    +317,Eric,eharrison8s@google.cn,245.139.65.123,1984-02-04 09:54:36
    +318,Gregory,gcooper8t@go.com,171.147.0.221,2004-06-14 05:22:08
    +319,Jean,jfreeman8u@rakuten.co.jp,67.243.121.5,1977-01-07 18:23:43
    +320,Juan,jlewis8v@shinystat.com,216.181.171.189,2001-08-23 17:32:43
    +321,Randy,rwilliams8w@shinystat.com,105.152.146.28,1983-02-17 00:05:50
    +322,Stephen,shart8x@sciencedirect.com,196.131.205.148,2004-02-15 10:12:03
    +323,Annie,ahunter8y@example.com,63.36.34.103,2003-07-23 21:15:25
    +324,Melissa,mflores8z@cbc.ca,151.230.217.90,1983-11-02 14:53:56
    +325,Jane,jweaver90@about.me,0.167.235.217,1987-07-29 00:13:44
    +326,Anthony,asmith91@oracle.com,97.87.48.41,2001-05-31 18:44:11
    +327,Terry,tdavis92@buzzfeed.com,46.20.12.51,2015-09-12 23:13:55
    +328,Brandon,bmontgomery93@gravatar.com,252.101.48.186,2010-10-28 08:26:27
    +329,Chris,cmurray94@bluehost.com,25.158.167.97,2004-05-05 16:10:31
    +330,Denise,dfuller95@hugedomains.com,216.210.149.28,1979-04-20 08:57:24
    +331,Arthur,amcdonald96@sakura.ne.jp,206.42.36.213,2009-08-15 03:26:16
    +332,Jesse,jhoward97@google.cn,46.181.118.30,1974-04-18 14:08:41
    +333,Frank,fsimpson98@domainmarket.com,163.220.211.87,2006-06-30 14:46:52
    +334,Janice,jwoods99@pen.io,229.245.237.182,1988-04-06 11:52:58
    +335,Rebecca,rroberts9a@huffingtonpost.com,148.96.15.80,1976-10-05 08:44:16
    +336,Joshua,jray9b@opensource.org,192.253.12.198,1971-12-25 22:27:07
    +337,Joyce,jcarpenter9c@statcounter.com,125.171.46.215,2001-12-31 22:08:13
    +338,Andrea,awest9d@privacy.gov.au,79.101.180.201,1983-02-18 20:07:47
    +339,Christine,chudson9e@yelp.com,64.198.43.56,1997-09-08 08:03:43
    +340,Joe,jparker9f@earthlink.net,251.215.148.153,1973-11-04 05:08:18
    +341,Thomas,tkim9g@answers.com,49.187.34.47,1991-08-07 21:13:48
    +342,Janice,jdean9h@scientificamerican.com,4.197.117.16,2009-12-08 02:35:49
    +343,James,jmitchell9i@umich.edu,43.121.18.147,2011-04-28 17:04:09
    +344,Charles,cgardner9j@purevolume.com,197.78.240.240,1998-02-11 06:47:07
    +345,Robert,rhenderson9k@friendfeed.com,215.84.180.88,2002-05-10 15:33:14
    +346,Chris,cgray9l@4shared.com,249.70.192.240,1998-10-03 16:43:42
    +347,Gloria,ghayes9m@hibu.com,81.103.138.26,1999-12-26 11:23:13
    +348,Edward,eramirez9n@shareasale.com,38.136.90.136,2010-08-19 08:01:06
    +349,Cheryl,cbutler9o@google.ca,172.180.78.172,1995-05-27 20:03:52
    +350,Margaret,mwatkins9p@sfgate.com,3.20.198.6,2014-10-21 01:42:58
    +351,Rebecca,rwelch9q@examiner.com,45.81.42.208,2001-02-08 12:19:06
    +352,Joe,jpalmer9r@phpbb.com,163.202.92.190,1970-01-05 11:29:12
    +353,Sandra,slewis9s@dyndns.org,77.215.201.236,1974-01-05 07:04:04
    +354,Todd,tfranklin9t@g.co,167.125.181.82,2009-09-28 10:13:58
    +355,Joseph,jlewis9u@webmd.com,244.204.6.11,1990-10-21 15:49:57
    +356,Alan,aknight9v@nydailynews.com,152.197.95.83,1996-03-08 08:43:17
    +357,Sharon,sdean9w@123-reg.co.uk,237.46.40.26,1985-11-30 12:09:24
    +358,Annie,awright9x@cafepress.com,190.45.231.111,2000-08-24 11:56:06
    +359,Diane,dhamilton9y@youtube.com,85.146.171.196,2015-02-24 02:03:57
    +360,Antonio,alane9z@auda.org.au,61.63.146.203,2001-05-13 03:43:34
    +361,Matthew,mallena0@hhs.gov,29.97.32.19,1973-02-19 23:43:32
    +362,Bonnie,bfowlera1@soup.io,251.216.99.53,2013-08-01 15:35:41
    +363,Margaret,mgraya2@examiner.com,69.255.151.79,1998-01-23 22:24:59
    +364,Joan,jwagnera3@printfriendly.com,192.166.120.61,1973-07-13 00:30:22
    +365,Catherine,cperkinsa4@nytimes.com,58.21.24.214,2006-11-19 11:52:26
    +366,Mark,mcartera5@cpanel.net,220.33.102.142,2007-09-09 09:43:27
    +367,Paula,ppricea6@msn.com,36.182.238.124,2009-11-11 09:13:05
    +368,Catherine,cgreena7@army.mil,228.203.58.19,2005-08-09 16:52:15
    +369,Helen,hhamiltona8@symantec.com,155.56.194.99,2005-02-01 05:40:36
    +370,Jane,jmeyera9@ezinearticles.com,133.244.113.213,2013-11-06 22:10:23
    +371,Wanda,wevansaa@bloglovin.com,233.125.192.48,1994-12-26 23:43:42
    +372,Mark,mmarshallab@tumblr.com,114.74.60.47,2016-09-29 18:03:01
    +373,Andrew,amartinezac@google.cn,182.54.37.130,1976-06-06 17:04:17
    +374,Helen,hmoralesad@e-recht24.de,42.45.4.123,1977-03-28 19:06:59
    +375,Bonnie,bstoneae@php.net,196.149.79.137,1970-02-05 17:05:58
    +376,Douglas,dfreemanaf@nasa.gov,215.65.124.218,2008-11-20 21:51:55
    +377,Willie,wwestag@army.mil,35.189.92.118,1992-07-24 05:08:08
    +378,Cheryl,cwagnerah@upenn.edu,228.239.222.141,2010-01-25 06:29:01
    +379,Sandra,swardai@baidu.com,63.11.113.240,1985-05-23 08:07:37
    +380,Julie,jrobinsonaj@jugem.jp,110.58.202.50,2015-03-05 09:42:07
    +381,Larry,lwagnerak@shop-pro.jp,98.234.25.24,1975-07-22 22:22:02
    +382,Juan,jcastilloal@yelp.com,24.174.74.202,2007-01-17 09:32:43
    +383,Donna,dfrazieram@artisteer.com,205.26.147.45,1990-02-11 20:55:46
    +384,Rachel,rfloresan@w3.org,109.60.216.162,1983-05-22 22:42:18
    +385,Robert,rreynoldsao@theguardian.com,122.65.209.130,2009-05-01 18:02:51
    +386,Donald,dbradleyap@etsy.com,42.54.35.126,1997-01-16 16:31:52
    +387,Rachel,rfisheraq@nih.gov,160.243.250.45,2006-02-17 22:05:49
    +388,Nicholas,nhamiltonar@princeton.edu,156.211.37.111,1976-06-21 03:36:29
    +389,Timothy,twhiteas@ca.gov,36.128.23.70,1975-09-24 03:51:18
    +390,Diana,dbradleyat@odnoklassniki.ru,44.102.120.184,1983-04-27 09:02:50
    +391,Billy,bfowlerau@jimdo.com,91.200.68.196,1995-01-29 06:57:35
    +392,Bruce,bandrewsav@ucoz.com,48.12.101.125,1992-10-27 04:31:39
    +393,Linda,lromeroaw@usa.gov,100.71.233.19,1992-06-08 15:13:18
    +394,Debra,dwatkinsax@ucoz.ru,52.160.233.193,2001-11-11 06:51:01
    +395,Katherine,kburkeay@wix.com,151.156.242.141,2010-06-14 19:54:28
    +396,Martha,mharrisonaz@youku.com,21.222.10.199,1989-10-16 14:17:55
    +397,Dennis,dwellsb0@youtu.be,103.16.29.3,1985-12-21 06:05:51
    +398,Gloria,grichardsb1@bloglines.com,90.147.120.234,1982-08-27 01:04:43
    +399,Brenda,bfullerb2@t.co,33.253.63.90,2011-04-20 05:00:35
    +400,Larry,lhendersonb3@disqus.com,88.95.132.128,1982-08-31 02:15:12
    +401,Richard,rlarsonb4@wisc.edu,13.48.231.150,1979-04-15 14:08:09
    +402,Terry,thuntb5@usa.gov,65.91.103.240,1998-05-15 11:50:49
    +403,Harry,hburnsb6@nasa.gov,33.38.21.244,1981-04-12 14:02:20
    +404,Diana,dellisb7@mlb.com,218.229.81.135,1997-01-29 00:17:25
    +405,Jack,jburkeb8@tripadvisor.com,210.227.182.216,1984-03-09 17:24:03
    +406,Julia,jlongb9@fotki.com,10.210.12.104,2005-10-26 03:54:13
    +407,Lois,lscottba@msu.edu,188.79.136.138,1973-02-02 18:40:39
    +408,Sandra,shendersonbb@shareasale.com,114.171.220.108,2012-06-09 18:22:26
    +409,Irene,isanchezbc@cdbaby.com,109.255.50.119,1983-09-28 21:11:27
    +410,Emily,ebrooksbd@bandcamp.com,227.81.93.79,1970-08-31 21:08:01
    +411,Michelle,mdiazbe@businessweek.com,236.249.6.226,1993-05-22 08:07:07
    +412,Tammy,tbennettbf@wisc.edu,145.253.239.152,1978-12-31 20:24:51
    +413,Christine,cgreenebg@flickr.com,97.25.140.118,1978-07-17 12:55:30
    +414,Patricia,pgarzabh@tuttocitta.it,139.246.192.211,1984-02-27 13:40:08
    +415,Kimberly,kromerobi@aol.com,73.56.88.247,1976-09-16 14:22:04
    +416,George,gjohnstonbj@fda.gov,240.36.245.185,1979-07-24 14:36:02
    +417,Eugene,efullerbk@sciencedaily.com,42.38.105.140,2012-09-12 01:56:41
    +418,Andrea,astevensbl@goo.gl,31.152.207.204,1979-05-24 11:06:21
    +419,Shirley,sreidbm@scientificamerican.com,103.60.31.241,1984-02-23 04:07:41
    +420,Terry,tmorenobn@blinklist.com,92.161.34.42,1994-06-25 14:01:35
    +421,Christopher,cmorenobo@go.com,158.86.176.82,1973-09-05 09:18:47
    +422,Dennis,dhansonbp@ning.com,40.160.81.75,1982-01-20 10:19:41
    +423,Beverly,brussellbq@de.vu,138.32.56.204,1997-11-06 07:20:19
    +424,Howard,hparkerbr@163.com,103.171.134.171,2015-06-24 15:37:10
    +425,Helen,hmccoybs@fema.gov,61.200.4.71,1995-06-20 08:59:10
    +426,Ann,ahudsonbt@cafepress.com,239.187.71.125,1977-04-11 07:59:28
    +427,Tina,twestbu@nhs.uk,80.213.117.74,1992-08-19 05:54:44
    +428,Terry,tnguyenbv@noaa.gov,21.93.118.95,1991-09-19 23:22:55
    +429,Ashley,aburtonbw@wix.com,233.176.205.109,2009-11-10 05:01:20
    +430,Eric,emyersbx@1und1.de,168.91.212.67,1987-08-10 07:16:20
    +431,Barbara,blittleby@lycos.com,242.14.189.239,2008-08-02 12:13:04
    +432,Sean,sevansbz@instagram.com,14.39.177.13,2007-04-16 17:28:49
    +433,Shirley,sburtonc0@newsvine.com,34.107.138.76,1980-12-10 02:19:29
    +434,Patricia,pfreemanc1@so-net.ne.jp,219.213.142.117,1987-03-01 02:25:45
    +435,Paula,pfosterc2@vkontakte.ru,227.14.138.141,1972-09-22 12:59:34
    +436,Nicole,nstewartc3@1688.com,8.164.23.115,1998-10-27 00:10:17
    +437,Earl,ekimc4@ovh.net,100.26.244.177,2013-01-22 10:05:46
    +438,Beverly,breedc5@reuters.com,174.12.226.27,1974-09-22 07:29:36
    +439,Lawrence,lbutlerc6@a8.net,105.164.42.164,1992-06-05 00:43:40
    +440,Charles,cmoorec7@ucoz.com,252.197.131.69,1990-04-09 02:34:05
    +441,Alice,alawsonc8@live.com,183.73.220.232,1989-02-28 09:11:04
    +442,Dorothy,dcarpenterc9@arstechnica.com,241.47.200.14,2005-05-02 19:57:21
    +443,Carolyn,cfowlerca@go.com,213.109.55.202,1978-09-10 20:18:20
    +444,Anthony,alongcb@free.fr,169.221.158.204,1984-09-13 01:59:23
    +445,Annie,amoorecc@e-recht24.de,50.34.148.61,2009-03-26 03:41:07
    +446,Carlos,candrewscd@ihg.com,236.69.59.212,1972-03-29 22:42:48
    +447,Beverly,bramosce@google.ca,164.250.184.49,1982-11-10 04:34:01
    +448,Teresa,tlongcf@umich.edu,174.88.53.223,1987-05-17 12:48:00
    +449,Roy,rboydcg@uol.com.br,91.58.243.215,1974-06-16 17:59:54
    +450,Ashley,afieldsch@tamu.edu,130.138.11.126,1983-09-15 05:52:36
    +451,Judith,jhawkinsci@cmu.edu,200.187.103.245,2003-10-22 12:24:03
    +452,Rebecca,rwestcj@ocn.ne.jp,72.85.3.103,1980-11-13 11:01:26
    +453,Raymond,rporterck@infoseek.co.jp,146.33.216.151,1982-05-17 23:58:03
    +454,Janet,jmarshallcl@odnoklassniki.ru,52.46.193.166,1998-10-04 00:02:21
    +455,Shirley,speterscm@salon.com,248.126.31.15,1987-01-30 06:04:59
    +456,Annie,abowmancn@economist.com,222.213.248.59,2006-03-14 23:52:59
    +457,Jean,jlarsonco@blogspot.com,71.41.25.195,2007-09-08 23:49:45
    +458,Phillip,pmoralescp@stanford.edu,74.119.87.28,2011-03-14 20:25:40
    +459,Norma,nrobinsoncq@economist.com,28.225.21.54,1989-10-21 01:22:43
    +460,Kimberly,kclarkcr@dion.ne.jp,149.171.132.153,2008-06-27 02:27:30
    +461,Ruby,rmorriscs@ucla.edu,177.85.163.249,2016-01-28 16:43:44
    +462,Jonathan,jcastilloct@tripod.com,78.4.28.77,2000-05-24 17:33:06
    +463,Edward,ebryantcu@jigsy.com,140.31.98.193,1992-12-17 08:32:47
    +464,Chris,chamiltoncv@eepurl.com,195.171.234.206,1970-12-05 03:42:19
    +465,Michael,mweavercw@reference.com,7.233.133.213,1987-03-29 02:30:54
    +466,Howard,hlawrencecx@businessweek.com,113.225.124.224,1990-07-30 07:20:57
    +467,Philip,phowardcy@comsenz.com,159.170.247.249,2010-10-15 10:18:37
    +468,Mary,mmarshallcz@xing.com,125.132.189.70,2007-07-19 13:48:47
    +469,Scott,salvarezd0@theguardian.com,78.49.103.230,1987-10-31 06:10:44
    +470,Wayne,wcarrolld1@blog.com,238.1.120.204,1980-11-19 03:26:10
    +471,Jennifer,jwoodsd2@multiply.com,92.20.224.49,2010-05-06 22:17:04
    +472,Raymond,rwelchd3@toplist.cz,176.158.35.240,2007-12-12 19:02:51
    +473,Steven,sdixond4@wisc.edu,167.55.237.52,1984-05-05 11:44:37
    +474,Ralph,rjamesd5@ameblo.jp,241.190.50.133,2000-07-06 08:44:37
    +475,Jason,jrobinsond6@hexun.com,138.119.139.56,2006-02-03 05:27:45
    +476,Doris,dwoodd7@fema.gov,180.220.156.190,1978-05-11 20:14:20
    +477,Elizabeth,eberryd8@youtu.be,74.188.53.229,2006-11-18 08:29:06
    +478,Irene,igilbertd9@privacy.gov.au,194.152.218.1,1985-09-17 02:46:52
    +479,Jessica,jdeanda@ameblo.jp,178.103.93.118,1974-06-07 19:04:05
    +480,Rachel,ralvarezdb@phoca.cz,17.22.223.174,1999-03-08 02:43:25
    +481,Kenneth,kthompsondc@shinystat.com,229.119.91.234,2007-05-15 13:17:32
    +482,Harold,hmurraydd@parallels.com,133.26.188.80,1993-11-15 03:42:07
    +483,Paula,phowellde@samsung.com,34.215.28.216,1993-11-29 15:55:00
    +484,Ruth,rpiercedf@tripadvisor.com,111.30.130.123,1986-08-17 10:19:38
    +485,Phyllis,paustindg@vk.com,50.84.34.178,1994-04-13 03:05:24
    +486,Laura,lfosterdh@usnews.com,37.8.101.33,2001-06-30 08:58:59
    +487,Eric,etaylordi@com.com,103.183.253.45,2006-09-15 20:18:46
    +488,Doris,driveradj@prweb.com,247.16.2.199,1989-05-08 09:27:09
    +489,Ryan,rhughesdk@elegantthemes.com,103.234.153.232,1989-08-01 18:36:06
    +490,Steve,smoralesdl@jigsy.com,3.76.84.207,2011-03-13 17:01:05
    +491,Louis,lsullivandm@who.int,78.135.44.208,1975-11-26 16:01:23
    +492,Catherine,ctuckerdn@seattletimes.com,93.137.106.21,1990-03-13 16:14:56
    +493,Ann,adixondo@gmpg.org,191.136.222.111,2002-06-05 14:22:18
    +494,Johnny,jhartdp@amazon.com,103.252.198.39,1988-07-30 23:54:49
    +495,Susan,srichardsdq@skype.com,126.247.192.11,2005-01-09 12:08:14
    +496,Brenda,bparkerdr@skype.com,63.232.216.86,1974-05-18 05:58:29
    +497,Tammy,tmurphyds@constantcontact.com,56.56.37.112,2014-08-05 18:22:25
    +498,Larry,lhayesdt@wordpress.com,162.146.13.46,1997-02-26 14:01:53
    +499,,ethomasdu@hhs.gov,6.241.88.250,2007-09-14 13:03:34
    +500,Paula,pshawdv@networksolutions.com,123.27.47.249,2003-10-30 21:19:20
    +""".lstrip()
    +
    +seeds__expected_sql = """
     create table {schema}.seed_expected (
    -	seed_id INTEGER,
    -	first_name TEXT,
    -	email TEXT,
    -	ip_address TEXT,
    -	birthday TIMESTAMP WITHOUT TIME ZONE
    +seed_id INTEGER,
    +first_name TEXT,
    +email TEXT,
    +ip_address TEXT,
    +birthday TIMESTAMP WITHOUT TIME ZONE
     );
     
     
    @@ -510,3 +1022,72 @@
         (498,'Larry','lhayesdt@wordpress.com','162.146.13.46','1997-02-26 14:01:53'),
         (499,NULL,'ethomasdu@hhs.gov','6.241.88.250','2007-09-14 13:03:34'),
         (500,'Paula','pshawdv@networksolutions.com','123.27.47.249','2003-10-30 21:19:20');
    +"""
    +
    +seed__unicode_csv = """
    +seed_id
    +Uh – Oh
    +""".lstrip()
    +
    +seeds__enabled_in_config_csv = """seed_id,first_name,email,ip_address,birthday
    +1,Larry,lking0@miitbeian.gov.cn,69.135.206.194,2008-09-12 19:08:31
    +2,Larry,lperkins1@toplist.cz,64.210.133.162,1978-05-09 04:15:14
    +3,Anna,amontgomery2@miitbeian.gov.cn,168.104.64.114,2011-10-16 04:07:57
    +4,Sandra,sgeorge3@livejournal.com,229.235.252.98,1973-07-19 10:52:43
    +5,Fred,fwoods4@google.cn,78.229.170.124,2012-09-30 16:38:29
    +6,Stephen,shanson5@livejournal.com,182.227.157.105,1995-11-07 21:40:50
    +7,William,wmartinez6@upenn.edu,135.139.249.50,1982-09-05 03:11:59
    +8,Jessica,jlong7@hao123.com,203.62.178.210,1991-10-16 11:03:15
    +9,Douglas,dwhite8@tamu.edu,178.187.247.1,1979-10-01 09:49:48
    +10,Lisa,lcoleman9@nydailynews.com,168.234.128.249,2011-05-26 07:45:49
    +11,Ralph,rfieldsa@home.pl,55.152.163.149,1972-11-18 19:06:11
    +12,Louise,lnicholsb@samsung.com,141.116.153.154,2014-11-25 20:56:14
    +13,Clarence,cduncanc@sfgate.com,81.171.31.133,2011-11-17 07:02:36
    +14,Daniel,dfranklind@omniture.com,8.204.211.37,1980-09-13 00:09:04
    +15,Katherine,klanee@auda.org.au,176.96.134.59,1997-08-22 19:36:56
    +16,Billy,bwardf@wikia.com,214.108.78.85,2003-10-19 02:14:47
    +17,Annie,agarzag@ocn.ne.jp,190.108.42.70,1988-10-28 15:12:35
    +18,Shirley,scolemanh@fastcompany.com,109.251.164.84,1988-08-24 10:50:57
    +19,Roger,rfrazieri@scribd.com,38.145.218.108,1985-12-31 15:17:15
    +20,Lillian,lstanleyj@goodreads.com,47.57.236.17,1970-06-08 02:09:05
    +""".lstrip()
    +
    +seeds__disabled_in_config_csv = """seed_id,first_name,email,ip_address,birthday
    +1,Larry,lking0@miitbeian.gov.cn,69.135.206.194,2008-09-12 19:08:31
    +2,Larry,lperkins1@toplist.cz,64.210.133.162,1978-05-09 04:15:14
    +3,Anna,amontgomery2@miitbeian.gov.cn,168.104.64.114,2011-10-16 04:07:57
    +4,Sandra,sgeorge3@livejournal.com,229.235.252.98,1973-07-19 10:52:43
    +5,Fred,fwoods4@google.cn,78.229.170.124,2012-09-30 16:38:29
    +6,Stephen,shanson5@livejournal.com,182.227.157.105,1995-11-07 21:40:50
    +7,William,wmartinez6@upenn.edu,135.139.249.50,1982-09-05 03:11:59
    +8,Jessica,jlong7@hao123.com,203.62.178.210,1991-10-16 11:03:15
    +9,Douglas,dwhite8@tamu.edu,178.187.247.1,1979-10-01 09:49:48
    +10,Lisa,lcoleman9@nydailynews.com,168.234.128.249,2011-05-26 07:45:49
    +11,Ralph,rfieldsa@home.pl,55.152.163.149,1972-11-18 19:06:11
    +12,Louise,lnicholsb@samsung.com,141.116.153.154,2014-11-25 20:56:14
    +13,Clarence,cduncanc@sfgate.com,81.171.31.133,2011-11-17 07:02:36
    +14,Daniel,dfranklind@omniture.com,8.204.211.37,1980-09-13 00:09:04
    +15,Katherine,klanee@auda.org.au,176.96.134.59,1997-08-22 19:36:56
    +16,Billy,bwardf@wikia.com,214.108.78.85,2003-10-19 02:14:47
    +17,Annie,agarzag@ocn.ne.jp,190.108.42.70,1988-10-28 15:12:35
    +18,Shirley,scolemanh@fastcompany.com,109.251.164.84,1988-08-24 10:50:57
    +19,Roger,rfrazieri@scribd.com,38.145.218.108,1985-12-31 15:17:15
    +20,Lillian,lstanleyj@goodreads.com,47.57.236.17,1970-06-08 02:09:05
    +""".lstrip()
    +
    +# used to tease out include/exclude edge case behavior for 'dbt seed'
    +seeds__tricky_csv = """
    +seed_id,seed_id_str,a_bool,looks_like_a_bool,a_date,looks_like_a_date,relative,weekday
    +1,1,true,true,2019-01-01 12:32:30,2019-01-01 12:32:30,tomorrow,Saturday
    +2,2,True,True,2019-01-01 12:32:31,2019-01-01 12:32:31,today,Sunday
    +3,3,TRUE,TRUE,2019-01-01 12:32:32,2019-01-01 12:32:32,yesterday,Monday
    +4,4,false,false,2019-01-01 01:32:32,2019-01-01 01:32:32,tomorrow,Saturday
    +5,5,False,False,2019-01-01 01:32:32,2019-01-01 01:32:32,today,Sunday
    +6,6,FALSE,FALSE,2019-01-01 01:32:32,2019-01-01 01:32:32,yesterday,Monday
    +""".lstrip()
    +
    +
    +seeds__wont_parse_csv = """a,b,c
    +1,7,23,90,5
    +2
    +""".lstrip()
    diff --git a/tests/functional/simple_seed/test_seed.py b/tests/adapter/dbt/tests/adapter/simple_seed/test_seed.py
    similarity index 82%
    rename from tests/functional/simple_seed/test_seed.py
    rename to tests/adapter/dbt/tests/adapter/simple_seed/test_seed.py
    index e6309d469cf..40b56e61efb 100644
    --- a/tests/functional/simple_seed/test_seed.py
    +++ b/tests/adapter/dbt/tests/adapter/simple_seed/test_seed.py
    @@ -1,12 +1,13 @@
     import csv
     import pytest
    -import shutil
     
     from codecs import BOM_UTF8
     from pathlib import Path
     
     from dbt.tests.util import (
    -    rm_file,
    +    copy_file,
    +    mkdir,
    +    rm_dir,
         run_dbt,
         read_file,
         check_relations_equal,
    @@ -14,16 +15,21 @@
         check_table_does_not_exist,
     )
     
    -from tests.functional.simple_seed.fixtures import (
    +from dbt.tests.adapter.simple_seed.fixtures import (
         models__downstream_from_seed_actual,
         models__from_basic_seed,
    -    seeds__disabled_in_config,
    -    seeds__enabled_in_config,
    -    seeds__tricky,
    -    seeds__wont_parse,
     )
     
    -# from `test/integration/test_simple_seed`, test_simple_seed
    +from dbt.tests.adapter.simple_seed.seeds import (
    +    seed__actual_csv,
    +    seeds__expected_sql,
    +    seeds__enabled_in_config_csv,
    +    seeds__disabled_in_config_csv,
    +    seeds__tricky_csv,
    +    seeds__wont_parse_csv,
    +    seed__unicode_csv,
    +    seed__with_dots_csv,
    +)
     
     
     class SeedConfigBase(object):
    @@ -40,12 +46,11 @@ class SeedTestBase(SeedConfigBase):
         @pytest.fixture(scope="class", autouse=True)
         def setUp(self, project):
             """Create table for ensuring seeds and models used in tests build correctly"""
    -        project.run_sql_file(project.test_data_dir / Path("seed_expected.sql"))
    +        project.run_sql(seeds__expected_sql)
     
         @pytest.fixture(scope="class")
         def seeds(self, test_data_dir):
    -        seed_actual_csv = read_file(test_data_dir, "seed_actual.csv")
    -        return {"seed_actual.csv": seed_actual_csv}
    +        return {"seed_actual.csv": seed__actual_csv}
     
         @pytest.fixture(scope="class")
         def models(self):
    @@ -110,7 +115,7 @@ def project_config_update(self):
             }
     
         def test_simple_seed_full_refresh_config(self, project):
    -        """Config options should override full-refresh flag because config is higher priority"""
    +        """Config options should override a full-refresh flag because config is higher priority"""
             self._build_relations_for_test(project)
             self._check_relation_end_state(run_result=run_dbt(["seed"]), project=project, exists=True)
             self._check_relation_end_state(
    @@ -122,7 +127,7 @@ class TestSeedCustomSchema(SeedTestBase):
         @pytest.fixture(scope="class", autouse=True)
         def setUp(self, project):
             """Create table for ensuring seeds and models used in tests build correctly"""
    -        project.run_sql_file(project.test_data_dir / Path("seed_expected.sql"))
    +        project.run_sql(seeds__expected_sql)
     
         @pytest.fixture(scope="class")
         def project_config_update(self):
    @@ -134,8 +139,8 @@ def project_config_update(self):
             }
     
         def test_simple_seed_with_schema(self, project):
    -        results = run_dbt(["seed"])
    -        assert len(results) == 1
    +        seed_results = run_dbt(["seed"])
    +        assert len(seed_results) == 1
             custom_schema = f"{project.test_schema}_custom_schema"
             check_relations_equal(project.adapter, [f"{custom_schema}.seed_actual", "seed_expected"])
     
    @@ -146,13 +151,14 @@ def test_simple_seed_with_schema(self, project):
             check_relations_equal(project.adapter, [f"{custom_schema}.seed_actual", "seed_expected"])
     
         def test_simple_seed_with_drop_and_schema(self, project):
    -        results = run_dbt(["seed"])
    -        assert len(results) == 1
    +        seed_results = run_dbt(["seed"])
    +        assert len(seed_results) == 1
             custom_schema = f"{project.test_schema}_custom_schema"
             check_relations_equal(project.adapter, [f"{custom_schema}.seed_actual", "seed_expected"])
     
             # this should drop the seed table, then re-create
             results = run_dbt(["seed", "--full-refresh"])
    +        assert len(results) == 1
             custom_schema = f"{project.test_schema}_custom_schema"
             check_relations_equal(project.adapter, [f"{custom_schema}.seed_actual", "seed_expected"])
     
    @@ -161,9 +167,9 @@ class TestSimpleSeedEnabledViaConfig(object):
         @pytest.fixture(scope="session")
         def seeds(self):
             return {
    -            "seed_enabled.csv": seeds__enabled_in_config,
    -            "seed_disabled.csv": seeds__disabled_in_config,
    -            "seed_tricky.csv": seeds__tricky,
    +            "seed_enabled.csv": seeds__enabled_in_config_csv,
    +            "seed_disabled.csv": seeds__disabled_in_config_csv,
    +            "seed_tricky.csv": seeds__tricky_csv,
             }
     
         @pytest.fixture(scope="class")
    @@ -182,21 +188,21 @@ def clear_test_schema(self, project):
     
         def test_simple_seed_with_disabled(self, clear_test_schema, project):
             results = run_dbt(["seed"])
    -        len(results) == 2
    +        assert len(results) == 2
             check_table_does_exist(project.adapter, "seed_enabled")
             check_table_does_not_exist(project.adapter, "seed_disabled")
             check_table_does_exist(project.adapter, "seed_tricky")
     
         def test_simple_seed_selection(self, clear_test_schema, project):
             results = run_dbt(["seed", "--select", "seed_enabled"])
    -        len(results) == 1
    +        assert len(results) == 1
             check_table_does_exist(project.adapter, "seed_enabled")
             check_table_does_not_exist(project.adapter, "seed_disabled")
             check_table_does_not_exist(project.adapter, "seed_tricky")
     
         def test_simple_seed_exclude(self, clear_test_schema, project):
             results = run_dbt(["seed", "--exclude", "seed_enabled"])
    -        len(results) == 1
    +        assert len(results) == 1
             check_table_does_not_exist(project.adapter, "seed_enabled")
             check_table_does_not_exist(project.adapter, "seed_disabled")
             check_table_does_exist(project.adapter, "seed_tricky")
    @@ -206,11 +212,11 @@ class TestSeedParsing(SeedConfigBase):
         @pytest.fixture(scope="class", autouse=True)
         def setUp(self, project):
             """Create table for ensuring seeds and models used in tests build correctly"""
    -        project.run_sql_file(project.test_data_dir / Path("seed_expected.sql"))
    +        project.run_sql(seeds__expected_sql)
     
         @pytest.fixture(scope="class")
         def seeds(self):
    -        return {"seed.csv": seeds__wont_parse}
    +        return {"seed.csv": seeds__wont_parse_csv}
     
         @pytest.fixture(scope="class")
         def models(self):
    @@ -218,43 +224,44 @@ def models(self):
     
         def test_dbt_run_skips_seeds(self, project):
             # run does not try to parse the seed files
    -        len(run_dbt()) == 1
    +        assert len(run_dbt()) == 1
     
             # make sure 'dbt seed' fails, otherwise our test is invalid!
             run_dbt(["seed"], expect_pass=False)
     
     
    -# BOM = byte order mark; see https://www.ibm.com/docs/en/netezza?topic=formats-byte-order-mark
     class TestSimpleSeedWithBOM(SeedConfigBase):
    +    # Reference: BOM = byte order mark; see https://www.ibm.com/docs/en/netezza?topic=formats-byte-order-mark
    +    # Tests seed handling of a hidden unicode character (the BOM) at the start of a csv
         @pytest.fixture(scope="class", autouse=True)
         def setUp(self, project):
             """Create table for ensuring seeds and models used in tests build correctly"""
    -        project.run_sql_file(project.test_data_dir / Path("seed_expected.sql"))
    -
    -        # manual copy because seed has a special and tricky-to-include unicode character at 0
    -        shutil.copyfile(
    -            project.test_data_dir / Path("seed_bom.csv"),
    -            project.project_root / Path("seeds") / Path("seed_bom.csv"),
    +        project.run_sql(seeds__expected_sql)
    +        copy_file(
    +            project.test_dir,
    +            "seed_bom.csv",
    +            project.project_root / Path("seeds") / "seed_bom.csv",
    +            "",
             )
     
         def test_simple_seed(self, project):
    -        results = run_dbt(["seed"])
    -        assert len(results) == 1
    -
    +        seed_result = run_dbt(["seed"])
    +        assert len(seed_result) == 1
             # The encoding param must be specified in open(), since Python otherwise reads
             # files with a platform-default encoding that may not cover characters beyond extended ASCII.
             with open(
                 project.project_root / Path("seeds") / Path("seed_bom.csv"), encoding="utf-8"
             ) as fp:
                 assert fp.read(1) == BOM_UTF8.decode("utf-8")
    -
             check_relations_equal(project.adapter, ["seed_expected", "seed_bom"])
     
     
     class TestSeedSpecificFormats(SeedConfigBase):
         """Expect all edge cases to build"""
     
    -    def _make_big_seed(self, test_data_dir):
    +    @staticmethod
    +    def _make_big_seed(test_data_dir):
    +        mkdir(test_data_dir)
             big_seed_path = test_data_dir / Path("tmp.csv")
             with open(big_seed_path, "w") as f:
                 writer = csv.writer(f)
    @@ -265,18 +272,16 @@ def _make_big_seed(self, test_data_dir):
     
         @pytest.fixture(scope="class")
         def seeds(self, test_data_dir):
    -        seed_unicode = read_file(test_data_dir, "seed_unicode.csv")
    -        dotted_seed = read_file(test_data_dir, "seed.with.dots.csv")
             big_seed_path = self._make_big_seed(test_data_dir)
             big_seed = read_file(big_seed_path)
     
             yield {
                 "big_seed.csv": big_seed,
    -            "seed.with.dots.csv": dotted_seed,
    -            "seed_unicode.csv": seed_unicode,
    +            "seed.with.dots.csv": seed__with_dots_csv,
    +            "seed_unicode.csv": seed__unicode_csv,
             }
    -        rm_file(big_seed_path)
    +        rm_dir(test_data_dir)
     
         def test_simple_seed(self, project):
             results = run_dbt(["seed"])
    -        len(results) == 3
    +        assert len(results) == 3
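
    # Editor's note: the row-writing loop of _make_big_seed is elided by the hunk
    # above. A self-contained sketch of building such a seed with csv.writer; the
    # header and row count below are illustrative, not the fixture's actual values.
    import csv
    from pathlib import Path

    def make_big_seed(test_data_dir: Path) -> Path:
        """Write a large tmp.csv under test_data_dir and return its path."""
        test_data_dir.mkdir(parents=True, exist_ok=True)
        big_seed_path = test_data_dir / "tmp.csv"
        with open(big_seed_path, "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(["seed_id", "first_name", "email"])  # illustrative header
            for i in range(1, 20_001):  # illustrative row count
                writer.writerow([i, f"name{i}", f"user{i}@example.com"])
        return big_seed_path
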
    diff --git a/tests/functional/simple_seed/test_seed_type_override.py b/tests/adapter/dbt/tests/adapter/simple_seed/test_seed_type_override.py
    similarity index 63%
    rename from tests/functional/simple_seed/test_seed_type_override.py
    rename to tests/adapter/dbt/tests/adapter/simple_seed/test_seed_type_override.py
    index 1e05635d77d..bd6333e607c 100644
    --- a/tests/functional/simple_seed/test_seed_type_override.py
    +++ b/tests/adapter/dbt/tests/adapter/simple_seed/test_seed_type_override.py
    @@ -2,16 +2,19 @@
     
     from dbt.tests.util import run_dbt
     
    -from tests.functional.simple_seed.fixtures import (
    +from dbt.tests.adapter.simple_seed.fixtures import (
         macros__schema_test,
         properties__schema_yml,
    -    seeds__disabled_in_config,
    -    seeds__enabled_in_config,
    -    seeds__tricky,
    +)
    +
    +from dbt.tests.adapter.simple_seed.seeds import (
    +    seeds__enabled_in_config_csv,
    +    seeds__disabled_in_config_csv,
    +    seeds__tricky_csv,
     )
     
     
    -class SimpleSeedColumnOverride(object):
    +class BaseSimpleSeedColumnOverride:
         @pytest.fixture(scope="class")
         def models(self):
             return {
    @@ -21,9 +24,9 @@ def models(self):
         @pytest.fixture(scope="class")
         def seeds(self):
             return {
    -            "seed_enabled.csv": seeds__enabled_in_config,
    -            "seed_disabled.csv": seeds__disabled_in_config,
    -            "seed_tricky.csv": seeds__tricky,
    +            "seed_enabled.csv": seeds__enabled_in_config_csv,
    +            "seed_disabled.csv": seeds__disabled_in_config_csv,
    +            "seed_tricky.csv": seeds__tricky_csv,
             }
     
         @pytest.fixture(scope="class")
    @@ -46,13 +49,15 @@ def project_config_update(self):
                 },
             }
     
    -    def seed_enabled_types(self):
    +    @staticmethod
    +    def seed_enabled_types():
             return {
                 "seed_id": "text",
                 "birthday": "date",
             }
     
    -    def seed_tricky_types(self):
    +    @staticmethod
    +    def seed_tricky_types():
             return {
                 "seed_id_str": "text",
                 "looks_like_a_bool": "text",
    @@ -60,11 +65,11 @@ def seed_tricky_types(self):
             }
     
         def test_simple_seed_with_column_override(self, project):
    -        results = run_dbt(["seed", "--show"])
    -        len(results) == 2
    -        results = run_dbt(["test"])
    -        len(results) == 10
    +        seed_results = run_dbt(["seed", "--show"])
    +        assert len(seed_results) == 2
    +        test_results = run_dbt(["test"])
    +        assert len(test_results) == 10
     
     
    -class TestSimpleSeedColumnOverride(SimpleSeedColumnOverride):
    +class TestSimpleSeedColumnOverride(BaseSimpleSeedColumnOverride):
         pass
    diff --git a/tests/functional/simple_seed/data/seed.with.dots.csv b/tests/functional/simple_seed/data/seed.with.dots.csv
    deleted file mode 100644
    index 27d7b0ee585..00000000000
    --- a/tests/functional/simple_seed/data/seed.with.dots.csv
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -seed_id
    -1
    diff --git a/tests/functional/simple_seed/data/seed_actual.csv b/tests/functional/simple_seed/data/seed_actual.csv
    deleted file mode 100644
    index 327645b86d0..00000000000
    --- a/tests/functional/simple_seed/data/seed_actual.csv
    +++ /dev/null
    @@ -1,501 +0,0 @@
    -seed_id,first_name,email,ip_address,birthday
    -1,Larry,lking0@miitbeian.gov.cn,69.135.206.194,2008-09-12 19:08:31
    -2,Larry,lperkins1@toplist.cz,64.210.133.162,1978-05-09 04:15:14
    -3,Anna,amontgomery2@miitbeian.gov.cn,168.104.64.114,2011-10-16 04:07:57
    -4,Sandra,sgeorge3@livejournal.com,229.235.252.98,1973-07-19 10:52:43
    -5,Fred,fwoods4@google.cn,78.229.170.124,2012-09-30 16:38:29
    -6,Stephen,shanson5@livejournal.com,182.227.157.105,1995-11-07 21:40:50
    -7,William,wmartinez6@upenn.edu,135.139.249.50,1982-09-05 03:11:59
    -8,Jessica,jlong7@hao123.com,203.62.178.210,1991-10-16 11:03:15
    -9,Douglas,dwhite8@tamu.edu,178.187.247.1,1979-10-01 09:49:48
    -10,Lisa,lcoleman9@nydailynews.com,168.234.128.249,2011-05-26 07:45:49
    -11,Ralph,rfieldsa@home.pl,55.152.163.149,1972-11-18 19:06:11
    -12,Louise,lnicholsb@samsung.com,141.116.153.154,2014-11-25 20:56:14
    -13,Clarence,cduncanc@sfgate.com,81.171.31.133,2011-11-17 07:02:36
    -14,Daniel,dfranklind@omniture.com,8.204.211.37,1980-09-13 00:09:04
    -15,Katherine,klanee@auda.org.au,176.96.134.59,1997-08-22 19:36:56
    -16,Billy,bwardf@wikia.com,214.108.78.85,2003-10-19 02:14:47
    -17,Annie,agarzag@ocn.ne.jp,190.108.42.70,1988-10-28 15:12:35
    -18,Shirley,scolemanh@fastcompany.com,109.251.164.84,1988-08-24 10:50:57
    -19,Roger,rfrazieri@scribd.com,38.145.218.108,1985-12-31 15:17:15
    -20,Lillian,lstanleyj@goodreads.com,47.57.236.17,1970-06-08 02:09:05
    -21,Aaron,arodriguezk@nps.gov,205.245.118.221,1985-10-11 23:07:49
    -22,Patrick,pparkerl@techcrunch.com,19.8.100.182,2006-03-29 12:53:56
    -23,Phillip,pmorenom@intel.com,41.38.254.103,2011-11-07 15:35:43
    -24,Henry,hgarcian@newsvine.com,1.191.216.252,2008-08-28 08:30:44
    -25,Irene,iturnero@opera.com,50.17.60.190,1994-04-01 07:15:02
    -26,Andrew,adunnp@pen.io,123.52.253.176,2000-11-01 06:03:25
    -27,David,dgutierrezq@wp.com,238.23.203.42,1988-01-25 07:29:18
    -28,Henry,hsanchezr@cyberchimps.com,248.102.2.185,1983-01-01 13:36:37
    -29,Evelyn,epetersons@gizmodo.com,32.80.46.119,1979-07-16 17:24:12
    -30,Tammy,tmitchellt@purevolume.com,249.246.167.88,2001-04-03 10:00:23
    -31,Jacqueline,jlittleu@domainmarket.com,127.181.97.47,1986-02-11 21:35:50
    -32,Earl,eortizv@opera.com,166.47.248.240,1996-07-06 08:16:27
    -33,Juan,jgordonw@sciencedirect.com,71.77.2.200,1987-01-31 03:46:44
    -34,Diane,dhowellx@nyu.edu,140.94.133.12,1994-06-11 02:30:05
    -35,Randy,rkennedyy@microsoft.com,73.255.34.196,2005-05-26 20:28:39
    -36,Janice,jriveraz@time.com,22.214.227.32,1990-02-09 04:16:52
    -37,Laura,lperry10@diigo.com,159.148.145.73,2015-03-17 05:59:25
    -38,Gary,gray11@statcounter.com,40.193.124.56,1970-01-27 10:04:51
    -39,Jesse,jmcdonald12@typepad.com,31.7.86.103,2009-03-14 08:14:29
    -40,Sandra,sgonzalez13@goodreads.com,223.80.168.239,1993-05-21 14:08:54
    -41,Scott,smoore14@archive.org,38.238.46.83,1980-08-30 11:16:56
    -42,Phillip,pevans15@cisco.com,158.234.59.34,2011-12-15 23:26:31
    -43,Steven,sriley16@google.ca,90.247.57.68,2011-10-29 19:03:28
    -44,Deborah,dbrown17@hexun.com,179.125.143.240,1995-04-10 14:36:07
    -45,Lori,lross18@ow.ly,64.80.162.180,1980-12-27 16:49:15
    -46,Sean,sjackson19@tumblr.com,240.116.183.69,1988-06-12 21:24:45
    -47,Terry,tbarnes1a@163.com,118.38.213.137,1997-09-22 16:43:19
    -48,Dorothy,dross1b@ebay.com,116.81.76.49,2005-02-28 13:33:24
    -49,Samuel,swashington1c@house.gov,38.191.253.40,1989-01-19 21:15:48
    -50,Ralph,rcarter1d@tinyurl.com,104.84.60.174,2007-08-11 10:21:49
    -51,Wayne,whudson1e@princeton.edu,90.61.24.102,1983-07-03 16:58:12
    -52,Rose,rjames1f@plala.or.jp,240.83.81.10,1995-06-08 11:46:23
    -53,Louise,lcox1g@theglobeandmail.com,105.11.82.145,2016-09-19 14:45:51
    -54,Kenneth,kjohnson1h@independent.co.uk,139.5.45.94,1976-08-17 11:26:19
    -55,Donna,dbrown1i@amazon.co.uk,19.45.169.45,2006-05-27 16:51:40
    -56,Johnny,jvasquez1j@trellian.com,118.202.238.23,1975-11-17 08:42:32
    -57,Patrick,pramirez1k@tamu.edu,231.25.153.198,1997-08-06 11:51:09
    -58,Helen,hlarson1l@prweb.com,8.40.21.39,1993-08-04 19:53:40
    -59,Patricia,pspencer1m@gmpg.org,212.198.40.15,1977-08-03 16:37:27
    -60,Joseph,jspencer1n@marriott.com,13.15.63.238,2005-07-23 20:22:06
    -61,Phillip,pschmidt1o@blogtalkradio.com,177.98.201.190,1976-05-19 21:47:44
    -62,Joan,jwebb1p@google.ru,105.229.170.71,1972-09-07 17:53:47
    -63,Phyllis,pkennedy1q@imgur.com,35.145.8.244,2000-01-01 22:33:37
    -64,Katherine,khunter1r@smh.com.au,248.168.205.32,1991-01-09 06:40:24
    -65,Laura,lvasquez1s@wiley.com,128.129.115.152,1997-10-23 12:04:56
    -66,Juan,jdunn1t@state.gov,44.228.124.51,2004-11-10 05:07:35
    -67,Judith,jholmes1u@wiley.com,40.227.179.115,1977-08-02 17:01:45
    -68,Beverly,bbaker1v@wufoo.com,208.34.84.59,2016-03-06 20:07:23
    -69,Lawrence,lcarr1w@flickr.com,59.158.212.223,1988-09-13 06:07:21
    -70,Gloria,gwilliams1x@mtv.com,245.231.88.33,1995-03-18 22:32:46
    -71,Steven,ssims1y@cbslocal.com,104.50.58.255,2001-08-05 21:26:20
    -72,Betty,bmills1z@arstechnica.com,103.177.214.220,1981-12-14 21:26:54
    -73,Mildred,mfuller20@prnewswire.com,151.158.8.130,2000-04-19 10:13:55
    -74,Donald,dday21@icq.com,9.178.102.255,1972-12-03 00:58:24
    -75,Eric,ethomas22@addtoany.com,85.2.241.227,1992-11-01 05:59:30
    -76,Joyce,jarmstrong23@sitemeter.com,169.224.20.36,1985-10-24 06:50:01
    -77,Maria,mmartinez24@amazonaws.com,143.189.167.135,2005-10-05 05:17:42
    -78,Harry,hburton25@youtube.com,156.47.176.237,1978-03-26 05:53:33
    -79,Kevin,klawrence26@hao123.com,79.136.183.83,1994-10-12 04:38:52
    -80,David,dhall27@prweb.com,133.149.172.153,1976-12-15 16:24:24
    -81,Kathy,kperry28@twitter.com,229.242.72.228,1979-03-04 02:58:56
    -82,Adam,aprice29@elegantthemes.com,13.145.21.10,1982-11-07 11:46:59
    -83,Brandon,bgriffin2a@va.gov,73.249.128.212,2013-10-30 05:30:36
    -84,Henry,hnguyen2b@discovery.com,211.36.214.242,1985-01-09 06:37:27
    -85,Eric,esanchez2c@edublogs.org,191.166.188.251,2004-05-01 23:21:42
    -86,Jason,jlee2d@jimdo.com,193.92.16.182,1973-01-08 09:05:39
    -87,Diana,drichards2e@istockphoto.com,19.130.175.245,1994-10-05 22:50:49
    -88,Andrea,awelch2f@abc.net.au,94.155.233.96,2002-04-26 08:41:44
    -89,Louis,lwagner2g@miitbeian.gov.cn,26.217.34.111,2003-08-25 07:56:39
    -90,Jane,jsims2h@seesaa.net,43.4.220.135,1987-03-20 20:39:04
    -91,Larry,lgrant2i@si.edu,97.126.79.34,2000-09-07 20:26:19
    -92,Louis,ldean2j@prnewswire.com,37.148.40.127,2011-09-16 20:12:14
    -93,Jennifer,jcampbell2k@xing.com,38.106.254.142,1988-07-15 05:06:49
    -94,Wayne,wcunningham2l@google.com.hk,223.28.26.187,2009-12-15 06:16:54
    -95,Lori,lstevens2m@icq.com,181.250.181.58,1984-10-28 03:29:19
    -96,Judy,jsimpson2n@marriott.com,180.121.239.219,1986-02-07 15:18:10
    -97,Phillip,phoward2o@usa.gov,255.247.0.175,2002-12-26 08:44:45
    -98,Gloria,gwalker2p@usa.gov,156.140.7.128,1997-10-04 07:58:58
    -99,Paul,pjohnson2q@umn.edu,183.59.198.197,1991-11-14 12:33:55
    -100,Frank,fgreene2r@blogspot.com,150.143.68.121,2010-06-12 23:55:39
    -101,Deborah,dknight2s@reverbnation.com,222.131.211.191,1970-07-08 08:54:23
    -102,Sandra,sblack2t@tripadvisor.com,254.183.128.254,2000-04-12 02:39:36
    -103,Edward,eburns2u@dailymotion.com,253.89.118.18,1993-10-10 10:54:01
    -104,Anthony,ayoung2v@ustream.tv,118.4.193.176,1978-08-26 17:07:29
    -105,Donald,dlawrence2w@wp.com,139.200.159.227,2007-07-21 20:56:20
    -106,Matthew,mfreeman2x@google.fr,205.26.239.92,2014-12-05 17:05:39
    -107,Sean,ssanders2y@trellian.com,143.89.82.108,1993-07-14 21:45:02
    -108,Sharon,srobinson2z@soundcloud.com,66.234.247.54,1977-04-06 19:07:03
    -109,Jennifer,jwatson30@t-online.de,196.102.127.7,1998-03-07 05:12:23
    -110,Clarence,cbrooks31@si.edu,218.93.234.73,2002-11-06 17:22:25
    -111,Jose,jflores32@goo.gl,185.105.244.231,1995-01-05 06:32:21
    -112,George,glee33@adobe.com,173.82.249.196,2015-01-04 02:47:46
    -113,Larry,lhill34@linkedin.com,66.5.206.195,2010-11-02 10:21:17
    -114,Marie,mmeyer35@mysql.com,151.152.88.107,1990-05-22 20:52:51
    -115,Clarence,cwebb36@skype.com,130.198.55.217,1972-10-27 07:38:54
    -116,Sarah,scarter37@answers.com,80.89.18.153,1971-08-24 19:29:30
    -117,Henry,hhughes38@webeden.co.uk,152.60.114.174,1973-01-27 09:00:42
    -118,Teresa,thenry39@hao123.com,32.187.239.106,2015-11-06 01:48:44
    -119,Billy,bgutierrez3a@sun.com,52.37.70.134,2002-03-19 03:20:19
    -120,Anthony,agibson3b@github.io,154.251.232.213,1991-04-19 01:08:15
    -121,Sandra,sromero3c@wikia.com,44.124.171.2,1998-09-06 20:30:34
    -122,Paula,pandrews3d@blogs.com,153.142.118.226,2003-06-24 16:31:24
    -123,Terry,tbaker3e@csmonitor.com,99.120.45.219,1970-12-09 23:57:21
    -124,Lois,lwilson3f@reuters.com,147.44.171.83,1971-01-09 22:28:51
    -125,Sara,smorgan3g@nature.com,197.67.192.230,1992-01-28 20:33:24
    -126,Charles,ctorres3h@china.com.cn,156.115.216.2,1993-10-02 19:36:34
    -127,Richard,ralexander3i@marriott.com,248.235.180.59,1999-02-03 18:40:55
    -128,Christina,charper3j@cocolog-nifty.com,152.114.116.129,1978-09-13 00:37:32
    -129,Steve,sadams3k@economist.com,112.248.91.98,2004-03-21 09:07:43
    -130,Katherine,krobertson3l@ow.ly,37.220.107.28,1977-03-18 19:28:50
    -131,Donna,dgibson3m@state.gov,222.218.76.221,1999-02-01 06:46:16
    -132,Christina,cwest3n@mlb.com,152.114.6.160,1979-12-24 15:30:35
    -133,Sandra,swillis3o@meetup.com,180.71.49.34,1984-09-27 08:05:54
    -134,Clarence,cedwards3p@smugmug.com,10.64.180.186,1979-04-16 16:52:10
    -135,Ruby,rjames3q@wp.com,98.61.54.20,2007-01-13 14:25:52
    -136,Sarah,smontgomery3r@tripod.com,91.45.164.172,2009-07-25 04:34:30
    -137,Sarah,soliver3s@eventbrite.com,30.106.39.146,2012-05-09 22:12:33
    -138,Deborah,dwheeler3t@biblegateway.com,59.105.213.173,1999-11-09 08:08:44
    -139,Deborah,dray3u@i2i.jp,11.108.186.217,2014-02-04 03:15:19
    -140,Paul,parmstrong3v@alexa.com,6.250.59.43,2009-12-21 10:08:53
    -141,Aaron,abishop3w@opera.com,207.145.249.62,1996-04-25 23:20:23
    -142,Henry,hsanders3x@google.ru,140.215.203.171,2012-01-29 11:52:32
    -143,Anne,aanderson3y@1688.com,74.150.102.118,1982-04-03 13:46:17
    -144,Victor,vmurphy3z@hugedomains.com,222.155.99.152,1987-11-03 19:58:41
    -145,Evelyn,ereid40@pbs.org,249.122.33.117,1977-12-14 17:09:57
    -146,Brian,bgonzalez41@wikia.com,246.254.235.141,1991-02-24 00:45:58
    -147,Sandra,sgray42@squarespace.com,150.73.28.159,1972-07-28 17:26:32
    -148,Alice,ajones43@a8.net,78.253.12.177,2002-12-05 16:57:46
    -149,Jessica,jhanson44@mapquest.com,87.229.30.160,1994-01-30 11:40:04
    -150,Louise,lbailey45@reuters.com,191.219.31.101,2011-09-07 21:11:45
    -151,Christopher,cgonzalez46@printfriendly.com,83.137.213.239,1984-10-24 14:58:04
    -152,Gregory,gcollins47@yandex.ru,28.176.10.115,1998-07-25 17:17:10
    -153,Jane,jperkins48@usnews.com,46.53.164.159,1979-08-19 15:25:00
    -154,Phyllis,plong49@yahoo.co.jp,208.140.88.2,1985-07-06 02:16:36
    -155,Adam,acarter4a@scribd.com,78.48.148.204,2005-07-20 03:31:09
    -156,Frank,fweaver4b@angelfire.com,199.180.255.224,2011-03-04 23:07:54
    -157,Ronald,rmurphy4c@cloudflare.com,73.42.97.231,1991-01-11 10:39:41
    -158,Richard,rmorris4d@e-recht24.de,91.9.97.223,2009-01-17 21:05:15
    -159,Rose,rfoster4e@woothemes.com,203.169.53.16,1991-04-21 02:09:38
    -160,George,ggarrett4f@uiuc.edu,186.61.5.167,1989-11-11 11:29:42
    -161,Victor,vhamilton4g@biblegateway.com,121.229.138.38,2012-06-22 18:01:23
    -162,Mark,mbennett4h@businessinsider.com,209.184.29.203,1980-04-16 15:26:34
    -163,Martin,mwells4i@ifeng.com,97.223.55.105,2010-05-26 14:08:18
    -164,Diana,dstone4j@google.ru,90.155.52.47,2013-02-11 00:14:54
    -165,Walter,wferguson4k@blogger.com,30.63.212.44,1986-02-20 17:46:46
    -166,Denise,dcoleman4l@vistaprint.com,10.209.153.77,1992-05-13 20:14:14
    -167,Philip,pknight4m@xing.com,15.28.135.167,2000-09-11 18:41:13
    -168,Russell,rcarr4n@youtube.com,113.55.165.50,2008-07-10 17:49:27
    -169,Donna,dburke4o@dion.ne.jp,70.0.105.111,1992-02-10 17:24:58
    -170,Anne,along4p@squidoo.com,36.154.58.107,2012-08-19 23:35:31
    -171,Clarence,cbanks4q@webeden.co.uk,94.57.53.114,1972-03-11 21:46:44
    -172,Betty,bbowman4r@cyberchimps.com,178.115.209.69,2013-01-13 21:34:51
    -173,Andrew,ahudson4s@nytimes.com,84.32.252.144,1998-09-15 14:20:04
    -174,Keith,kgordon4t@cam.ac.uk,189.237.211.102,2009-01-22 05:34:38
    -175,Patrick,pwheeler4u@mysql.com,47.22.117.226,1984-09-05 22:33:15
    -176,Jesse,jfoster4v@mapquest.com,229.95.131.46,1990-01-20 12:19:15
    -177,Arthur,afisher4w@jugem.jp,107.255.244.98,1983-10-13 11:08:46
    -178,Nicole,nryan4x@wsj.com,243.211.33.221,1974-05-30 23:19:14
    -179,Bruce,bjohnson4y@sfgate.com,17.41.200.101,1992-09-23 02:02:19
    -180,Terry,tcox4z@reference.com,20.189.120.106,1982-02-13 12:43:14
    -181,Ashley,astanley50@kickstarter.com,86.3.56.98,1976-05-09 01:27:16
    -182,Michael,mrivera51@about.me,72.118.249.0,1971-11-11 17:28:37
    -183,Steven,sgonzalez52@mozilla.org,169.112.247.47,2002-08-24 14:59:25
    -184,Kathleen,kfuller53@bloglovin.com,80.93.59.30,2002-03-11 13:41:29
    -185,Nicole,nhenderson54@usda.gov,39.253.60.30,1995-04-24 05:55:07
    -186,Ralph,rharper55@purevolume.com,167.147.142.189,1980-02-10 18:35:45
    -187,Heather,hcunningham56@photobucket.com,96.222.196.229,2007-06-15 05:37:50
    -188,Nancy,nlittle57@cbc.ca,241.53.255.175,2007-07-12 23:42:48
    -189,Juan,jramirez58@pinterest.com,190.128.84.27,1978-11-07 23:37:37
    -190,Beverly,bfowler59@chronoengine.com,54.144.230.49,1979-03-31 23:27:28
    -191,Shirley,sstevens5a@prlog.org,200.97.231.248,2011-12-06 07:08:50
    -192,Annie,areyes5b@squidoo.com,223.32.182.101,2011-05-28 02:42:09
    -193,Jack,jkelley5c@tiny.cc,47.34.118.150,1981-12-05 17:31:40
    -194,Keith,krobinson5d@1und1.de,170.210.209.31,1999-03-09 11:05:43
    -195,Joseph,jmiller5e@google.com.au,136.74.212.139,1984-10-08 13:18:20
    -196,Annie,aday5f@blogspot.com,71.99.186.69,1986-02-18 12:27:34
    -197,Nancy,nperez5g@liveinternet.ru,28.160.6.107,1983-10-20 17:51:20
    -198,Tammy,tward5h@ucoz.ru,141.43.164.70,1980-03-31 04:45:29
    -199,Doris,dryan5i@ted.com,239.117.202.188,1985-07-03 03:17:53
    -200,Rose,rmendoza5j@photobucket.com,150.200.206.79,1973-04-21 21:36:40
    -201,Cynthia,cbutler5k@hubpages.com,80.153.174.161,2001-01-20 01:42:26
    -202,Samuel,soliver5l@people.com.cn,86.127.246.140,1970-09-02 02:19:00
    -203,Carl,csanchez5m@mysql.com,50.149.237.107,1993-12-01 07:02:09
    -204,Kathryn,kowens5n@geocities.jp,145.166.205.201,2004-07-06 18:39:33
    -205,Nicholas,nnichols5o@parallels.com,190.240.66.170,2014-11-11 18:52:19
    -206,Keith,kwillis5p@youtube.com,181.43.206.100,1998-06-13 06:30:51
    -207,Justin,jwebb5q@intel.com,211.54.245.74,2000-11-04 16:58:26
    -208,Gary,ghicks5r@wikipedia.org,196.154.213.104,1992-12-01 19:48:28
    -209,Martin,mpowell5s@flickr.com,153.67.12.241,1983-06-30 06:24:32
    -210,Brenda,bkelley5t@xinhuanet.com,113.100.5.172,2005-01-08 20:50:22
    -211,Edward,eray5u@a8.net,205.187.246.65,2011-09-26 08:04:44
    -212,Steven,slawson5v@senate.gov,238.150.250.36,1978-11-22 02:48:09
    -213,Robert,rthompson5w@furl.net,70.7.89.236,2001-09-12 08:52:07
    -214,Jack,jporter5x@diigo.com,220.172.29.99,1976-07-26 14:29:21
    -215,Lisa,ljenkins5y@oakley.com,150.151.170.180,2010-03-20 19:21:16
    -216,Theresa,tbell5z@mayoclinic.com,247.25.53.173,2001-03-11 05:36:40
    -217,Jimmy,jstephens60@weather.com,145.101.93.235,1983-04-12 09:35:30
    -218,Louis,lhunt61@amazon.co.jp,78.137.6.253,1997-08-29 19:34:34
    -219,Lawrence,lgilbert62@ted.com,243.132.8.78,2015-04-08 22:06:56
    -220,David,dgardner63@4shared.com,204.40.46.136,1971-07-09 03:29:11
    -221,Charles,ckennedy64@gmpg.org,211.83.233.2,2011-02-26 11:55:04
    -222,Lillian,lbanks65@msu.edu,124.233.12.80,2010-05-16 20:29:02
    -223,Ernest,enguyen66@baidu.com,82.45.128.148,1996-07-04 10:07:04
    -224,Ryan,rrussell67@cloudflare.com,202.53.240.223,1983-08-05 12:36:29
    -225,Donald,ddavis68@ustream.tv,47.39.218.137,1989-05-27 02:30:56
    -226,Joe,jscott69@blogspot.com,140.23.131.75,1973-03-16 12:21:31
    -227,Anne,amarshall6a@google.ca,113.162.200.197,1988-12-09 03:38:29
    -228,Willie,wturner6b@constantcontact.com,85.83.182.249,1991-10-06 01:51:10
    -229,Nicole,nwilson6c@sogou.com,30.223.51.135,1977-05-29 19:54:56
    -230,Janet,jwheeler6d@stumbleupon.com,153.194.27.144,2011-03-13 12:48:47
    -231,Lois,lcarr6e@statcounter.com,0.41.36.53,1993-02-06 04:52:01
    -232,Shirley,scruz6f@tmall.com,37.156.39.223,2007-02-18 17:47:01
    -233,Patrick,pford6g@reverbnation.com,36.198.200.89,1977-03-06 15:47:24
    -234,Lisa,lhudson6h@usatoday.com,134.213.58.137,2014-10-28 01:56:56
    -235,Pamela,pmartinez6i@opensource.org,5.151.127.202,1987-11-30 16:44:47
    -236,Larry,lperez6j@infoseek.co.jp,235.122.96.148,1979-01-18 06:33:45
    -237,Pamela,pramirez6k@census.gov,138.233.34.163,2012-01-29 10:35:20
    -238,Daniel,dcarr6l@php.net,146.21.152.242,1984-11-17 08:22:59
    -239,Patrick,psmith6m@indiegogo.com,136.222.199.36,2001-05-30 22:16:44
    -240,Raymond,rhenderson6n@hc360.com,116.31.112.38,2000-01-05 20:35:41
    -241,Teresa,treynolds6o@miitbeian.gov.cn,198.126.205.220,1996-11-08 01:27:31
    -242,Johnny,jmason6p@flickr.com,192.8.232.114,2013-05-14 05:35:50
    -243,Angela,akelly6q@guardian.co.uk,234.116.60.197,1977-08-20 02:05:17
    -244,Douglas,dcole6r@cmu.edu,128.135.212.69,2016-10-26 17:40:36
    -245,Frances,fcampbell6s@twitpic.com,94.22.243.235,1987-04-26 07:07:13
    -246,Donna,dgreen6t@chron.com,227.116.46.107,2011-07-25 12:59:54
    -247,Benjamin,bfranklin6u@redcross.org,89.141.142.89,1974-05-03 20:28:18
    -248,Randy,rpalmer6v@rambler.ru,70.173.63.178,2011-12-20 17:40:18
    -249,Melissa,mmurray6w@bbb.org,114.234.118.137,1991-02-26 12:45:44
    -250,Jean,jlittle6x@epa.gov,141.21.163.254,1991-08-16 04:57:09
    -251,Daniel,dolson6y@nature.com,125.75.104.97,2010-04-23 06:25:54
    -252,Kathryn,kwells6z@eventbrite.com,225.104.28.249,2015-01-31 02:21:50
    -253,Theresa,tgonzalez70@ox.ac.uk,91.93.156.26,1971-12-11 10:31:31
    -254,Beverly,broberts71@bluehost.com,244.40.158.89,2013-09-21 13:02:31
    -255,Pamela,pmurray72@netscape.com,218.54.95.216,1985-04-16 00:34:00
    -256,Timothy,trichardson73@amazonaws.com,235.49.24.229,2000-11-11 09:48:28
    -257,Mildred,mpalmer74@is.gd,234.125.95.132,1992-05-25 02:25:02
    -258,Jessica,jcampbell75@google.it,55.98.30.140,2014-08-26 00:26:34
    -259,Beverly,bthomas76@cpanel.net,48.78.228.176,1970-08-18 10:40:05
    -260,Eugene,eward77@cargocollective.com,139.226.204.2,1996-12-04 23:17:00
    -261,Andrea,aallen78@webnode.com,160.31.214.38,2009-07-06 07:22:37
    -262,Justin,jruiz79@merriam-webster.com,150.149.246.122,2005-06-06 11:44:19
    -263,Kenneth,kedwards7a@networksolutions.com,98.82.193.128,2001-07-03 02:00:10
    -264,Rachel,rday7b@miibeian.gov.cn,114.15.247.221,1994-08-18 19:45:40
    -265,Russell,rmiller7c@instagram.com,184.130.152.253,1977-11-06 01:58:12
    -266,Bonnie,bhudson7d@cornell.edu,235.180.186.206,1990-12-03 22:45:24
    -267,Raymond,rknight7e@yandex.ru,161.2.44.252,1995-08-25 04:31:19
    -268,Bonnie,brussell7f@elpais.com,199.237.57.207,1991-03-29 08:32:06
    -269,Marie,mhenderson7g@elpais.com,52.203.131.144,2004-06-04 21:50:28
    -270,Alan,acarr7h@trellian.com,147.51.205.72,2005-03-03 10:51:31
    -271,Barbara,bturner7i@hugedomains.com,103.160.110.226,2004-08-04 13:42:40
    -272,Christina,cdaniels7j@census.gov,0.238.61.251,1972-10-18 12:47:33
    -273,Jeremy,jgomez7k@reuters.com,111.26.65.56,2013-01-13 10:41:35
    -274,Laura,lwood7l@icio.us,149.153.38.205,2011-06-25 09:33:59
    -275,Matthew,mbowman7m@auda.org.au,182.138.206.172,1999-03-05 03:25:36
    -276,Denise,dparker7n@icq.com,0.213.88.138,2011-11-04 09:43:06
    -277,Phillip,pparker7o@discuz.net,219.242.165.240,1973-10-19 04:22:29
    -278,Joan,jpierce7p@salon.com,63.31.213.202,1989-04-09 22:06:24
    -279,Irene,ibaker7q@cbc.ca,102.33.235.114,1992-09-04 13:00:57
    -280,Betty,bbowman7r@ted.com,170.91.249.242,2015-09-28 08:14:22
    -281,Teresa,truiz7s@boston.com,82.108.158.207,1999-07-18 05:17:09
    -282,Helen,hbrooks7t@slideshare.net,102.87.162.187,2003-01-06 15:45:29
    -283,Karen,kgriffin7u@wunderground.com,43.82.44.184,2010-05-28 01:56:37
    -284,Lisa,lfernandez7v@mtv.com,200.238.218.220,1993-04-03 20:33:51
    -285,Jesse,jlawrence7w@timesonline.co.uk,95.122.105.78,1990-01-05 17:28:43
    -286,Terry,tross7x@macromedia.com,29.112.114.133,2009-08-29 21:32:17
    -287,Angela,abradley7y@icq.com,177.44.27.72,1989-10-04 21:46:06
    -288,Maria,mhart7z@dailymotion.com,55.27.55.202,1975-01-21 01:22:57
    -289,Raymond,randrews80@pinterest.com,88.90.78.67,1992-03-16 21:37:40
    -290,Kathy,krice81@bluehost.com,212.63.196.102,2000-12-14 03:06:44
    -291,Cynthia,cramos82@nymag.com,107.89.190.6,2005-06-28 02:02:33
    -292,Kimberly,kjones83@mysql.com,86.169.101.101,2007-06-13 22:56:49
    -293,Timothy,thansen84@microsoft.com,108.100.254.90,2003-04-04 10:31:57
    -294,Carol,cspencer85@berkeley.edu,75.118.144.187,1999-03-30 14:53:21
    -295,Louis,lmedina86@latimes.com,141.147.163.24,1991-04-11 17:53:13
    -296,Margaret,mcole87@google.fr,53.184.26.83,1991-12-19 01:54:10
    -297,Mary,mgomez88@yellowpages.com,208.56.57.99,1976-05-21 18:05:08
    -298,Amanda,aanderson89@geocities.com,147.73.15.252,1987-08-22 15:05:28
    -299,Kathryn,kgarrett8a@nature.com,27.29.177.220,1976-07-15 04:25:04
    -300,Dorothy,dmason8b@shareasale.com,106.210.99.193,1990-09-03 21:39:31
    -301,Lois,lkennedy8c@amazon.de,194.169.29.187,2007-07-29 14:09:31
    -302,Irene,iburton8d@washingtonpost.com,196.143.110.249,2013-09-05 11:32:46
    -303,Betty,belliott8e@wired.com,183.105.222.199,1979-09-19 19:29:13
    -304,Bobby,bmeyer8f@census.gov,36.13.161.145,2014-05-24 14:34:39
    -305,Ann,amorrison8g@sfgate.com,72.154.54.137,1978-10-05 14:22:34
    -306,Daniel,djackson8h@wunderground.com,144.95.32.34,1990-07-27 13:23:05
    -307,Joe,jboyd8i@alibaba.com,187.105.86.178,2011-09-28 16:46:32
    -308,Ralph,rdunn8j@fc2.com,3.19.87.255,1984-10-18 08:00:40
    -309,Craig,ccarter8k@gizmodo.com,235.152.76.215,1998-07-04 12:15:21
    -310,Paula,pdean8l@hhs.gov,161.100.173.197,1973-02-13 09:38:55
    -311,Andrew,agarrett8m@behance.net,199.253.123.218,1991-02-14 13:36:32
    -312,Janet,jhowell8n@alexa.com,39.189.139.79,2012-11-24 20:17:33
    -313,Keith,khansen8o@godaddy.com,116.186.223.196,1987-08-23 21:22:05
    -314,Nicholas,nedwards8p@state.gov,142.175.142.11,1977-03-28 18:27:27
    -315,Jacqueline,jallen8q@oaic.gov.au,189.66.135.192,1994-10-26 11:44:26
    -316,Frank,fgardner8r@mapy.cz,154.77.119.169,1983-01-29 19:19:51
    -317,Eric,eharrison8s@google.cn,245.139.65.123,1984-02-04 09:54:36
    -318,Gregory,gcooper8t@go.com,171.147.0.221,2004-06-14 05:22:08
    -319,Jean,jfreeman8u@rakuten.co.jp,67.243.121.5,1977-01-07 18:23:43
    -320,Juan,jlewis8v@shinystat.com,216.181.171.189,2001-08-23 17:32:43
    -321,Randy,rwilliams8w@shinystat.com,105.152.146.28,1983-02-17 00:05:50
    -322,Stephen,shart8x@sciencedirect.com,196.131.205.148,2004-02-15 10:12:03
    -323,Annie,ahunter8y@example.com,63.36.34.103,2003-07-23 21:15:25
    -324,Melissa,mflores8z@cbc.ca,151.230.217.90,1983-11-02 14:53:56
    -325,Jane,jweaver90@about.me,0.167.235.217,1987-07-29 00:13:44
    -326,Anthony,asmith91@oracle.com,97.87.48.41,2001-05-31 18:44:11
    -327,Terry,tdavis92@buzzfeed.com,46.20.12.51,2015-09-12 23:13:55
    -328,Brandon,bmontgomery93@gravatar.com,252.101.48.186,2010-10-28 08:26:27
    -329,Chris,cmurray94@bluehost.com,25.158.167.97,2004-05-05 16:10:31
    -330,Denise,dfuller95@hugedomains.com,216.210.149.28,1979-04-20 08:57:24
    -331,Arthur,amcdonald96@sakura.ne.jp,206.42.36.213,2009-08-15 03:26:16
    -332,Jesse,jhoward97@google.cn,46.181.118.30,1974-04-18 14:08:41
    -333,Frank,fsimpson98@domainmarket.com,163.220.211.87,2006-06-30 14:46:52
    -334,Janice,jwoods99@pen.io,229.245.237.182,1988-04-06 11:52:58
    -335,Rebecca,rroberts9a@huffingtonpost.com,148.96.15.80,1976-10-05 08:44:16
    -336,Joshua,jray9b@opensource.org,192.253.12.198,1971-12-25 22:27:07
    -337,Joyce,jcarpenter9c@statcounter.com,125.171.46.215,2001-12-31 22:08:13
    -338,Andrea,awest9d@privacy.gov.au,79.101.180.201,1983-02-18 20:07:47
    -339,Christine,chudson9e@yelp.com,64.198.43.56,1997-09-08 08:03:43
    -340,Joe,jparker9f@earthlink.net,251.215.148.153,1973-11-04 05:08:18
    -341,Thomas,tkim9g@answers.com,49.187.34.47,1991-08-07 21:13:48
    -342,Janice,jdean9h@scientificamerican.com,4.197.117.16,2009-12-08 02:35:49
    -343,James,jmitchell9i@umich.edu,43.121.18.147,2011-04-28 17:04:09
    -344,Charles,cgardner9j@purevolume.com,197.78.240.240,1998-02-11 06:47:07
    -345,Robert,rhenderson9k@friendfeed.com,215.84.180.88,2002-05-10 15:33:14
    -346,Chris,cgray9l@4shared.com,249.70.192.240,1998-10-03 16:43:42
    -347,Gloria,ghayes9m@hibu.com,81.103.138.26,1999-12-26 11:23:13
    -348,Edward,eramirez9n@shareasale.com,38.136.90.136,2010-08-19 08:01:06
    -349,Cheryl,cbutler9o@google.ca,172.180.78.172,1995-05-27 20:03:52
    -350,Margaret,mwatkins9p@sfgate.com,3.20.198.6,2014-10-21 01:42:58
    -351,Rebecca,rwelch9q@examiner.com,45.81.42.208,2001-02-08 12:19:06
    -352,Joe,jpalmer9r@phpbb.com,163.202.92.190,1970-01-05 11:29:12
    -353,Sandra,slewis9s@dyndns.org,77.215.201.236,1974-01-05 07:04:04
    -354,Todd,tfranklin9t@g.co,167.125.181.82,2009-09-28 10:13:58
    -355,Joseph,jlewis9u@webmd.com,244.204.6.11,1990-10-21 15:49:57
    -356,Alan,aknight9v@nydailynews.com,152.197.95.83,1996-03-08 08:43:17
    -357,Sharon,sdean9w@123-reg.co.uk,237.46.40.26,1985-11-30 12:09:24
    -358,Annie,awright9x@cafepress.com,190.45.231.111,2000-08-24 11:56:06
    -359,Diane,dhamilton9y@youtube.com,85.146.171.196,2015-02-24 02:03:57
    -360,Antonio,alane9z@auda.org.au,61.63.146.203,2001-05-13 03:43:34
    -361,Matthew,mallena0@hhs.gov,29.97.32.19,1973-02-19 23:43:32
    -362,Bonnie,bfowlera1@soup.io,251.216.99.53,2013-08-01 15:35:41
    -363,Margaret,mgraya2@examiner.com,69.255.151.79,1998-01-23 22:24:59
    -364,Joan,jwagnera3@printfriendly.com,192.166.120.61,1973-07-13 00:30:22
    -365,Catherine,cperkinsa4@nytimes.com,58.21.24.214,2006-11-19 11:52:26
    -366,Mark,mcartera5@cpanel.net,220.33.102.142,2007-09-09 09:43:27
    -367,Paula,ppricea6@msn.com,36.182.238.124,2009-11-11 09:13:05
    -368,Catherine,cgreena7@army.mil,228.203.58.19,2005-08-09 16:52:15
    -369,Helen,hhamiltona8@symantec.com,155.56.194.99,2005-02-01 05:40:36
    -370,Jane,jmeyera9@ezinearticles.com,133.244.113.213,2013-11-06 22:10:23
    -371,Wanda,wevansaa@bloglovin.com,233.125.192.48,1994-12-26 23:43:42
    -372,Mark,mmarshallab@tumblr.com,114.74.60.47,2016-09-29 18:03:01
    -373,Andrew,amartinezac@google.cn,182.54.37.130,1976-06-06 17:04:17
    -374,Helen,hmoralesad@e-recht24.de,42.45.4.123,1977-03-28 19:06:59
    -375,Bonnie,bstoneae@php.net,196.149.79.137,1970-02-05 17:05:58
    -376,Douglas,dfreemanaf@nasa.gov,215.65.124.218,2008-11-20 21:51:55
    -377,Willie,wwestag@army.mil,35.189.92.118,1992-07-24 05:08:08
    -378,Cheryl,cwagnerah@upenn.edu,228.239.222.141,2010-01-25 06:29:01
    -379,Sandra,swardai@baidu.com,63.11.113.240,1985-05-23 08:07:37
    -380,Julie,jrobinsonaj@jugem.jp,110.58.202.50,2015-03-05 09:42:07
    -381,Larry,lwagnerak@shop-pro.jp,98.234.25.24,1975-07-22 22:22:02
    -382,Juan,jcastilloal@yelp.com,24.174.74.202,2007-01-17 09:32:43
    -383,Donna,dfrazieram@artisteer.com,205.26.147.45,1990-02-11 20:55:46
    -384,Rachel,rfloresan@w3.org,109.60.216.162,1983-05-22 22:42:18
    -385,Robert,rreynoldsao@theguardian.com,122.65.209.130,2009-05-01 18:02:51
    -386,Donald,dbradleyap@etsy.com,42.54.35.126,1997-01-16 16:31:52
    -387,Rachel,rfisheraq@nih.gov,160.243.250.45,2006-02-17 22:05:49
    -388,Nicholas,nhamiltonar@princeton.edu,156.211.37.111,1976-06-21 03:36:29
    -389,Timothy,twhiteas@ca.gov,36.128.23.70,1975-09-24 03:51:18
    -390,Diana,dbradleyat@odnoklassniki.ru,44.102.120.184,1983-04-27 09:02:50
    -391,Billy,bfowlerau@jimdo.com,91.200.68.196,1995-01-29 06:57:35
    -392,Bruce,bandrewsav@ucoz.com,48.12.101.125,1992-10-27 04:31:39
    -393,Linda,lromeroaw@usa.gov,100.71.233.19,1992-06-08 15:13:18
    -394,Debra,dwatkinsax@ucoz.ru,52.160.233.193,2001-11-11 06:51:01
    -395,Katherine,kburkeay@wix.com,151.156.242.141,2010-06-14 19:54:28
    -396,Martha,mharrisonaz@youku.com,21.222.10.199,1989-10-16 14:17:55
    -397,Dennis,dwellsb0@youtu.be,103.16.29.3,1985-12-21 06:05:51
    -398,Gloria,grichardsb1@bloglines.com,90.147.120.234,1982-08-27 01:04:43
    -399,Brenda,bfullerb2@t.co,33.253.63.90,2011-04-20 05:00:35
    -400,Larry,lhendersonb3@disqus.com,88.95.132.128,1982-08-31 02:15:12
    -401,Richard,rlarsonb4@wisc.edu,13.48.231.150,1979-04-15 14:08:09
    -402,Terry,thuntb5@usa.gov,65.91.103.240,1998-05-15 11:50:49
    -403,Harry,hburnsb6@nasa.gov,33.38.21.244,1981-04-12 14:02:20
    -404,Diana,dellisb7@mlb.com,218.229.81.135,1997-01-29 00:17:25
    -405,Jack,jburkeb8@tripadvisor.com,210.227.182.216,1984-03-09 17:24:03
    -406,Julia,jlongb9@fotki.com,10.210.12.104,2005-10-26 03:54:13
    -407,Lois,lscottba@msu.edu,188.79.136.138,1973-02-02 18:40:39
    -408,Sandra,shendersonbb@shareasale.com,114.171.220.108,2012-06-09 18:22:26
    -409,Irene,isanchezbc@cdbaby.com,109.255.50.119,1983-09-28 21:11:27
    -410,Emily,ebrooksbd@bandcamp.com,227.81.93.79,1970-08-31 21:08:01
    -411,Michelle,mdiazbe@businessweek.com,236.249.6.226,1993-05-22 08:07:07
    -412,Tammy,tbennettbf@wisc.edu,145.253.239.152,1978-12-31 20:24:51
    -413,Christine,cgreenebg@flickr.com,97.25.140.118,1978-07-17 12:55:30
    -414,Patricia,pgarzabh@tuttocitta.it,139.246.192.211,1984-02-27 13:40:08
    -415,Kimberly,kromerobi@aol.com,73.56.88.247,1976-09-16 14:22:04
    -416,George,gjohnstonbj@fda.gov,240.36.245.185,1979-07-24 14:36:02
    -417,Eugene,efullerbk@sciencedaily.com,42.38.105.140,2012-09-12 01:56:41
    -418,Andrea,astevensbl@goo.gl,31.152.207.204,1979-05-24 11:06:21
    -419,Shirley,sreidbm@scientificamerican.com,103.60.31.241,1984-02-23 04:07:41
    -420,Terry,tmorenobn@blinklist.com,92.161.34.42,1994-06-25 14:01:35
    -421,Christopher,cmorenobo@go.com,158.86.176.82,1973-09-05 09:18:47
    -422,Dennis,dhansonbp@ning.com,40.160.81.75,1982-01-20 10:19:41
    -423,Beverly,brussellbq@de.vu,138.32.56.204,1997-11-06 07:20:19
    -424,Howard,hparkerbr@163.com,103.171.134.171,2015-06-24 15:37:10
    -425,Helen,hmccoybs@fema.gov,61.200.4.71,1995-06-20 08:59:10
    -426,Ann,ahudsonbt@cafepress.com,239.187.71.125,1977-04-11 07:59:28
    -427,Tina,twestbu@nhs.uk,80.213.117.74,1992-08-19 05:54:44
    -428,Terry,tnguyenbv@noaa.gov,21.93.118.95,1991-09-19 23:22:55
    -429,Ashley,aburtonbw@wix.com,233.176.205.109,2009-11-10 05:01:20
    -430,Eric,emyersbx@1und1.de,168.91.212.67,1987-08-10 07:16:20
    -431,Barbara,blittleby@lycos.com,242.14.189.239,2008-08-02 12:13:04
    -432,Sean,sevansbz@instagram.com,14.39.177.13,2007-04-16 17:28:49
    -433,Shirley,sburtonc0@newsvine.com,34.107.138.76,1980-12-10 02:19:29
    -434,Patricia,pfreemanc1@so-net.ne.jp,219.213.142.117,1987-03-01 02:25:45
    -435,Paula,pfosterc2@vkontakte.ru,227.14.138.141,1972-09-22 12:59:34
    -436,Nicole,nstewartc3@1688.com,8.164.23.115,1998-10-27 00:10:17
    -437,Earl,ekimc4@ovh.net,100.26.244.177,2013-01-22 10:05:46
    -438,Beverly,breedc5@reuters.com,174.12.226.27,1974-09-22 07:29:36
    -439,Lawrence,lbutlerc6@a8.net,105.164.42.164,1992-06-05 00:43:40
    -440,Charles,cmoorec7@ucoz.com,252.197.131.69,1990-04-09 02:34:05
    -441,Alice,alawsonc8@live.com,183.73.220.232,1989-02-28 09:11:04
    -442,Dorothy,dcarpenterc9@arstechnica.com,241.47.200.14,2005-05-02 19:57:21
    -443,Carolyn,cfowlerca@go.com,213.109.55.202,1978-09-10 20:18:20
    -444,Anthony,alongcb@free.fr,169.221.158.204,1984-09-13 01:59:23
    -445,Annie,amoorecc@e-recht24.de,50.34.148.61,2009-03-26 03:41:07
    -446,Carlos,candrewscd@ihg.com,236.69.59.212,1972-03-29 22:42:48
    -447,Beverly,bramosce@google.ca,164.250.184.49,1982-11-10 04:34:01
    -448,Teresa,tlongcf@umich.edu,174.88.53.223,1987-05-17 12:48:00
    -449,Roy,rboydcg@uol.com.br,91.58.243.215,1974-06-16 17:59:54
    -450,Ashley,afieldsch@tamu.edu,130.138.11.126,1983-09-15 05:52:36
    -451,Judith,jhawkinsci@cmu.edu,200.187.103.245,2003-10-22 12:24:03
    -452,Rebecca,rwestcj@ocn.ne.jp,72.85.3.103,1980-11-13 11:01:26
    -453,Raymond,rporterck@infoseek.co.jp,146.33.216.151,1982-05-17 23:58:03
    -454,Janet,jmarshallcl@odnoklassniki.ru,52.46.193.166,1998-10-04 00:02:21
    -455,Shirley,speterscm@salon.com,248.126.31.15,1987-01-30 06:04:59
    -456,Annie,abowmancn@economist.com,222.213.248.59,2006-03-14 23:52:59
    -457,Jean,jlarsonco@blogspot.com,71.41.25.195,2007-09-08 23:49:45
    -458,Phillip,pmoralescp@stanford.edu,74.119.87.28,2011-03-14 20:25:40
    -459,Norma,nrobinsoncq@economist.com,28.225.21.54,1989-10-21 01:22:43
    -460,Kimberly,kclarkcr@dion.ne.jp,149.171.132.153,2008-06-27 02:27:30
    -461,Ruby,rmorriscs@ucla.edu,177.85.163.249,2016-01-28 16:43:44
    -462,Jonathan,jcastilloct@tripod.com,78.4.28.77,2000-05-24 17:33:06
    -463,Edward,ebryantcu@jigsy.com,140.31.98.193,1992-12-17 08:32:47
    -464,Chris,chamiltoncv@eepurl.com,195.171.234.206,1970-12-05 03:42:19
    -465,Michael,mweavercw@reference.com,7.233.133.213,1987-03-29 02:30:54
    -466,Howard,hlawrencecx@businessweek.com,113.225.124.224,1990-07-30 07:20:57
    -467,Philip,phowardcy@comsenz.com,159.170.247.249,2010-10-15 10:18:37
    -468,Mary,mmarshallcz@xing.com,125.132.189.70,2007-07-19 13:48:47
    -469,Scott,salvarezd0@theguardian.com,78.49.103.230,1987-10-31 06:10:44
    -470,Wayne,wcarrolld1@blog.com,238.1.120.204,1980-11-19 03:26:10
    -471,Jennifer,jwoodsd2@multiply.com,92.20.224.49,2010-05-06 22:17:04
    -472,Raymond,rwelchd3@toplist.cz,176.158.35.240,2007-12-12 19:02:51
    -473,Steven,sdixond4@wisc.edu,167.55.237.52,1984-05-05 11:44:37
    -474,Ralph,rjamesd5@ameblo.jp,241.190.50.133,2000-07-06 08:44:37
    -475,Jason,jrobinsond6@hexun.com,138.119.139.56,2006-02-03 05:27:45
    -476,Doris,dwoodd7@fema.gov,180.220.156.190,1978-05-11 20:14:20
    -477,Elizabeth,eberryd8@youtu.be,74.188.53.229,2006-11-18 08:29:06
    -478,Irene,igilbertd9@privacy.gov.au,194.152.218.1,1985-09-17 02:46:52
    -479,Jessica,jdeanda@ameblo.jp,178.103.93.118,1974-06-07 19:04:05
    -480,Rachel,ralvarezdb@phoca.cz,17.22.223.174,1999-03-08 02:43:25
    -481,Kenneth,kthompsondc@shinystat.com,229.119.91.234,2007-05-15 13:17:32
    -482,Harold,hmurraydd@parallels.com,133.26.188.80,1993-11-15 03:42:07
    -483,Paula,phowellde@samsung.com,34.215.28.216,1993-11-29 15:55:00
    -484,Ruth,rpiercedf@tripadvisor.com,111.30.130.123,1986-08-17 10:19:38
    -485,Phyllis,paustindg@vk.com,50.84.34.178,1994-04-13 03:05:24
    -486,Laura,lfosterdh@usnews.com,37.8.101.33,2001-06-30 08:58:59
    -487,Eric,etaylordi@com.com,103.183.253.45,2006-09-15 20:18:46
    -488,Doris,driveradj@prweb.com,247.16.2.199,1989-05-08 09:27:09
    -489,Ryan,rhughesdk@elegantthemes.com,103.234.153.232,1989-08-01 18:36:06
    -490,Steve,smoralesdl@jigsy.com,3.76.84.207,2011-03-13 17:01:05
    -491,Louis,lsullivandm@who.int,78.135.44.208,1975-11-26 16:01:23
    -492,Catherine,ctuckerdn@seattletimes.com,93.137.106.21,1990-03-13 16:14:56
    -493,Ann,adixondo@gmpg.org,191.136.222.111,2002-06-05 14:22:18
    -494,Johnny,jhartdp@amazon.com,103.252.198.39,1988-07-30 23:54:49
    -495,Susan,srichardsdq@skype.com,126.247.192.11,2005-01-09 12:08:14
    -496,Brenda,bparkerdr@skype.com,63.232.216.86,1974-05-18 05:58:29
    -497,Tammy,tmurphyds@constantcontact.com,56.56.37.112,2014-08-05 18:22:25
    -498,Larry,lhayesdt@wordpress.com,162.146.13.46,1997-02-26 14:01:53
    -499,,ethomasdu@hhs.gov,6.241.88.250,2007-09-14 13:03:34
    -500,Paula,pshawdv@networksolutions.com,123.27.47.249,2003-10-30 21:19:20
    diff --git a/tests/functional/simple_seed/data/seed_unicode.csv b/tests/functional/simple_seed/data/seed_unicode.csv
    deleted file mode 100644
    index 0c1cc36aaae..00000000000
    --- a/tests/functional/simple_seed/data/seed_unicode.csv
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -seed_id
    -Uh – Oh
    diff --git a/tests/functional/simple_seed/fixtures.py b/tests/functional/simple_seed/fixtures.py
    deleted file mode 100644
    index afbe6e289f7..00000000000
    --- a/tests/functional/simple_seed/fixtures.py
    +++ /dev/null
    @@ -1,167 +0,0 @@
    -#
    -# Seeds
    -#
    -
    -seeds__enabled_in_config = """seed_id,first_name,email,ip_address,birthday
    -1,Larry,lking0@miitbeian.gov.cn,69.135.206.194,2008-09-12 19:08:31
    -2,Larry,lperkins1@toplist.cz,64.210.133.162,1978-05-09 04:15:14
    -3,Anna,amontgomery2@miitbeian.gov.cn,168.104.64.114,2011-10-16 04:07:57
    -4,Sandra,sgeorge3@livejournal.com,229.235.252.98,1973-07-19 10:52:43
    -5,Fred,fwoods4@google.cn,78.229.170.124,2012-09-30 16:38:29
    -6,Stephen,shanson5@livejournal.com,182.227.157.105,1995-11-07 21:40:50
    -7,William,wmartinez6@upenn.edu,135.139.249.50,1982-09-05 03:11:59
    -8,Jessica,jlong7@hao123.com,203.62.178.210,1991-10-16 11:03:15
    -9,Douglas,dwhite8@tamu.edu,178.187.247.1,1979-10-01 09:49:48
    -10,Lisa,lcoleman9@nydailynews.com,168.234.128.249,2011-05-26 07:45:49
    -11,Ralph,rfieldsa@home.pl,55.152.163.149,1972-11-18 19:06:11
    -12,Louise,lnicholsb@samsung.com,141.116.153.154,2014-11-25 20:56:14
    -13,Clarence,cduncanc@sfgate.com,81.171.31.133,2011-11-17 07:02:36
    -14,Daniel,dfranklind@omniture.com,8.204.211.37,1980-09-13 00:09:04
    -15,Katherine,klanee@auda.org.au,176.96.134.59,1997-08-22 19:36:56
    -16,Billy,bwardf@wikia.com,214.108.78.85,2003-10-19 02:14:47
    -17,Annie,agarzag@ocn.ne.jp,190.108.42.70,1988-10-28 15:12:35
    -18,Shirley,scolemanh@fastcompany.com,109.251.164.84,1988-08-24 10:50:57
    -19,Roger,rfrazieri@scribd.com,38.145.218.108,1985-12-31 15:17:15
    -20,Lillian,lstanleyj@goodreads.com,47.57.236.17,1970-06-08 02:09:05
    -
    -"""
    -
    -seeds__disabled_in_config = """seed_id,first_name,email,ip_address,birthday
    -1,Larry,lking0@miitbeian.gov.cn,69.135.206.194,2008-09-12 19:08:31
    -2,Larry,lperkins1@toplist.cz,64.210.133.162,1978-05-09 04:15:14
    -3,Anna,amontgomery2@miitbeian.gov.cn,168.104.64.114,2011-10-16 04:07:57
    -4,Sandra,sgeorge3@livejournal.com,229.235.252.98,1973-07-19 10:52:43
    -5,Fred,fwoods4@google.cn,78.229.170.124,2012-09-30 16:38:29
    -6,Stephen,shanson5@livejournal.com,182.227.157.105,1995-11-07 21:40:50
    -7,William,wmartinez6@upenn.edu,135.139.249.50,1982-09-05 03:11:59
    -8,Jessica,jlong7@hao123.com,203.62.178.210,1991-10-16 11:03:15
    -9,Douglas,dwhite8@tamu.edu,178.187.247.1,1979-10-01 09:49:48
    -10,Lisa,lcoleman9@nydailynews.com,168.234.128.249,2011-05-26 07:45:49
    -11,Ralph,rfieldsa@home.pl,55.152.163.149,1972-11-18 19:06:11
    -12,Louise,lnicholsb@samsung.com,141.116.153.154,2014-11-25 20:56:14
    -13,Clarence,cduncanc@sfgate.com,81.171.31.133,2011-11-17 07:02:36
    -14,Daniel,dfranklind@omniture.com,8.204.211.37,1980-09-13 00:09:04
    -15,Katherine,klanee@auda.org.au,176.96.134.59,1997-08-22 19:36:56
    -16,Billy,bwardf@wikia.com,214.108.78.85,2003-10-19 02:14:47
    -17,Annie,agarzag@ocn.ne.jp,190.108.42.70,1988-10-28 15:12:35
    -18,Shirley,scolemanh@fastcompany.com,109.251.164.84,1988-08-24 10:50:57
    -19,Roger,rfrazieri@scribd.com,38.145.218.108,1985-12-31 15:17:15
    -20,Lillian,lstanleyj@goodreads.com,47.57.236.17,1970-06-08 02:09:05
    -
    -"""
    -
    -# used to tease out include/exclude edge case behavior for 'dbt seed'
    -seeds__tricky = """\
    -seed_id,seed_id_str,a_bool,looks_like_a_bool,a_date,looks_like_a_date,relative,weekday
    -1,1,true,true,2019-01-01 12:32:30,2019-01-01 12:32:30,tomorrow,Saturday
    -2,2,True,True,2019-01-01 12:32:31,2019-01-01 12:32:31,today,Sunday
    -3,3,TRUE,TRUE,2019-01-01 12:32:32,2019-01-01 12:32:32,yesterday,Monday
    -4,4,false,false,2019-01-01 01:32:32,2019-01-01 01:32:32,tomorrow,Saturday
    -5,5,False,False,2019-01-01 01:32:32,2019-01-01 01:32:32,today,Sunday
    -6,6,FALSE,FALSE,2019-01-01 01:32:32,2019-01-01 01:32:32,yesterday,Monday
    -
    -"""
    -
    -
    -seeds__wont_parse = """a,b,c
    -1,7,23,90,5
    -2
    -
    -"""
    -
    -#
    -# Macros
    -#
    -
    -macros__schema_test = """
    -{% test column_type(model, column_name, type) %}
    -
    -    {% set cols = adapter.get_columns_in_relation(model) %}
    -
    -    {% set col_types = {} %}
    -    {% for col in cols %}
    -        {% do col_types.update({col.name: col.data_type}) %}
    -    {% endfor %}
    -
    -    {% set validation_message = 'Got a column type of ' ~ col_types.get(column_name) ~ ', expected ' ~ type %}
    -
    -    {% set val = 0 if col_types.get(column_name) == type else 1 %}
    -    {% if val == 1 and execute %}
    -        {{ log(validation_message, info=True) }}
    -    {% endif %}
    -
    -    select '{{ validation_message }}' as validation_error
    -    from (select true) as nothing
    -    where {{ val }} = 1
    -
    -{% endtest %}
    -
    -"""
    -
    -#
    -# Models
    -#
    -
    -models__downstream_from_seed_actual = """
    -select * from {{ ref('seed_actual') }}
    -
    -"""
    -models__from_basic_seed = """
    -select * from {{ this.schema }}.seed_expected
    -
    -"""
    -
    -#
    -# Properties
    -#
    -
    -properties__schema_yml = """
    -version: 2
    -seeds:
    -- name: seed_enabled
    -  columns:
    -  - name: birthday
    -    tests:
    -    - column_type:
    -        type: date
    -  - name: seed_id
    -    tests:
    -    - column_type:
    -        type: text
    -
    -- name: seed_tricky
    -  columns:
    -  - name: seed_id
    -    tests:
    -    - column_type:
    -        type: integer
    -  - name: seed_id_str
    -    tests:
    -    - column_type:
    -        type: text
    -  - name: a_bool
    -    tests:
    -    - column_type:
    -        type: boolean
    -  - name: looks_like_a_bool
    -    tests:
    -    - column_type:
    -        type: text
    -  - name: a_date
    -    tests:
    -    - column_type:
    -        type: timestamp without time zone
    -  - name: looks_like_a_date
    -    tests:
    -    - column_type:
    -        type: text
    -  - name: relative
    -    tests:
    -    - column_type:
    -        type: text
    -  - name: weekday
    -    tests:
    -    - column_type:
    -        type: text
    -
    -"""
    
    From ccb4fa26cd682812f15d2fd50b37a8c422e092f8 Mon Sep 17 00:00:00 2001
    From: Peter Webb 
    Date: Tue, 7 Feb 2023 16:15:42 -0500
    Subject: [PATCH 153/156] CT-1917: Fix a regression in the behavior of the
     -q/--quiet cli parameter (#6886)
    
    ---
     .changes/unreleased/Fixes-20230207-143544.yaml |  6 ++++++
     core/dbt/events/functions.py                   | 12 +++++++++---
     core/dbt/main.py                               | 10 +++++++++-
     3 files changed, 24 insertions(+), 4 deletions(-)
     create mode 100644 .changes/unreleased/Fixes-20230207-143544.yaml
    
    diff --git a/.changes/unreleased/Fixes-20230207-143544.yaml b/.changes/unreleased/Fixes-20230207-143544.yaml
    new file mode 100644
    index 00000000000..67850c91927
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230207-143544.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: Fix regression of --quiet cli parameter behavior
    +time: 2023-02-07T14:35:44.160163-05:00
    +custom:
    +  Author: peterallenwebb
    +  Issue: "6749"
    diff --git a/core/dbt/events/functions.py b/core/dbt/events/functions.py
    index 786e9cdf91d..ce5d5be98b0 100644
    --- a/core/dbt/events/functions.py
    +++ b/core/dbt/events/functions.py
    @@ -18,6 +18,14 @@
     metadata_vars: Optional[Dict[str, str]] = None
     
     
    +# The "fallback" logger is used as a stop-gap so that console logging works before the logging
    +# configuration is fully loaded.
    +def setup_fallback_logger(use_legacy: bool, level: EventLevel) -> None:
    +    cleanup_event_logger()
    +    config = _get_logbook_log_config(level) if use_legacy else _get_stdout_config(level)
    +    EVENT_MANAGER.add_logger(config)
    +
    +
     def setup_event_logger(log_path: str, level_override: Optional[EventLevel] = None):
         cleanup_event_logger()
         make_log_dir_if_missing(log_path)
    @@ -113,9 +121,7 @@ def cleanup_event_logger():
     # currently fire before logs can be configured by setup_event_logger(), we
     # create a default configuration with default settings and no file output.
     EVENT_MANAGER: EventManager = EventManager()
    -EVENT_MANAGER.add_logger(
    -    _get_logbook_log_config() if flags.ENABLE_LEGACY_LOGGER else _get_stdout_config()
    -)
    +setup_fallback_logger(bool(flags.ENABLE_LEGACY_LOGGER), EventLevel.INFO)
     
     
     # This global, and the following two functions for capturing stdout logs are
    diff --git a/core/dbt/main.py b/core/dbt/main.py
    index 429d823be52..f0db2a98053 100644
    --- a/core/dbt/main.py
    +++ b/core/dbt/main.py
    @@ -11,8 +11,9 @@
     from pathlib import Path
     
     import dbt.version
    -from dbt.events.functions import fire_event, setup_event_logger, LOG_VERSION
    +from dbt.events.functions import fire_event, setup_event_logger, setup_fallback_logger, LOG_VERSION
     from dbt.events.types import (
    +    EventLevel,
         MainEncounteredError,
         MainKeyboardInterrupt,
         MainReportVersion,
    @@ -178,6 +179,13 @@ def handle_and_check(args):
             # Set flags from args, user config, and env vars
             user_config = read_user_config(flags.PROFILES_DIR)  # This is read again later
             flags.set_from_args(parsed, user_config)
    +
     +        # If the user has asked to suppress non-error logging on the CLI, we want to respect that as soon as possible,
     +        # so that any non-error logging emitted before the full log config is loaded and ready is filtered accordingly.
    +        setup_fallback_logger(
    +            bool(flags.ENABLE_LEGACY_LOGGER), EventLevel.ERROR if flags.QUIET else EventLevel.INFO
    +        )
    +
             dbt.tracking.initialize_from_flags()
             # Set log_format from flags
             parsed.cls.set_log_format()
    
    From df64511feb8d27be89a049f2c72a566b520ca1ed Mon Sep 17 00:00:00 2001
    From: Emily Rockman 
    Date: Wed, 8 Feb 2023 08:02:21 -0600
    Subject: [PATCH 154/156] Dynamically list all `.latest` branches for scheduled
     testing (#6682)
    
    * first pass at automating latest branches
    
    * checkout repo first
    
    * fetch all history
    
    * reorg
    
    * debugging
    
    * update test id
    
    * swap lines
    
     * incorporate new branch action
    
    * tweak vars
    ---
     .github/workflows/release-branch-tests.yml | 30 +++++++++++++++++++++-
     1 file changed, 29 insertions(+), 1 deletion(-)
    
    diff --git a/.github/workflows/release-branch-tests.yml b/.github/workflows/release-branch-tests.yml
    index bdd01aa495a..b31a7c8c3b1 100644
    --- a/.github/workflows/release-branch-tests.yml
    +++ b/.github/workflows/release-branch-tests.yml
    @@ -28,7 +28,33 @@ on:
     permissions: read-all
     
     jobs:
    +  fetch-latest-branches:
    +    runs-on: ubuntu-latest
    +
    +    outputs:
    +      latest-branches: ${{ steps.get-latest-branches.outputs.repo-branches }}
    +
    +    steps:
    +      - name: "Fetch dbt-core Latest Branches"
    +        uses: dbt-labs/actions/fetch-repo-branches@v1.1.1
    +        id: get-latest-branches
    +        with:
    +          repo_name: ${{ github.event.repository.name }}
    +          organization: "dbt-labs"
    +          pat: ${{ secrets.GITHUB_TOKEN }}
    +          fetch_protected_branches_only: true
    +          regex: "^1.[0-9]+.latest$"
    +          perform_match_method: "match"
    +          retries: 3
    +
    +      - name: "[ANNOTATION] ${{ github.event.repository.name }} - branches to test"
    +        run: |
    +          title="${{ github.event.repository.name }} - branches to test"
    +          message="The workflow will run tests for the following branches of the ${{ github.event.repository.name }} repo: ${{ steps.get-latest-branches.outputs.repo-branches }}"
    +          echo "::notice $title::$message"
    +
       kick-off-ci:
    +    needs: [fetch-latest-branches]
         name: Kick-off CI
         runs-on: ubuntu-latest
     
    @@ -39,7 +65,9 @@ jobs:
           max-parallel: 1
           fail-fast: false
           matrix:
    -        branch: [1.0.latest, 1.1.latest, 1.2.latest, 1.3.latest, 1.4.latest, main]
    +        branch: ${{ fromJSON(needs.fetch-latest-branches.outputs.latest-branches) }}
    +        include:
    +          - branch: 'main'
     
         steps:
         - name: Call CI workflow for ${{ matrix.branch }} branch
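
    # Editor's note: what the fetch-repo-branches step feeds into the matrix can
    # be approximated in a few lines: filter protected branch names against the
    # release pattern, then re-add main via the matrix include. The branch list
    # below is illustrative; note that the workflow's unescaped dots let '.'
    # match any character.
    import re

    branches = ["main", "1.0.latest", "1.4.latest", "feature/foo"]  # illustrative
    pattern = re.compile(r"^1.[0-9]+.latest$")  # regex taken verbatim from the workflow

    latest = [b for b in branches if pattern.match(b)]
    print(latest + ["main"])  # ['1.0.latest', '1.4.latest', 'main']
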
    
    From 3ad40372e6c1c5e9f1a44d202401c01dcfeb03c1 Mon Sep 17 00:00:00 2001
    From: dave-connors-3 <73915542+dave-connors-3@users.noreply.github.com>
    Date: Wed, 8 Feb 2023 12:20:13 -0600
    Subject: [PATCH 155/156] add base class for merge exclude tests (#6700)
    
    * add base class for merge exclude tests
    
    * changie <33
    
    * remove comments
    
    * add comments to sql, remove and clarify contents of resultholder
    ---
     .../unreleased/Fixes-20230123-132814.yaml     |   6 +
     .../test_incremental_merge_exclude_columns.py | 116 ++++++++++++++++++
     2 files changed, 122 insertions(+)
     create mode 100644 .changes/unreleased/Fixes-20230123-132814.yaml
     create mode 100644 tests/adapter/dbt/tests/adapter/incremental/test_incremental_merge_exclude_columns.py
    
    diff --git a/.changes/unreleased/Fixes-20230123-132814.yaml b/.changes/unreleased/Fixes-20230123-132814.yaml
    new file mode 100644
    index 00000000000..f05bac4571a
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230123-132814.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: add merge_exclude_columns adapter tests
    +time: 2023-01-23T13:28:14.808748-06:00
    +custom:
    +  Author: dave-connors-3
    +  Issue: "6699"
    diff --git a/tests/adapter/dbt/tests/adapter/incremental/test_incremental_merge_exclude_columns.py b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_merge_exclude_columns.py
    new file mode 100644
    index 00000000000..db958f1eda4
    --- /dev/null
    +++ b/tests/adapter/dbt/tests/adapter/incremental/test_incremental_merge_exclude_columns.py
    @@ -0,0 +1,116 @@
    +import pytest
    +from dbt.tests.util import run_dbt, check_relations_equal
    +from collections import namedtuple
    +
    +
    +models__merge_exclude_columns_sql = """
    +{{ config(
    +    materialized = 'incremental',
    +    unique_key = 'id',
    +    incremental_strategy='merge',
    +    merge_exclude_columns=['msg']
    +) }}
    +
    +{% if not is_incremental() %}
    +
    +-- data for first invocation of model
    +
    +select 1 as id, 'hello' as msg, 'blue' as color
    +union all
    +select 2 as id, 'goodbye' as msg, 'red' as color
    +
    +{% else %}
    +
    +-- data for subsequent incremental update
    +
    +select 1 as id, 'hey' as msg, 'blue' as color
    +union all
    +select 2 as id, 'yo' as msg, 'green' as color
    +union all
    +select 3 as id, 'anyway' as msg, 'purple' as color
    +
    +{% endif %}
    +"""
    +
    +seeds__expected_merge_exclude_columns_csv = """id,msg,color
    +1,hello,blue
    +2,goodbye,green
    +3,anyway,purple
    +"""
    +
    +ResultHolder = namedtuple(
    +    "ResultHolder",
    +    [
    +        "seed_count",
    +        "model_count",
    +        "seed_rows",
    +        "inc_test_model_count",
    +        "relation",
    +    ],
    +)
    +
    +
    +class BaseMergeExcludeColumns:
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"merge_exclude_columns.sql": models__merge_exclude_columns_sql}
    +
    +    @pytest.fixture(scope="class")
    +    def seeds(self):
    +        return {"expected_merge_exclude_columns.csv": seeds__expected_merge_exclude_columns_csv}
    +
    +    def update_incremental_model(self, incremental_model):
    +        """update incremental model after the seed table has been updated"""
    +        model_result_set = run_dbt(["run", "--select", incremental_model])
    +        return len(model_result_set)
    +
    +    def get_test_fields(self, project, seed, incremental_model, update_sql_file):
    +
    +        seed_count = len(run_dbt(["seed", "--select", seed, "--full-refresh"]))
    +
    +        model_count = len(run_dbt(["run", "--select", incremental_model, "--full-refresh"]))
    +
    +        relation = incremental_model
+        # no seed update happens in this scenario; just capture the seed row count
    +        row_count_query = "select * from {}.{}".format(project.test_schema, seed)
    +
    +        seed_rows = len(project.run_sql(row_count_query, fetch="all"))
    +
    +        # propagate seed state to incremental model according to unique keys
    +        inc_test_model_count = self.update_incremental_model(incremental_model=incremental_model)
    +
    +        return ResultHolder(seed_count, model_count, seed_rows, inc_test_model_count, relation)
    +
    +    def check_scenario_correctness(self, expected_fields, test_case_fields, project):
    +        """Invoke assertions to verify correct build functionality"""
    +        # 1. test seed(s) should build afresh
    +        assert expected_fields.seed_count == test_case_fields.seed_count
    +        # 2. test model(s) should build afresh
    +        assert expected_fields.model_count == test_case_fields.model_count
    +        # 3. seeds should have intended row counts post update
    +        assert expected_fields.seed_rows == test_case_fields.seed_rows
    +        # 4. incremental test model(s) should be updated
    +        assert expected_fields.inc_test_model_count == test_case_fields.inc_test_model_count
    +        # 5. result table should match intended result set (itself a relation)
    +        check_relations_equal(
    +            project.adapter, [expected_fields.relation, test_case_fields.relation]
    +        )
    +
    +    def test__merge_exclude_columns(self, project):
    +        """seed should match model after two incremental runs"""
    +
    +        expected_fields = ResultHolder(
    +            seed_count=1,
    +            model_count=1,
    +            inc_test_model_count=1,
    +            seed_rows=3,
    +            relation="expected_merge_exclude_columns",
    +        )
    +
    +        test_case_fields = self.get_test_fields(
    +            project,
    +            seed="expected_merge_exclude_columns",
    +            incremental_model="merge_exclude_columns",
    +            update_sql_file=None,
    +        )
    +        self.check_scenario_correctness(expected_fields, test_case_fields, project)
    
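The expected seed in this patch encodes the column-level merge semantics: with merge_exclude_columns=['msg'], matched rows keep their existing msg but take the incoming color, while the brand-new row (id 3) is inserted with every column, excluded or not. A minimal sketch of that logic in plain Python (no database; a dict stands in for the merged relation):

    existing = {
        1: {"msg": "hello", "color": "blue"},
        2: {"msg": "goodbye", "color": "red"},
    }
    incoming = {
        1: {"msg": "hey", "color": "blue"},
        2: {"msg": "yo", "color": "green"},
        3: {"msg": "anyway", "color": "purple"},
    }
    exclude = {"msg"}

    for key, new_row in incoming.items():
        if key in existing:
            # matched: update every column except the excluded ones
            existing[key].update({c: v for c, v in new_row.items() if c not in exclude})
        else:
            # not matched: insert the full row, excluded columns included
            existing[key] = dict(new_row)

    print(existing)
    # {1: {'msg': 'hello', 'color': 'blue'},
    #  2: {'msg': 'goodbye', 'color': 'green'},
    #  3: {'msg': 'anyway', 'color': 'purple'}}

This reproduces seeds__expected_merge_exclude_columns_csv row for row. As with the other incremental base classes shipped under tests/adapter, the point is that adapter repos can opt in by subclassing; a hypothetical adapter-side test module would need nothing more than:

    from dbt.tests.adapter.incremental.test_incremental_merge_exclude_columns import (
        BaseMergeExcludeColumns,
    )


    class TestMergeExcludeColumns(BaseMergeExcludeColumns):
        pass
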
    From abbece88760d98638db3ce0465d89fd966b0740b Mon Sep 17 00:00:00 2001
    From: Emily Rockman 
    Date: Wed, 8 Feb 2023 13:49:24 -0600
    Subject: [PATCH 156/156] 1.4 regression: Check if status has node attribute
     (#6899)
    
    * check for node
    
    * add changelog
    
    * add test for regression
    ---
     .../unreleased/Fixes-20230208-110551.yaml     |  6 ++++
     core/dbt/task/run.py                          |  2 +-
     tests/functional/hooks/fixtures.py            | 28 +++++++++++++++++++
     tests/functional/hooks/test_run_hooks.py      | 24 ++++++++++++++++
     4 files changed, 59 insertions(+), 1 deletion(-)
     create mode 100644 .changes/unreleased/Fixes-20230208-110551.yaml
    
    diff --git a/.changes/unreleased/Fixes-20230208-110551.yaml b/.changes/unreleased/Fixes-20230208-110551.yaml
    new file mode 100644
    index 00000000000..591374a4a33
    --- /dev/null
    +++ b/.changes/unreleased/Fixes-20230208-110551.yaml
    @@ -0,0 +1,6 @@
    +kind: Fixes
    +body: Ensure results from hooks contain nodes when processing them
    +time: 2023-02-08T11:05:51.952494-06:00
    +custom:
    +  Author: emmyoop
    +  Issue: "6796"
    diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py
    index fd24dd06ba2..5d9c858fc5e 100644
    --- a/core/dbt/task/run.py
    +++ b/core/dbt/task/run.py
    @@ -443,7 +443,7 @@ def after_run(self, adapter, results):
             database_schema_set: Set[Tuple[Optional[str], str]] = {
                 (r.node.database, r.node.schema)
                 for r in results
    -            if r.node.is_relational
    +            if (hasattr(r, "node") and r.node.is_relational)
                 and r.status not in (NodeStatus.Error, NodeStatus.Fail, NodeStatus.Skipped)
             }
     
    diff --git a/tests/functional/hooks/fixtures.py b/tests/functional/hooks/fixtures.py
    index 6a721fffea5..36f6eaa350a 100644
    --- a/tests/functional/hooks/fixtures.py
    +++ b/tests/functional/hooks/fixtures.py
    @@ -1,3 +1,31 @@
    +macros_missing_column = """
    +{% macro export_table_check() %}
    +
    +    {% set table = 'test_column' %}
    +
    +    {% set query %}
    +        SELECT column_name
    +        FROM {{ref(table)}}
    +        LIMIT 1
    +    {% endset %}
    +
    +    {%- if flags.WHICH in ('run', 'build') -%}
    +        {% set results = run_query(query) %}
    +        {% if execute %}
    +            {%- if results.rows -%}
    +                {{ exceptions.raise_compiler_error("ON_RUN_START_CHECK_NOT_PASSED: Data already exported. DBT Run aborted.") }}
    +            {% else -%}
    +                {{ log("No data found in " ~ table ~ " for current day and runtime region. Proceeding...", true) }}
    +            {%- endif -%}
    +        {%- endif -%}
    +    {%- endif -%}
    +{% endmacro %}
    +"""
    +
    +models__missing_column = """
    +select 1 as col
    +"""
    +
     macros__before_and_after = """
     {% macro custom_run_hook(state, target, run_started_at, invocation_id) %}
     
    diff --git a/tests/functional/hooks/test_run_hooks.py b/tests/functional/hooks/test_run_hooks.py
    index ddda7473fc1..cc285198523 100644
    --- a/tests/functional/hooks/test_run_hooks.py
    +++ b/tests/functional/hooks/test_run_hooks.py
    @@ -8,6 +8,8 @@
         macros__before_and_after,
         models__hooks,
         seeds__example_seed_csv,
    +    macros_missing_column,
    +    models__missing_column,
     )
     
     from dbt.tests.util import (
    @@ -141,3 +143,25 @@ def test_pre_and_post_seed_hooks(self, setUp, project, dbt_profile_target):
             check_table_does_not_exist(project.adapter, "start_hook_order_test")
             check_table_does_not_exist(project.adapter, "end_hook_order_test")
             self.assert_used_schemas(project)
    +
    +
    +class TestAfterRunHooks(object):
    +    @pytest.fixture(scope="class")
    +    def macros(self):
    +        return {"temp_macro.sql": macros_missing_column}
    +
    +    @pytest.fixture(scope="class")
    +    def models(self):
    +        return {"test_column.sql": models__missing_column}
    +
    +    @pytest.fixture(scope="class")
    +    def project_config_update(self):
    +        return {
    +            # The create and drop table statements here validate that these hooks run
    +            # in the same order that they are defined. Drop before create is an error.
    +            # Also check that the table does not exist below.
    +            "on-run-start": "- {{ export_table_check() }}"
    +        }
    +
    +    def test_missing_column_pre_hook(self, project):
    +        run_dbt(["run"], expect_pass=False)