diff --git a/superset/common/query_actions.py b/superset/common/query_actions.py index d0ef7d3f26575..1987872470bfa 100644 --- a/superset/common/query_actions.py +++ b/superset/common/query_actions.py @@ -27,6 +27,7 @@ ChartDataResultType, extract_column_dtype, extract_dataframe_dtypes, + ExtraFiltersReasonType, get_time_filter_status, QueryStatus, ) @@ -114,7 +115,7 @@ def _get_full( {"column": col} for col in filter_columns if col in columns ] + applied_time_columns payload["rejected_filters"] = [ - {"reason": "not_in_datasource", "column": col} + {"reason": ExtraFiltersReasonType.COL_NOT_IN_DATASOURCE, "column": col} for col in filter_columns if col not in columns ] + rejected_time_columns diff --git a/superset/utils/core.py b/superset/utils/core.py index 062c3fa50061d..eadf14d6086b4 100644 --- a/superset/utils/core.py +++ b/superset/utils/core.py @@ -122,6 +122,8 @@ DTTM_ALIAS = "__timestamp" +NO_TIME_RANGE = "No filter" + TIME_COMPARISION = "__" JS_MAX_INTEGER = 9007199254740991 # Largest int Java Script can handle 2^53-1 @@ -223,6 +225,12 @@ class ExtraFiltersTimeColumnType(str, Enum): TIME_RANGE = "__time_range" +class ExtraFiltersReasonType(str, Enum): + NO_TEMPORAL_COLUMN = "no_temporal_column" + COL_NOT_IN_DATASOURCE = "not_in_datasource" + NOT_DRUID_DATASOURCE = "not_druid_datasource" + + class FilterOperator(str, Enum): """ Operators used filter controls @@ -1620,7 +1628,7 @@ def get_time_filter_status( # pylint: disable=too-many-branches else: rejected.append( { - "reason": "not_in_datasource", + "reason": ExtraFiltersReasonType.COL_NOT_IN_DATASOURCE, "column": ExtraFiltersTimeColumnType.TIME_COL, } ) @@ -1632,19 +1640,20 @@ def get_time_filter_status( # pylint: disable=too-many-branches else: rejected.append( { - "reason": "no_temporal_column", + "reason": ExtraFiltersReasonType.NO_TEMPORAL_COLUMN, "column": ExtraFiltersTimeColumnType.TIME_GRAIN, } ) - if ExtraFiltersTimeColumnType.TIME_RANGE in applied_time_extras: + time_range = 
applied_time_extras.get(ExtraFiltersTimeColumnType.TIME_RANGE) + if time_range and time_range != NO_TIME_RANGE: # are there any temporal columns to assign the time grain to? if temporal_columns: applied.append({"column": ExtraFiltersTimeColumnType.TIME_RANGE}) else: rejected.append( { - "reason": "no_temporal_column", + "reason": ExtraFiltersReasonType.NO_TEMPORAL_COLUMN, "column": ExtraFiltersTimeColumnType.TIME_RANGE, } ) @@ -1655,7 +1664,7 @@ def get_time_filter_status( # pylint: disable=too-many-branches else: rejected.append( { - "reason": "not_druid_datasource", + "reason": ExtraFiltersReasonType.NOT_DRUID_DATASOURCE, "column": ExtraFiltersTimeColumnType.TIME_ORIGIN, } ) @@ -1666,7 +1675,7 @@ def get_time_filter_status( # pylint: disable=too-many-branches else: rejected.append( { - "reason": "not_druid_datasource", + "reason": ExtraFiltersReasonType.NOT_DRUID_DATASOURCE, "column": ExtraFiltersTimeColumnType.GRANULARITY, } ) diff --git a/superset/utils/date_parser.py b/superset/utils/date_parser.py index 51e5b8a8d8a7c..eaa7f7f98dd7f 100644 --- a/superset/utils/date_parser.py +++ b/superset/utils/date_parser.py @@ -44,6 +44,7 @@ TimeRangeAmbiguousError, TimeRangeParseFailError, ) +from superset.utils.core import NO_TIME_RANGE from superset.utils.memoized import memoized ParserElement.enablePackrat() @@ -174,7 +175,7 @@ def get_since_until( _relative_start = relative_start if relative_start else "today" _relative_end = relative_end if relative_end else "today" - if time_range == "No filter": + if time_range == NO_TIME_RANGE: return None, None if time_range and time_range.startswith("Last") and separator not in time_range: diff --git a/superset/viz.py b/superset/viz.py index cd19807b11b19..8886157811d06 100644 --- a/superset/viz.py +++ b/superset/viz.py @@ -71,6 +71,7 @@ from superset.utils.cache import set_and_log_cache from superset.utils.core import ( DTTM_ALIAS, + ExtraFiltersReasonType, JS_MAX_INTEGER, merge_extra_filters, QueryMode, @@ -477,7 +478,7 @@ def 
get_payload(self, query_obj: Optional[QueryObjectDict] = None) -> VizPayload if col in columns or col in filter_values_columns ] + applied_time_columns payload["rejected_filters"] = [ - {"reason": "not_in_datasource", "column": col} + {"reason": ExtraFiltersReasonType.COL_NOT_IN_DATASOURCE, "column": col} for col in filter_columns if col not in columns and col not in filter_values_columns ] + rejected_time_columns diff --git a/tests/unit_tests/core_tests.py b/tests/unit_tests/core_tests.py index 51d1d0993aff4..1238f509d4c53 100644 --- a/tests/unit_tests/core_tests.py +++ b/tests/unit_tests/core_tests.py @@ -20,11 +20,16 @@ from superset.utils.core import ( AdhocMetric, + ExtraFiltersReasonType, + ExtraFiltersTimeColumnType, GenericDataType, get_metric_name, get_metric_names, + get_time_filter_status, is_adhoc_metric, + NO_TIME_RANGE, ) +from tests.unit_tests.fixtures.datasets import get_dataset_mock STR_METRIC = "my_metric" SIMPLE_SUM_ADHOC_METRIC: AdhocMetric = { @@ -98,3 +103,72 @@ def test_is_adhoc_metric(): assert is_adhoc_metric(STR_METRIC) is False assert is_adhoc_metric(SIMPLE_SUM_ADHOC_METRIC) is True assert is_adhoc_metric(SQL_ADHOC_METRIC) is True + + +def test_get_time_filter_status_time_col(): + dataset = get_dataset_mock() + + assert get_time_filter_status( + dataset, {ExtraFiltersTimeColumnType.TIME_COL: "ds"} + ) == ([{"column": ExtraFiltersTimeColumnType.TIME_COL}], []) + + +def test_get_time_filter_status_time_range(): + dataset = get_dataset_mock() + + assert get_time_filter_status( + dataset, {ExtraFiltersTimeColumnType.TIME_RANGE: NO_TIME_RANGE} + ) == ([], []) + + assert get_time_filter_status( + dataset, {ExtraFiltersTimeColumnType.TIME_RANGE: "1 year ago"} + ) == ([{"column": ExtraFiltersTimeColumnType.TIME_RANGE}], []) + + +def test_get_time_filter_status_time_grain(): + dataset = get_dataset_mock() + + assert get_time_filter_status( + dataset, {ExtraFiltersTimeColumnType.TIME_GRAIN: "PT1M"} + ) == ([{"column": 
ExtraFiltersTimeColumnType.TIME_GRAIN}], []) + + +def test_get_time_filter_status_no_temporal_col(): + dataset = get_dataset_mock() + dataset.columns[0].is_dttm = False + + assert get_time_filter_status( + dataset, {ExtraFiltersTimeColumnType.TIME_COL: "foobar"} + ) == ( + [], + [ + { + "reason": ExtraFiltersReasonType.COL_NOT_IN_DATASOURCE, + "column": ExtraFiltersTimeColumnType.TIME_COL, + } + ], + ) + + assert get_time_filter_status( + dataset, {ExtraFiltersTimeColumnType.TIME_RANGE: "1 year ago"} + ) == ( + [], + [ + { + "reason": ExtraFiltersReasonType.NO_TEMPORAL_COLUMN, + "column": ExtraFiltersTimeColumnType.TIME_RANGE, + } + ], + ) + + assert get_time_filter_status( + dataset, {ExtraFiltersTimeColumnType.TIME_GRAIN: "PT1M"} + ) == ( + [], + [ + { + "reason": ExtraFiltersReasonType.NO_TEMPORAL_COLUMN, + "column": ExtraFiltersTimeColumnType.TIME_GRAIN, + } + ], + ) diff --git a/tests/unit_tests/fixtures/datasets.py b/tests/unit_tests/fixtures/datasets.py new file mode 100644 index 0000000000000..5d5466a5e8135 --- /dev/null +++ b/tests/unit_tests/fixtures/datasets.py @@ -0,0 +1,206 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+from typing import Any, Dict +from unittest.mock import Mock + + +def get_column_mock(params: Dict[str, Any]) -> Mock: + mock = Mock() + mock.id = params["id"] + mock.column_name = params["column_name"] + mock.verbose_name = params["verbose_name"] + mock.description = params["description"] + mock.expression = params["expression"] + mock.filterable = params["filterable"] + mock.groupby = params["groupby"] + mock.is_dttm = params["is_dttm"] + mock.type = params["type"] + return mock + + +def get_metric_mock(params: Dict[str, Any]) -> Mock: + mock = Mock() + mock.id = params["id"] + mock.metric_name = params["metric_name"] + mock.verbose_name = params["verbose_name"] + mock.description = params["description"] + mock.expression = params["expression"] + mock.warning_text = params["warning_text"] + mock.d3format = params["d3format"] + return mock + + +def get_dataset_mock() -> Mock: + mock = Mock() + mock.id = None + mock.column_formats = {"ratio": ".2%"} + mock.database = {"id": 1} + mock.description = "Adding a DESCRip" + mock.default_endpoint = "" + mock.filter_select_enabled = True + mock.name = "birth_names" + mock.table_name = "birth_names" + mock.datasource_name = "birth_names" + mock.type = "table" + mock.schema = None + mock.offset = 66 + mock.cache_timeout = 55 + mock.sql = "" + mock.columns = [ + get_column_mock( + { + "id": 504, + "column_name": "ds", + "verbose_name": "", + "description": None, + "expression": "", + "filterable": True, + "groupby": True, + "is_dttm": True, + "type": "DATETIME", + } + ), + get_column_mock( + { + "id": 505, + "column_name": "gender", + "verbose_name": None, + "description": None, + "expression": "", + "filterable": True, + "groupby": True, + "is_dttm": False, + "type": "VARCHAR(16)", + } + ), + get_column_mock( + { + "id": 506, + "column_name": "name", + "verbose_name": None, + "description": None, + "expression": None, + "filterable": True, + "groupby": True, + "is_dttm": None, + "type": "VARCHAR(255)", + } + ), + 
get_column_mock( + { + "id": 508, + "column_name": "state", + "verbose_name": None, + "description": None, + "expression": None, + "filterable": True, + "groupby": True, + "is_dttm": None, + "type": "VARCHAR(10)", + } + ), + get_column_mock( + { + "id": 509, + "column_name": "num_boys", + "verbose_name": None, + "description": None, + "expression": None, + "filterable": True, + "groupby": True, + "is_dttm": None, + "type": "BIGINT(20)", + } + ), + get_column_mock( + { + "id": 510, + "column_name": "num_girls", + "verbose_name": None, + "description": None, + "expression": "", + "filterable": False, + "groupby": False, + "is_dttm": False, + "type": "BIGINT(20)", + } + ), + get_column_mock( + { + "id": 532, + "column_name": "num", + "verbose_name": None, + "description": None, + "expression": None, + "filterable": True, + "groupby": True, + "is_dttm": None, + "type": "BIGINT(20)", + } + ), + get_column_mock( + { + "id": 522, + "column_name": "num_california", + "verbose_name": None, + "description": None, + "expression": "CASE WHEN state = 'CA' THEN num ELSE 0 END", + "filterable": False, + "groupby": False, + "is_dttm": False, + "type": "NUMBER", + } + ), + ] + mock.metrics = [ + # metric mocks for the birth_names dataset + get_metric_mock( + { + "id": 824, + "metric_name": "sum__num", + "verbose_name": "Babies", + "description": "", + "expression": "SUM(num)", + "warning_text": "", + "d3format": "", + } + ), + get_metric_mock( + { + "id": 836, + "metric_name": "count", + "verbose_name": "", + "description": None, + "expression": "count(1)", + "warning_text": None, + "d3format": None, + } + ), + get_metric_mock( + { + "id": 843, + "metric_name": "ratio", + "verbose_name": "Ratio Boys/Girls", + "description": "This represents the ratio of boys/girls", + "expression": "sum(num_boys) / sum(num_girls)", + "warning_text": "no warning", + "d3format": ".2%", + } + ), + ] + + return mock