Skip to content

Commit

Permalink
chore: Remove obsolete legacy visualizations
Browse files Browse the repository at this point in the history
  • Loading branch information
john-bodley committed Jul 13, 2023
1 parent a156816 commit ac7761a
Show file tree
Hide file tree
Showing 8 changed files with 20 additions and 526 deletions.
213 changes: 0 additions & 213 deletions superset/viz.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,8 @@
get_column_name,
get_column_names,
get_column_names_from_columns,
get_metric_names,
JS_MAX_INTEGER,
merge_extra_filters,
QueryMode,
simple_filter_to_adhoc,
)
from superset.utils.date_parser import get_since_until, parse_past_timedelta
Expand Down Expand Up @@ -701,158 +699,6 @@ def raise_for_access(self) -> None:
security_manager.raise_for_access(viz=self)


class TableViz(BaseViz):

    """A basic html table that is sortable and searchable"""

    viz_type = "table"
    verbose_name = _("Table View")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    # Non-timeseries by default; process_metrics() may flip this on when the
    # user opts into "include_time" (see should_be_timeseries()).
    is_timeseries = False
    # RAW mode can emit arbitrary (non-numeric) columns, so numeric coercion
    # of metric columns is disabled for this viz.
    enforce_numerical_metrics = False

    @deprecated(deprecated_in="3.0")
    def process_metrics(self) -> None:
        """Process form data and store parsed column configs.

        1. Determine query mode based on form_data params.
           - Use `query_mode` if it has a valid value
           - Set as RAW mode if `all_columns` is set
           - Otherwise defaults to AGG mode
        2. Determine output columns based on query mode.

        Side effects: sets ``self.query_mode``, ``self.columns``,
        ``self.percent_columns`` and ``self.is_timeseries``.

        :raises QueryObjectValidationError: when no explicit query mode is
            given and raw columns are combined with group-by/metric fields.
        """
        # Verify form data first: if not specifying query mode, then cannot have both
        # GROUP BY and RAW COLUMNS.
        if (
            not self.form_data.get("query_mode")
            and self.form_data.get("all_columns")
            and (
                self.form_data.get("groupby")
                or self.form_data.get("metrics")
                or self.form_data.get("percent_metrics")
            )
        ):
            raise QueryObjectValidationError(
                _(
                    "You cannot use [Columns] in combination with "
                    "[Group By]/[Metrics]/[Percentage Metrics]. "
                    "Please choose one or the other."
                )
            )

        super().process_metrics()

        self.query_mode: QueryMode = QueryMode.get(
            self.form_data.get("query_mode")
        ) or (
            # infer query mode from the presence of other fields
            QueryMode.RAW
            if len(self.form_data.get("all_columns") or []) > 0
            else QueryMode.AGGREGATE
        )

        columns: list[str]  # output columns sans time and percent_metric column
        percent_columns: list[str] = []  # percent columns that needs extra computation

        if self.query_mode == QueryMode.RAW:
            columns = get_metric_names(self.form_data.get("all_columns"))
        else:
            columns = get_column_names(self.groupby) + get_metric_names(
                self.form_data.get("metrics")
            )
            # Percent metrics only apply in aggregate mode; they are computed
            # later in get_data() by normalizing each column to its sum.
            percent_columns = get_metric_names(
                self.form_data.get("percent_metrics") or []
            )

        self.columns = columns
        self.percent_columns = percent_columns
        self.is_timeseries = self.should_be_timeseries()

    @deprecated(deprecated_in="3.0")
    def should_be_timeseries(self) -> bool:
        """Return True when the user asked to include the time column.

        Including time requires both a time column (``granularity_sqla``) and
        a time grain (``time_grain_sqla``) to be configured.

        :raises QueryObjectValidationError: when "include_time" is checked
            but the granularity/grain preconditions are not met.
        """
        # TODO handle datasource-type-specific code in datasource
        conditions_met = self.form_data.get("granularity_sqla") and self.form_data.get(
            "time_grain_sqla"
        )
        if self.form_data.get("include_time") and not conditions_met:
            raise QueryObjectValidationError(
                _("Pick a granularity in the Time section or " "uncheck 'Include Time'")
            )
        return bool(self.form_data.get("include_time"))

    @deprecated(deprecated_in="3.0")
    def query_obj(self) -> QueryObjectDict:
        """Build the query object, adjusting it for the active query mode.

        RAW mode selects ``all_columns`` verbatim and disables grouping,
        metrics and timeseries options; AGGREGATE mode derives an ORDER BY
        from ``timeseries_limit_metric`` or falls back to the first metric.
        """
        query_obj = super().query_obj()
        if self.query_mode == QueryMode.RAW:
            query_obj["columns"] = self.form_data.get("all_columns")
            # order_by_cols entries are JSON-encoded (column, ascending) pairs
            order_by_cols = self.form_data.get("order_by_cols") or []
            query_obj["orderby"] = [json.loads(t) for t in order_by_cols]
            # must disable groupby and metrics in raw mode
            query_obj["groupby"] = []
            query_obj["metrics"] = []
            # raw mode does not support timeseries queries
            query_obj["timeseries_limit_metric"] = None
            query_obj["timeseries_limit"] = None
            query_obj["is_timeseries"] = None
        else:
            sort_by = self.form_data.get("timeseries_limit_metric")
            if sort_by:
                sort_by_label = utils.get_metric_name(sort_by)
                # the sort metric must be part of the SELECT list to be usable
                # in ORDER BY; append it if the user didn't already include it
                if sort_by_label not in utils.get_metric_names(query_obj["metrics"]):
                    query_obj["metrics"].append(sort_by)
                query_obj["orderby"] = [
                    (sort_by, not self.form_data.get("order_desc", True))
                ]
            elif query_obj["metrics"]:
                # Legacy behavior of sorting by first metric by default
                first_metric = query_obj["metrics"][0]
                query_obj["orderby"] = [
                    (first_metric, not self.form_data.get("order_desc", True))
                ]
        return query_obj

    @deprecated(deprecated_in="3.0")
    def get_data(self, df: pd.DataFrame) -> VizData:
        """
        Transform the query result to the table representation.

        :param df: The interim dataframe
        :returns: The table visualization data, or ``None`` for an empty frame

        The interim dataframe comprises of the group-by and non-group-by columns and
        the union of the metrics representing the non-percent and percent metrics. Note
        the percent metrics have yet to be transformed.
        """
        # Transform the data frame to adhere to the UI ordering of the columns and
        # metrics whilst simultaneously computing the percentages (via normalization)
        # for the percent metrics.
        if df.empty:
            return None

        columns, percent_columns = self.columns, self.percent_columns
        if DTTM_ALIAS in df and self.is_timeseries:
            columns = [DTTM_ALIAS] + columns
        # Each percent column is normalized by its own sum and prefixed with
        # "%" so it doesn't collide with the untransformed metric column.
        df = pd.concat(
            [
                df[columns],
                (df[percent_columns].div(df[percent_columns].sum()).add_prefix("%")),
            ],
            axis=1,
        )
        # Clamp ints beyond JavaScript's safe-integer range before emitting.
        return self.handle_js_int_overflow(
            dict(records=df.to_dict(orient="records"), columns=list(df.columns))
        )

    @staticmethod
    @deprecated(deprecated_in="3.0")
    def json_dumps(query_obj: Any, sort_keys: bool = False) -> str:
        """Serialize ``query_obj`` to JSON, ISO-formatting datetimes and
        rendering NaN values as null (``ignore_nan=True``)."""
        return json.dumps(
            query_obj,
            default=utils.json_iso_dttm_ser,
            sort_keys=sort_keys,
            ignore_nan=True,
        )


class TimeTableViz(BaseViz):

"""A data table with rich time-series related columns"""
Expand Down Expand Up @@ -1076,65 +922,6 @@ def get_data(self, df: pd.DataFrame) -> VizData:
}


class BigNumberViz(BaseViz):

    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number"
    verbose_name = _("Big Number with Trendline")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = True

    @deprecated(deprecated_in="3.0")
    def query_obj(self) -> QueryObjectDict:
        """Build the query object, restricting metrics to the single
        configured metric.

        :raises QueryObjectValidationError: when no metric is configured
        """
        query = super().query_obj()
        selected_metric = self.form_data.get("metric")
        if not selected_metric:
            raise QueryObjectValidationError(_("Pick a metric!"))
        query["metrics"] = [selected_metric]
        self.form_data["metric"] = selected_metric
        return query

    @deprecated(deprecated_in="3.0")
    def get_data(self, df: pd.DataFrame) -> VizData:
        """Pivot the result onto the time axis, apply any configured rolling
        window, and delegate the final formatting to the base class."""
        if df.empty:
            return None

        pivoted = df.pivot_table(
            index=DTTM_ALIAS,
            columns=[],
            values=self.metric_labels,
            dropna=False,
            # looking for any (only) value, preserving `None`
            aggfunc=np.min,
        )
        pivoted = self.apply_rolling(pivoted)
        pivoted[DTTM_ALIAS] = pivoted.index
        return super().get_data(pivoted)


class BigNumberTotalViz(BaseViz):

    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number_total"
    verbose_name = _("Big Number")
    credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
    is_timeseries = False

    @deprecated(deprecated_in="3.0")
    def query_obj(self) -> QueryObjectDict:
        """Build the query object for the single configured metric.

        :raises QueryObjectValidationError: when no metric is configured
        """
        query = super().query_obj()
        selected_metric = self.form_data.get("metric")
        if not selected_metric:
            raise QueryObjectValidationError(_("Pick a metric!"))
        query["metrics"] = [selected_metric]
        self.form_data["metric"] = selected_metric

        # Limiting rows is not required as only one cell is returned
        query["row_limit"] = None
        return query


class NVD3TimeSeriesViz(NVD3Viz):

"""A rich line chart component with tons of options"""
Expand Down
4 changes: 2 additions & 2 deletions tests/integration_tests/cache_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def test_no_data_cache(self):
app.config["DATA_CACHE_CONFIG"] = {"CACHE_TYPE": "NullCache"}
cache_manager.init_app(app)

slc = self.get_slice("Girls", db.session)
slc = self.get_slice("Top 10 Girl Name Share", db.session)
json_endpoint = "/superset/explore_json/{}/{}/".format(
slc.datasource_type, slc.datasource_id
)
Expand All @@ -73,7 +73,7 @@ def test_slice_data_cache(self):
}
cache_manager.init_app(app)

slc = self.get_slice("Boys", db.session)
slc = self.get_slice("Top 10 Girl Name Share", db.session)
json_endpoint = "/superset/explore_json/{}/{}/".format(
slc.datasource_type, slc.datasource_id
)
Expand Down
2 changes: 1 addition & 1 deletion tests/integration_tests/charts/api_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -1715,7 +1715,7 @@ def test_gets_owned_created_favorited_by_me_filter(self):
)
def test_warm_up_cache(self):
self.login()
slc = self.get_slice("Girls", db.session)
slc = self.get_slice("Top 10 Girl Name Share", db.session)
rv = self.client.put("/api/v1/chart/warm_up_cache", json={"chart_id": slc.id})
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
Expand Down
2 changes: 1 addition & 1 deletion tests/integration_tests/charts/commands_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -456,7 +456,7 @@ def test_warm_up_cache_command_chart_not_found(self):

@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_warm_up_cache(self):
slc = self.get_slice("Girls", db.session)
slc = self.get_slice("Top 10 Girl Name Share", db.session)
result = ChartWarmUpCacheCommand(slc.id, None, None).run()
self.assertEqual(
result, {"chart_id": slc.id, "viz_error": None, "viz_status": "success"}
Expand Down
14 changes: 8 additions & 6 deletions tests/integration_tests/core_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ def test_slice_endpoint(self):
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_viz_cache_key(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
slc = self.get_slice("Top 10 Girl Name Share", db.session)

viz = slc.viz
qobj = viz.query_obj()
Expand Down Expand Up @@ -279,7 +279,9 @@ def test_slice_data(self):
# slice data should have some required attributes
self.login(username="admin")
slc = self.get_slice(
slice_name="Girls", session=db.session, expunge_from_session=False
slice_name="Top 10 Girl Name Share",
session=db.session,
expunge_from_session=False,
)
slc_data_attributes = slc.data.keys()
assert "changed_on" in slc_data_attributes
Expand Down Expand Up @@ -391,7 +393,7 @@ def test_databaseview_edit(self, username="admin"):
)
def test_warm_up_cache(self):
self.login()
slc = self.get_slice("Girls", db.session)
slc = self.get_slice("Top 10 Girl Name Share", db.session)
data = self.get_json_resp(f"/superset/warm_up_cache?slice_id={slc.id}")
self.assertEqual(
data, [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
Expand All @@ -418,10 +420,10 @@ def test_cache_logging(self):
self.login("admin")
store_cache_keys = app.config["STORE_CACHE_KEYS_IN_METADATA_DB"]
app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = True
girls_slice = self.get_slice("Girls", db.session)
self.get_json_resp(f"/superset/warm_up_cache?slice_id={girls_slice.id}")
slc = self.get_slice("Top 10 Girl Name Share", db.session)
self.get_json_resp(f"/superset/warm_up_cache?slice_id={slc.id}")
ck = db.session.query(CacheKey).order_by(CacheKey.id.desc()).first()
assert ck.datasource_uid == f"{girls_slice.table.id}__table"
assert ck.datasource_uid == f"{slc.table.id}__table"
app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = store_cache_keys

def test_redirect_invalid(self):
Expand Down
2 changes: 1 addition & 1 deletion tests/integration_tests/security_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -1680,7 +1680,7 @@ def test_raise_for_access_table(self, mock_can_access):
def test_raise_for_access_viz(
self, mock_can_access_schema, mock_can_access, mock_is_owner
):
test_viz = viz.TableViz(self.get_datasource_mock(), form_data={})
test_viz = viz.TimeTableViz(self.get_datasource_mock(), form_data={})

mock_can_access_schema.return_value = True
security_manager.raise_for_access(viz=test_viz)
Expand Down
2 changes: 1 addition & 1 deletion tests/integration_tests/utils_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -974,7 +974,7 @@ def test_get_form_data_corrupted_json(self) -> None:
def test_log_this(self) -> None:
# TODO: Add additional scenarios.
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
slc = self.get_slice("Top 10 Girl Name Share", db.session)
dashboard_id = 1

assert slc.viz is not None
Expand Down
Loading

0 comments on commit ac7761a

Please sign in to comment.