-
Notifications
You must be signed in to change notification settings - Fork 172
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
feat: Supports Stddev #348
Changes from 5 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
@@ -0,0 +1,181 @@ | ||||||
// Licensed to the Apache Software Foundation (ASF) under one | ||||||
// or more contributor license agreements. See the NOTICE file | ||||||
// distributed with this work for additional information | ||||||
// regarding copyright ownership. The ASF licenses this file | ||||||
// to you under the Apache License, Version 2.0 (the | ||||||
// "License"); you may not use this file except in compliance | ||||||
// with the License. You may obtain a copy of the License at | ||||||
// | ||||||
// http://www.apache.org/licenses/LICENSE-2.0 | ||||||
// | ||||||
// Unless required by applicable law or agreed to in writing, | ||||||
// software distributed under the License is distributed on an | ||||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||||||
// KIND, either express or implied. See the License for the | ||||||
// specific language governing permissions and limitations | ||||||
// under the License. | ||||||
|
||||||
//! Defines physical expressions that can evaluated at runtime during query execution | ||||||
|
||||||
use std::{any::Any, sync::Arc}; | ||||||
|
||||||
use crate::execution::datafusion::expressions::{ | ||||||
stats::StatsType, utils::down_cast_any_ref, variance::VarianceAccumulator, | ||||||
}; | ||||||
use arrow::{ | ||||||
array::ArrayRef, | ||||||
datatypes::{DataType, Field}, | ||||||
}; | ||||||
use datafusion::logical_expr::Accumulator; | ||||||
use datafusion_common::{internal_err, Result, ScalarValue}; | ||||||
use datafusion_physical_expr::{expressions::format_state_name, AggregateExpr, PhysicalExpr}; | ||||||
|
||||||
/// STDDEV and STDDEV_SAMP (standard deviation) aggregate expression | ||||||
/// The implementation mostly is the same as the DataFusion's implementation. The reason | ||||||
/// we have our own implementation is that DataFusion has UInt64 for state_field `count`, | ||||||
/// while Spark has Double for count. Also we have added `null_on_divide_by_zero` | ||||||
/// to be consistent with Spark's implementation. | ||||||
#[derive(Debug)] | ||||||
pub struct Stddev { | ||||||
name: String, | ||||||
expr: Arc<dyn PhysicalExpr>, | ||||||
stats_type: StatsType, | ||||||
null_on_divide_by_zero: bool, | ||||||
} | ||||||
|
||||||
impl Stddev { | ||||||
/// Create a new STDDEV aggregate function | ||||||
pub fn new( | ||||||
expr: Arc<dyn PhysicalExpr>, | ||||||
name: impl Into<String>, | ||||||
data_type: DataType, | ||||||
stats_type: StatsType, | ||||||
null_on_divide_by_zero: bool, | ||||||
) -> Self { | ||||||
// the result of stddev just support FLOAT64 and Decimal data type. | ||||||
assert!(matches!(data_type, DataType::Float64)); | ||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hmm? So we also need to add DecimalType here? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It's |
||||||
Self { | ||||||
name: name.into(), | ||||||
expr, | ||||||
stats_type, | ||||||
null_on_divide_by_zero, | ||||||
} | ||||||
} | ||||||
} | ||||||
|
||||||
impl AggregateExpr for Stddev { | ||||||
/// Return a reference to Any that can be used for downcasting | ||||||
fn as_any(&self) -> &dyn Any { | ||||||
self | ||||||
} | ||||||
|
||||||
fn field(&self) -> Result<Field> { | ||||||
Ok(Field::new(&self.name, DataType::Float64, true)) | ||||||
} | ||||||
|
||||||
fn create_accumulator(&self) -> Result<Box<dyn Accumulator>> { | ||||||
Ok(Box::new(StddevAccumulator::try_new( | ||||||
self.stats_type, | ||||||
self.null_on_divide_by_zero, | ||||||
)?)) | ||||||
} | ||||||
|
||||||
fn create_sliding_accumulator(&self) -> Result<Box<dyn Accumulator>> { | ||||||
Ok(Box::new(StddevAccumulator::try_new( | ||||||
self.stats_type, | ||||||
self.null_on_divide_by_zero, | ||||||
)?)) | ||||||
} | ||||||
|
||||||
fn state_fields(&self) -> Result<Vec<Field>> { | ||||||
Ok(vec![ | ||||||
Field::new( | ||||||
format_state_name(&self.name, "count"), | ||||||
DataType::Float64, | ||||||
true, | ||||||
), | ||||||
Field::new( | ||||||
format_state_name(&self.name, "mean"), | ||||||
DataType::Float64, | ||||||
true, | ||||||
), | ||||||
Field::new(format_state_name(&self.name, "m2"), DataType::Float64, true), | ||||||
]) | ||||||
} | ||||||
|
||||||
fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> { | ||||||
vec![self.expr.clone()] | ||||||
} | ||||||
|
||||||
fn name(&self) -> &str { | ||||||
&self.name | ||||||
} | ||||||
} | ||||||
|
||||||
impl PartialEq<dyn Any> for Stddev { | ||||||
fn eq(&self, other: &dyn Any) -> bool { | ||||||
down_cast_any_ref(other) | ||||||
.downcast_ref::<Self>() | ||||||
.map(|x| { | ||||||
self.name == x.name | ||||||
&& self.expr.eq(&x.expr) | ||||||
&& self.null_on_divide_by_zero == x.null_on_divide_by_zero | ||||||
andygrove marked this conversation as resolved.
Show resolved
Hide resolved
|
||||||
&& self.stats_type == x.stats_type | ||||||
}) | ||||||
.unwrap_or(false) | ||||||
} | ||||||
} | ||||||
|
||||||
/// An accumulator to compute the average | ||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Changed. Thanks |
||||||
#[derive(Debug)] | ||||||
pub struct StddevAccumulator { | ||||||
variance: VarianceAccumulator, | ||||||
} | ||||||
|
||||||
impl StddevAccumulator { | ||||||
/// Creates a new `StddevAccumulator` | ||||||
pub fn try_new(s_type: StatsType, null_on_divide_by_zero: bool) -> Result<Self> { | ||||||
Ok(Self { | ||||||
variance: VarianceAccumulator::try_new(s_type, null_on_divide_by_zero)?, | ||||||
}) | ||||||
} | ||||||
|
||||||
pub fn get_m2(&self) -> f64 { | ||||||
self.variance.get_m2() | ||||||
} | ||||||
} | ||||||
|
||||||
impl Accumulator for StddevAccumulator { | ||||||
fn state(&mut self) -> Result<Vec<ScalarValue>> { | ||||||
Ok(vec![ | ||||||
ScalarValue::from(self.variance.get_count()), | ||||||
ScalarValue::from(self.variance.get_mean()), | ||||||
ScalarValue::from(self.variance.get_m2()), | ||||||
]) | ||||||
} | ||||||
|
||||||
fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> { | ||||||
self.variance.update_batch(values) | ||||||
} | ||||||
|
||||||
fn retract_batch(&mut self, values: &[ArrayRef]) -> Result<()> { | ||||||
self.variance.retract_batch(values) | ||||||
} | ||||||
|
||||||
fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> { | ||||||
self.variance.merge_batch(states) | ||||||
} | ||||||
|
||||||
fn evaluate(&mut self) -> Result<ScalarValue> { | ||||||
let variance = self.variance.evaluate()?; | ||||||
match variance { | ||||||
ScalarValue::Float64(Some(e)) => Ok(ScalarValue::Float64(Some(e.sqrt()))), | ||||||
ScalarValue::Float64(None) => Ok(ScalarValue::Float64(None)), | ||||||
_ => internal_err!("Variance should be f64"), | ||||||
} | ||||||
} | ||||||
|
||||||
fn size(&self) -> usize { | ||||||
std::mem::align_of_val(self) - std::mem::align_of_val(&self.variance) + self.variance.size() | ||||||
} | ||||||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -31,7 +31,7 @@ struct<w_warehouse_sk:int,i_item_sk:int,d_moy:int,mean:double,cov:double,w_wareh | |
1 12259 1 326.5 1.219693210219279 1 12259 2 292.6666666666667 1.2808898286830026 | ||
1 12641 1 321.25 1.1286221893301993 1 12641 2 279.25 1.129134558577743 | ||
1 13043 1 260.5 1.355894484625015 1 13043 2 295.0 1.056210118409035 | ||
1 13157 1 260.5 1.5242630430075292 1 13157 2 413.5 1.0422561797285326 | ||
1 13157 1 260.5 1.524263043007529 1 13157 2 413.5 1.0422561797285326 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Wondering what is causing the digit difference... There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I am not sure what caused the digit difference. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. also cc @viirya There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Ideally, it would be good to compare floating point numbers based on an epsilon to make sure they are within some tolerance threshold. I assume we are currently just comparing text file output directly? Do we have a way to generate the output into a structured file type such as CSV or JSON? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The difference may be down to order of operations - depending on the order that batches that are being processed from different partitions, for example. I don't think we can expect it to be 100% deterministic in a distributed system. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, we are currently just comparing text file output directly. We are using Spark's TPCDSQuerySuite. It doesn't seem to be a way to generate the output into a structured file type. |
||
1 13293 1 325.25 1.1599721810918615 1 13293 2 345.75 1.0626233629994524 | ||
1 13729 1 486.0 1.0680776434770018 1 13729 2 389.6666666666667 1.3522269473359647 | ||
1 14137 1 427.0 1.0418229612154228 1 14137 2 387.5 1.0294855239302605 | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -73,8 +73,12 @@ class CometTPCDSQuerySuite | |
"q36", | ||
"q37", | ||
"q38", | ||
"q39a", | ||
"q39b", | ||
// TODO: comment 39a and 39b for now because the expected result for stddev failed: | ||
// expected: 1.5242630430075292, actual: 1.524263043007529. | ||
// Will change the comparison logic to detect floating-point numbers and compare | ||
// with epsilon | ||
// "q39a", | ||
// "q39b", | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We should create a ticket for this. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. opened #392 |
||
"q40", | ||
"q41", | ||
"q42", | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Seems copied from somewhere and not related?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
removed. Thanks