chore: bump to datafusion 39, arrow 52, pyo3 0.21 (#2581)
# Description
Updates the arrow and datafusion dependencies to 52 and 39,
respectively. This is necessary for updating pyo3.

While most changes were trivial, some required substantial rewrites.
Notably, the logic for the update operation had to be rewritten (and
simplified) to accommodate new sanity checks inside datafusion
(apache/datafusion#10088).

Depends on delta-kernel having its arrow and object_store versions
bumped as well. This PR doesn't include any major changes for pyo3;
I'll open a separate PR that depends on this one.


---------

Co-authored-by: R. Tyler Croy <rtyler@brokenco.de>
abhiaagarwal and rtyler authored Jun 11, 2024
1 parent 0a44a0d commit 57795da
Showing 43 changed files with 368 additions and 463 deletions.
44 changes: 22 additions & 22 deletions Cargo.toml
```diff
@@ -26,33 +26,33 @@ debug = true
 debug = "line-tables-only"
 
 [workspace.dependencies]
-delta_kernel = { version = "0.1" }
+delta_kernel = { version = "0.1.1" }
 # delta_kernel = { path = "../delta-kernel-rs/kernel" }
 
 # arrow
-arrow = { version = "51" }
-arrow-arith = { version = "51" }
-arrow-array = { version = "51", features = ["chrono-tz"] }
-arrow-buffer = { version = "51" }
-arrow-cast = { version = "51" }
-arrow-ipc = { version = "51" }
-arrow-json = { version = "51" }
-arrow-ord = { version = "51" }
-arrow-row = { version = "51" }
-arrow-schema = { version = "51" }
-arrow-select = { version = "51" }
-object_store = { version = "0.9" }
-parquet = { version = "51" }
+arrow = { version = "52" }
+arrow-arith = { version = "52" }
+arrow-array = { version = "52", features = ["chrono-tz"] }
+arrow-buffer = { version = "52" }
+arrow-cast = { version = "52" }
+arrow-ipc = { version = "52" }
+arrow-json = { version = "52" }
+arrow-ord = { version = "52" }
+arrow-row = { version = "52" }
+arrow-schema = { version = "52" }
+arrow-select = { version = "52" }
+object_store = { version = "0.10.1" }
+parquet = { version = "52" }
 
 # datafusion
-datafusion = { version = "37.1" }
-datafusion-expr = { version = "37.1" }
-datafusion-common = { version = "37.1" }
-datafusion-proto = { version = "37.1" }
-datafusion-sql = { version = "37.1" }
-datafusion-physical-expr = { version = "37.1" }
-datafusion-functions = { version = "37.1" }
-datafusion-functions-array = { version = "37.1" }
+datafusion = { version = "39" }
+datafusion-expr = { version = "39" }
+datafusion-common = { version = "39" }
+datafusion-proto = { version = "39" }
+datafusion-sql = { version = "39" }
+datafusion-physical-expr = { version = "39" }
+datafusion-functions = { version = "39" }
+datafusion-functions-array = { version = "39" }
 
 # serde
 serde = { version = "1.0.194", features = ["derive"] }
```
6 changes: 3 additions & 3 deletions crates/aws/src/lib.rs
```diff
@@ -189,15 +189,15 @@ impl DynamoDbLockClient {
         if dynamodb_override_endpoint exists/AWS_ENDPOINT_URL_DYNAMODB is specified by user
         use dynamodb_override_endpoint to create dynamodb client
         */
-        let dynamodb_sdk_config = match dynamodb_override_endpoint {
+        match dynamodb_override_endpoint {
             Some(dynamodb_endpoint_url) => sdk_config
                 .to_owned()
                 .to_builder()
                 .endpoint_url(dynamodb_endpoint_url)
                 .build(),
             None => sdk_config.to_owned(),
-        };
-        dynamodb_sdk_config
+        }
     }
 }
 
 /// Create the lock table where DynamoDb stores the commit information for all delta tables.
```
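The change above is a pure refactor: instead of binding the match result to a local and returning the local, the match becomes the function's tail expression. A minimal standalone sketch of the same idiom (names here are illustrative, not from the crate):

```rust
fn endpoint_label(override_endpoint: Option<&str>) -> String {
    // The match itself is the return value; no `let x = ...; x` binding needed.
    match override_endpoint {
        Some(url) => format!("custom endpoint: {url}"),
        None => String::from("default endpoint"),
    }
}

fn main() {
    assert_eq!(endpoint_label(None), "default endpoint");
    assert_eq!(
        endpoint_label(Some("http://localhost:8000")),
        "custom endpoint: http://localhost:8000"
    );
}
```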
23 changes: 10 additions & 13 deletions crates/aws/src/storage.rs
```diff
@@ -5,22 +5,22 @@ use aws_config::provider_config::ProviderConfig;
 use aws_config::{Region, SdkConfig};
 use bytes::Bytes;
 use deltalake_core::storage::object_store::{
-    aws::AmazonS3ConfigKey, parse_url_opts, GetOptions, GetResult, ListResult, MultipartId,
-    ObjectMeta, ObjectStore, PutOptions, PutResult, Result as ObjectStoreResult,
+    aws::AmazonS3ConfigKey, parse_url_opts, GetOptions, GetResult, ListResult, ObjectMeta,
+    ObjectStore, PutOptions, PutResult, Result as ObjectStoreResult,
 };
 use deltalake_core::storage::{
     limit_store_handler, str_is_truthy, ObjectStoreFactory, ObjectStoreRef, StorageOptions,
 };
 use deltalake_core::{DeltaResult, ObjectStoreError, Path};
 use futures::stream::BoxStream;
 use futures::Future;
+use object_store::{MultipartUpload, PutMultipartOpts, PutPayload};
 use std::collections::HashMap;
 use std::fmt::Debug;
 use std::ops::Range;
 use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
-use tokio::io::AsyncWrite;
 use url::Url;
 
 use crate::errors::DynamoDbConfigError;
@@ -334,14 +334,14 @@ impl std::fmt::Debug for S3StorageBackend {
 
 #[async_trait::async_trait]
 impl ObjectStore for S3StorageBackend {
-    async fn put(&self, location: &Path, bytes: Bytes) -> ObjectStoreResult<PutResult> {
+    async fn put(&self, location: &Path, bytes: PutPayload) -> ObjectStoreResult<PutResult> {
         self.inner.put(location, bytes).await
     }
 
     async fn put_opts(
         &self,
         location: &Path,
-        bytes: Bytes,
+        bytes: PutPayload,
         options: PutOptions,
     ) -> ObjectStoreResult<PutResult> {
         self.inner.put_opts(location, bytes, options).await
@@ -402,19 +402,16 @@ impl ObjectStore for S3StorageBackend {
         }
     }
 
-    async fn put_multipart(
-        &self,
-        location: &Path,
-    ) -> ObjectStoreResult<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
+    async fn put_multipart(&self, location: &Path) -> ObjectStoreResult<Box<dyn MultipartUpload>> {
         self.inner.put_multipart(location).await
     }
 
-    async fn abort_multipart(
+    async fn put_multipart_opts(
         &self,
         location: &Path,
-        multipart_id: &MultipartId,
-    ) -> ObjectStoreResult<()> {
-        self.inner.abort_multipart(location, multipart_id).await
+        options: PutMultipartOpts,
+    ) -> ObjectStoreResult<Box<dyn MultipartUpload>> {
+        self.inner.put_multipart_opts(location, options).await
     }
 }
```
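These signature changes track the object_store 0.10 API: put and put_opts accept a PutPayload instead of Bytes, and the old (MultipartId, AsyncWrite) pair plus abort_multipart are replaced by a Box<dyn MultipartUpload> handle whose methods cover writing parts, completing, and aborting. A minimal sketch of driving the new upload API against the bundled in-memory store, assuming object_store 0.10 with the bytes and tokio crates available:

```rust
use bytes::Bytes;
use object_store::{memory::InMemory, path::Path, MultipartUpload, ObjectStore, PutPayload};

#[tokio::main]
async fn main() -> object_store::Result<()> {
    let store = InMemory::new();
    let location = Path::from("example/data.bin");

    // put() now takes a PutPayload; Bytes converts via Into, as the tests below do.
    let payload: PutPayload = Bytes::from_static(b"hello").into();
    store.put(&location, payload).await?;

    // put_multipart() now hands back a Box<dyn MultipartUpload> instead of a
    // (MultipartId, AsyncWrite) pair; parts are written through the handle.
    let mut upload = store.put_multipart(&location).await?;
    upload.put_part(Bytes::from_static(b"part-1").into()).await?;
    upload.complete().await?; // or upload.abort().await? to cancel

    Ok(())
}
```

Since aborting is now a method on the upload handle, delegating wrappers like S3StorageBackend no longer need a separate abort_multipart passthrough.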
6 changes: 3 additions & 3 deletions crates/aws/tests/common.rs
```diff
@@ -87,7 +87,7 @@ impl S3Integration {
             "dynamodb",
             "create-table",
             "--table-name",
-            &table_name,
+            table_name,
             "--provisioned-throughput",
             "ReadCapacityUnits=1,WriteCapacityUnits=1",
             "--attribute-definitions",
@@ -112,7 +112,7 @@ impl S3Integration {
     }
 
     fn wait_for_table(table_name: &str) -> std::io::Result<()> {
-        let args = ["dynamodb", "describe-table", "--table-name", &table_name];
+        let args = ["dynamodb", "describe-table", "--table-name", table_name];
         loop {
             let output = Command::new("aws")
                 .args(args)
@@ -145,7 +145,7 @@ impl S3Integration {
 
     fn delete_dynamodb_table(table_name: &str) -> std::io::Result<ExitStatus> {
         let mut child = Command::new("aws")
-            .args(["dynamodb", "delete-table", "--table-name", &table_name])
+            .args(["dynamodb", "delete-table", "--table-name", table_name])
             .stdout(Stdio::null())
             .spawn()
             .expect("aws command is installed");
```
22 changes: 10 additions & 12 deletions crates/aws/tests/repair_s3_rename_test.rs
```diff
@@ -9,6 +9,7 @@ use deltalake_core::storage::object_store::{
 use deltalake_core::{DeltaTableBuilder, ObjectStore, Path};
 use deltalake_test::utils::IntegrationContext;
 use futures::stream::BoxStream;
+use object_store::{MultipartUpload, PutMultipartOpts, PutPayload};
 use serial_test::serial;
 use std::ops::Range;
 use std::sync::{Arc, Mutex};
@@ -60,8 +61,8 @@ async fn run_repair_test_case(path: &str, pause_copy: bool) -> Result<(), Object
     };
     let (s3_2, _) = create_s3_backend(&context, "w2", None, None);
 
-    s3_1.put(&src1, Bytes::from("test1")).await.unwrap();
-    s3_2.put(&src2, Bytes::from("test2")).await.unwrap();
+    s3_1.put(&src1, Bytes::from("test1").into()).await.unwrap();
+    s3_2.put(&src2, Bytes::from("test2").into()).await.unwrap();
 
     let rename1 = rename(s3_1, &src1, &dst1);
     // to ensure that first one is started actually first
@@ -166,14 +167,14 @@ impl ObjectStore for DelayedObjectStore {
         self.delete(from).await
     }
 
-    async fn put(&self, location: &Path, bytes: Bytes) -> ObjectStoreResult<PutResult> {
+    async fn put(&self, location: &Path, bytes: PutPayload) -> ObjectStoreResult<PutResult> {
         self.inner.put(location, bytes).await
     }
 
     async fn put_opts(
         &self,
         location: &Path,
-        bytes: Bytes,
+        bytes: PutPayload,
         options: PutOptions,
     ) -> ObjectStoreResult<PutResult> {
         self.inner.put_opts(location, bytes, options).await
@@ -227,19 +228,16 @@ impl ObjectStore for DelayedObjectStore {
         self.inner.rename_if_not_exists(from, to).await
     }
 
-    async fn put_multipart(
-        &self,
-        location: &Path,
-    ) -> ObjectStoreResult<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
+    async fn put_multipart(&self, location: &Path) -> ObjectStoreResult<Box<dyn MultipartUpload>> {
        self.inner.put_multipart(location).await
     }
 
-    async fn abort_multipart(
+    async fn put_multipart_opts(
         &self,
         location: &Path,
-        multipart_id: &MultipartId,
-    ) -> ObjectStoreResult<()> {
-        self.inner.abort_multipart(location, multipart_id).await
+        options: PutMultipartOpts,
+    ) -> ObjectStoreResult<Box<dyn MultipartUpload>> {
+        self.inner.put_multipart_opts(location, options).await
     }
 }
```
5 changes: 4 additions & 1 deletion crates/azure/tests/integration.rs
```diff
@@ -75,7 +75,10 @@ async fn read_write_test_onelake(context: &IntegrationContext, path: &Path) -> T
 
     let expected = Bytes::from_static(b"test world from delta-rs on friday");
 
-    delta_store.put(path, expected.clone()).await.unwrap();
+    delta_store
+        .put(path, expected.clone().into())
+        .await
+        .unwrap();
     let fetched = delta_store.get(path).await.unwrap().bytes().await.unwrap();
     assert_eq!(expected, fetched);
```
3 changes: 2 additions & 1 deletion crates/benchmarks/src/bin/merge.rs
```diff
@@ -7,9 +7,10 @@ use arrow::datatypes::Schema as ArrowSchema;
 use arrow_array::{RecordBatch, StringArray, UInt32Array};
 use chrono::Duration;
 use clap::{command, Args, Parser, Subcommand};
+use datafusion::functions::expr_fn::random;
 use datafusion::{datasource::MemTable, prelude::DataFrame};
 use datafusion_common::DataFusionError;
-use datafusion_expr::{cast, col, lit, random};
+use datafusion_expr::{cast, col, lit};
 use deltalake_core::protocol::SaveMode;
 use deltalake_core::{
     arrow::{
```
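The benchmark only needs an import move: in datafusion 39, random() lives in datafusion::functions::expr_fn rather than datafusion_expr. A small sketch using the new location, assuming datafusion 39 and a tokio runtime:

```rust
use datafusion::functions::expr_fn::random;
use datafusion::prelude::*;

#[tokio::main]
async fn main() -> datafusion::error::Result<()> {
    let ctx = SessionContext::new();
    // random() is imported from datafusion::functions::expr_fn in datafusion 39;
    // the expression itself behaves as before.
    let df = ctx.read_empty()?.select(vec![random().alias("r")])?;
    df.show().await?;
    Ok(())
}
```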
2 changes: 1 addition & 1 deletion crates/core/src/delta_datafusion/cdf/scan.rs
```diff
@@ -38,7 +38,7 @@ impl ExecutionPlan for DeltaCdfScan {
         self.plan.properties()
     }
 
-    fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
+    fn children(&self) -> Vec<&Arc<dyn ExecutionPlan>> {
         vec![]
     }
```
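This one-line change tracks a datafusion 39 trait change: ExecutionPlan::children now returns borrowed children (Vec<&Arc<dyn ExecutionPlan>>) instead of cloned Arcs. A sketch of what callers observe on a leaf node, assuming datafusion 39:

```rust
use std::sync::Arc;

use datafusion::arrow::datatypes::Schema;
use datafusion::physical_plan::{empty::EmptyExec, ExecutionPlan};

fn main() {
    // A leaf node such as EmptyExec has no children; under datafusion 39 the
    // accessor hands out references rather than cloning the Arcs.
    let plan: Arc<dyn ExecutionPlan> = Arc::new(EmptyExec::new(Arc::new(Schema::empty())));
    let children: Vec<&Arc<dyn ExecutionPlan>> = plan.children();
    assert!(children.is_empty());
}
```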
1 change: 1 addition & 0 deletions crates/core/src/delta_datafusion/cdf/scan_utils.rs
```diff
@@ -83,6 +83,7 @@ pub fn create_partition_values<F: FileAction>(
         partition_values: new_part_values.clone(),
         extensions: None,
         range: None,
+        statistics: None,
     };
 
     file_groups.entry(new_part_values).or_default().push(part);
```
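The new statistics: None field is there because datafusion 39's PartitionedFile gained an optional per-file Statistics slot, so struct literals must now initialize it. The convenience constructor defaults it to None, as in this sketch (assuming datafusion 39):

```rust
use datafusion::datasource::listing::PartitionedFile;

fn main() {
    // PartitionedFile::new fills in sensible defaults, including the
    // statistics field introduced in datafusion 39.
    let file = PartitionedFile::new("part-00000.parquet".to_string(), 1024);
    assert!(file.statistics.is_none());
    assert!(file.range.is_none());
}
```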