Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: add support for microsoft onelake #1564

Closed
wants to merge 19 commits into from
Closed
Show file tree
Hide file tree
Changes from 17 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 16 additions & 16 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,23 +15,23 @@ debug = "line-tables-only"

[workspace.dependencies]
# arrow
arrow = { version = "45" }
arrow-array = { version = "45" }
arrow-buffer = { version = "45" }
arrow-cast = { version = "45" }
arrow-ord = { version = "45" }
arrow-row = { version = "45" }
arrow-schema = { version = "45" }
arrow-select = { version = "45" }
parquet = { version = "45" }
arrow = { version = "46.0.0" }
arrow-array = { version = "46.0.0" }
arrow-buffer = { version = "46.0.0" }
arrow-cast = { version = "46.0.0" }
arrow-ord = { version = "46.0.0" }
arrow-row = { version = "46.0.0" }
arrow-schema = { version = "46.0.0" }
arrow-select = { version = "46.0.0" }
parquet = { version = "46.0.0" }

# datafusion
datafusion = { version = "30" }
datafusion-expr = { version = "30" }
datafusion-common = { version = "30" }
datafusion-proto = { version = "30" }
datafusion-sql = { version = "30" }
datafusion-physical-expr = { version = "30" }
datafusion = { version = "31" }
datafusion-expr = { version = "31" }
datafusion-common = { version = "31" }
datafusion-proto = { version = "31" }
datafusion-sql = { version = "31" }
datafusion-physical-expr = { version = "31" }

# serde
serde = { version = "1", features = ["derive"] }
Expand All @@ -49,4 +49,4 @@ uuid = { version = "1" }
async-trait = { version = "0.1" }
futures = { version = "0.3" }
tokio = { version = "1" }
num_cpus = { version = "1" }
num_cpus = { version = "1" }
4 changes: 2 additions & 2 deletions rust/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ log = "0"
libc = ">=0.2.90, <1"
num-bigint = "0.4"
num-traits = "0.2.15"
object_store = "0.6.1"
object_store = "0.7.0"
once_cell = "1.16.0"
parking_lot = "0.12"
parquet2 = { version = "0.17", optional = true }
Expand All @@ -99,7 +99,7 @@ reqwest-retry = { version = "0.2.2", optional = true }
# Datafusion
dashmap = { version = "5", optional = true }

sqlparser = { version = "0.36", optional = true }
sqlparser = { version = "0.37", optional = true }

# NOTE dependencies only for integration tests
fs_extra = { version = "1.2.0", optional = true }
Expand Down
2 changes: 2 additions & 0 deletions rust/src/storage/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,8 @@ pub(crate) fn configure_store(
try_configure_s3(url, options)
} else if host.contains("dfs.core.windows.net")
|| host.contains("blob.core.windows.net")
|| host.contains("dfs.fabric.microsoft.com")
|| host.contains("blob.fabric.microsoft.com")
{
try_configure_azure(url, options)
} else {
Expand Down
51 changes: 51 additions & 0 deletions rust/src/test_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ use chrono::Utc;
use fs_extra::dir::{copy, CopyOptions};
use object_store::DynObjectStore;
use serde_json::json;
use std::env;
use std::sync::Arc;
use tempdir::TempDir;

Expand Down Expand Up @@ -33,8 +34,29 @@ impl IntegrationContext {
// create a fresh bucket in every context. This is done via CLI...
let bucket = match integration {
StorageIntegration::Local => tmp_dir.as_ref().to_str().unwrap().to_owned(),
StorageIntegration::Onelake => {
let account_name =
env::var("AZURE_STORAGE_ACCOUNT_NAME").unwrap_or(String::from("onelake"));
let container_name =
env::var("AZURE_STORAGE_CONTAINER_NAME").unwrap_or(String::from("delta-rs"));
format!(
"{0}.dfs.fabric.microsoft.com/{1}",
account_name, container_name
)
}
StorageIntegration::OnelakeAbfs => {
let account_name =
env::var("AZURE_STORAGE_ACCOUNT_NAME").unwrap_or(String::from("onelake"));
let container_name =
env::var("AZURE_STORAGE_CONTAINER_NAME").unwrap_or(String::from("delta-rs"));
format!(
"{0}@{1}.dfs.fabric.microsoft.com",
container_name, account_name
)
}
_ => format!("test-delta-table-{}", Utc::now().timestamp()),
};

if let StorageIntegration::Google = integration {
gs_cli::prepare_env();
let base_url = std::env::var("GOOGLE_BASE_URL")?;
Expand All @@ -46,10 +68,13 @@ impl IntegrationContext {
account_path.as_path().to_str().unwrap(),
);
}

integration.create_bucket(&bucket)?;
let store_uri = match integration {
StorageIntegration::Amazon => format!("s3://{}", &bucket),
StorageIntegration::Microsoft => format!("az://{}", &bucket),
StorageIntegration::Onelake => format!("https://{}", &bucket),
StorageIntegration::OnelakeAbfs => format!("abfss://{}", &bucket),
StorageIntegration::Google => format!("gs://{}", &bucket),
StorageIntegration::Local => format!("file://{}", &bucket),
StorageIntegration::Hdfs => format!("hdfs://localhost:9000/{}", &bucket),
Expand Down Expand Up @@ -84,6 +109,8 @@ impl IntegrationContext {
match self.integration {
StorageIntegration::Amazon => format!("s3://{}", &self.bucket),
StorageIntegration::Microsoft => format!("az://{}", &self.bucket),
StorageIntegration::Onelake => format!("https://{}", &self.bucket),
StorageIntegration::OnelakeAbfs => format!("abfss://{}", &self.bucket),
StorageIntegration::Google => format!("gs://{}", &self.bucket),
StorageIntegration::Local => format!("file://{}", &self.bucket),
StorageIntegration::Hdfs => format!("hdfs://localhost:9000/{}", &self.bucket),
Expand Down Expand Up @@ -149,6 +176,8 @@ impl Drop for IntegrationContext {
StorageIntegration::Google => {
gs_cli::delete_bucket(&self.bucket).unwrap();
}
StorageIntegration::Onelake => (),
StorageIntegration::OnelakeAbfs => (),
StorageIntegration::Local => (),
StorageIntegration::Hdfs => {
hdfs_cli::delete_dir(&self.bucket).unwrap();
Expand All @@ -161,17 +190,21 @@ impl Drop for IntegrationContext {
/// Storage backends exercised by the integration-test harness.
pub enum StorageIntegration {
    /// AWS S3 (`s3://` root URIs)
    Amazon,
    /// Azure Blob / ADLS Gen2 (`az://` root URIs)
    Microsoft,
    /// Microsoft Fabric OneLake addressed via an `https://` root URI
    Onelake,
    /// Google Cloud Storage (`gs://` root URIs)
    Google,
    /// Local filesystem in a temp directory (`file://` root URIs)
    Local,
    /// HDFS at `hdfs://localhost:9000`
    Hdfs,
    /// Microsoft Fabric OneLake addressed via an `abfss://` root URI
    OnelakeAbfs,
}

impl StorageIntegration {
fn prepare_env(&self) {
match self {
Self::Microsoft => az_cli::prepare_env(),
Self::Onelake => onelake_cli::prepare_env(),
Self::Amazon => s3_cli::prepare_env(),
Self::Google => gs_cli::prepare_env(),
Self::OnelakeAbfs => onelake_cli::prepare_env(),
Self::Local => (),
Self::Hdfs => (),
}
Expand All @@ -183,6 +216,8 @@ impl StorageIntegration {
az_cli::create_container(name)?;
Ok(())
}
Self::Onelake => Ok(()),
Self::OnelakeAbfs => Ok(()),
Self::Amazon => {
s3_cli::create_bucket(format!("s3://{}", name.as_ref()))?;
set_env_if_not_set(
Expand Down Expand Up @@ -264,6 +299,22 @@ pub fn set_env_if_not_set(key: impl AsRef<str>, value: impl AsRef<str>) {
};
}

/// Environment setup helpers for Microsoft OneLake integration tests.
pub mod onelake_cli {
    use super::set_env_if_not_set;

    /// Seed the Azure storage environment variables the OneLake tests read,
    /// leaving any variable that is already set untouched.
    pub fn prepare_env() {
        let defaults = [
            ("AZURE_STORAGE_USE_EMULATOR", "0"),
            ("AZURE_STORAGE_ACCOUNT_NAME", "daily-onelake"),
            (
                "AZURE_STORAGE_CONTAINER_NAME",
                "86bc63cf-5086-42e0-b16d-6bc580d1dc87",
            ),
            // Placeholder bearer token; real runs are expected to provide
            // AZURE_STORAGE_TOKEN themselves.
            ("AZURE_STORAGE_TOKEN", "jwt-token"),
        ];
        for (key, value) in defaults {
            set_env_if_not_set(key, value);
        }
    }
}

/// small wrapper around az cli
pub mod az_cli {
use super::set_env_if_not_set;
Expand Down
6 changes: 4 additions & 2 deletions rust/src/writer/record_batch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
use std::{collections::HashMap, sync::Arc};

use arrow::array::{Array, UInt32Array};
use arrow::compute::{lexicographical_partition_ranges, take, SortColumn};
use arrow::compute::{take, SortColumn};
use arrow::datatypes::{Schema as ArrowSchema, SchemaRef as ArrowSchemaRef};
use arrow::error::ArrowError;
use arrow::record_batch::RecordBatch;
Expand Down Expand Up @@ -372,7 +372,9 @@ pub(crate) fn divide_by_partition_values(
})
.collect::<Result<Vec<_>, DeltaWriterError>>()?;

let partition_ranges = lexicographical_partition_ranges(sorted_partition_columns.as_slice())?;
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

#[allow(warnings)] for now, as `lexicographical_partition_ranges` is deprecated

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@wjones127 / @rtyler - Datafusion packages have been upgraded

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

FYI another dedicated PR was merged to upgrade datafusion, so you'll need to address conflicts with that. Should be quick.

#[allow(warnings)]
let partition_ranges =
arrow::compute::lexicographical_partition_ranges(sorted_partition_columns.as_slice())?;

for range in partition_ranges {
// get row indices for current partition
Expand Down
43 changes: 43 additions & 0 deletions rust/tests/integration_object_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,24 @@ async fn test_object_store_azure() -> TestResult {
Ok(())
}

/// Round-trip read/write against OneLake through an `https://` root URI.
#[cfg(feature = "azure")]
#[tokio::test]
#[serial]
async fn test_object_store_onelake() -> TestResult {
    let sample_file =
        Path::from("17d3977c-d46e-4bae-8fed-ff467e674aed/Files/SampleCustomerList.csv");
    read_write_test_onelake(StorageIntegration::Onelake, &sample_file).await?;
    Ok(())
}

/// Round-trip read/write against OneLake through an `abfss://` root URI.
#[cfg(feature = "azure")]
#[tokio::test]
#[serial]
async fn test_object_store_onelake_abfs() -> TestResult {
    let sample_file =
        Path::from("17d3977c-d46e-4bae-8fed-ff467e674aed/Files/SampleCustomerList.csv");
    read_write_test_onelake(StorageIntegration::OnelakeAbfs, &sample_file).await?;
    Ok(())
}

#[cfg(feature = "s3")]
#[tokio::test]
#[serial]
Expand All @@ -48,6 +66,31 @@ async fn test_object_store_hdfs() -> TestResult {
Ok(())
}

/// Round-trips a small payload through a OneLake-backed object store:
/// writes `path`, reads the object back whole, then reads byte ranges.
///
/// Fixes over the original: removed commented-out debug `println!` lines and
/// replaced `.unwrap()` calls with `?` — the function already returns
/// `TestResult`, so storage errors should propagate as test failures rather
/// than panics.
async fn read_write_test_onelake(integration: StorageIntegration, path: &Path) -> TestResult {
    let context = IntegrationContext::new(integration)?;

    // NOTE(review): allow_http(true) presumably permits non-TLS endpoints
    // (e.g. emulators) — confirm it is actually needed for OneLake.
    let delta_store = DeltaTableBuilder::from_uri(&context.root_uri())
        .with_allow_http(true)
        .build_storage()?;

    let expected = Bytes::from_static(b"test world from delta-rs on friday");

    // Full-object round trip.
    delta_store.put(path, expected.clone()).await?;
    let fetched = delta_store.get(path).await?.bytes().await?;
    assert_eq!(expected, fetched);

    // Ranged reads, including prefix, interior, and full-length ranges.
    for range in [0..10, 3..5, 0..expected.len()] {
        let data = delta_store.get_range(path, range.clone()).await?;
        assert_eq!(&data[..], &expected[range])
    }

    Ok(())
}

async fn test_object_store(integration: StorageIntegration, skip_copy: bool) -> TestResult {
let context = IntegrationContext::new(integration)?;
let delta_store = DeltaTableBuilder::from_uri(&context.root_uri())
Expand Down
Loading