Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

fix: re-enable (xor) bloom filter index #7870

Merged
merged 7 commits into from
Sep 27, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 41 additions & 54 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions src/common/exception/src/exception_code.rs
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,7 @@ build_exceptions! {
StorageUnavailable(3901),
StorageUnsupported(3902),
StorageInsecure(3903),
DeprecatedIndexFormat(3904),
StorageOther(4000),
}

Expand Down
1 change: 0 additions & 1 deletion src/query/service/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,6 @@ backoff = { version = "0.4.0", features = ["futures", "tokio"] }
backon = "0.2.0"
base64 = "0.13.0"
bincode = "2.0.0-rc.1"
bit-vec = { version = "0.6.3", features = ["serde_std"] }
bumpalo = "3.11.0"
byteorder = "1.4.3"
bytes = "1.2.1"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ async fn do_purge_test(case_name: &str, operation: &str) -> Result<()> {
let ctx = fixture.ctx();
execute_command(ctx, &qry).await?;

// there should be only 1 snapshot, 1 segment, 1 block left, and 0 index left
// there should be only 1 snapshot, 1 segment, 1 block left, and 1 index left
check_data_dir(&fixture, case_name, 1, 1, 1, 1).await;
history_should_have_only_one_item(&fixture, case_name).await
}
Expand Down
7 changes: 1 addition & 6 deletions src/query/service/tests/it/storages/fuse/statistics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,8 @@
// limitations under the License.

use std::collections::HashMap;
use std::sync::Arc;

use common_base::base::tokio;
use common_catalog::table_context::TableContext;
use common_datablocks::DataBlock;
use common_datavalues::prelude::*;
use common_fuse_meta::meta::ClusterStatistics;
Expand Down Expand Up @@ -143,18 +141,15 @@ fn test_reduce_block_statistics_in_memory_size() -> common_exception::Result<()>
#[tokio::test]
async fn test_accumulator() -> common_exception::Result<()> {
let blocks = TestFixture::gen_sample_blocks(10, 1);
let fixture = TestFixture::new().await;
let ctx = fixture.ctx();
let mut stats_acc = StatisticsAccumulator::new();

let operator = Operator::new(opendal::services::memory::Builder::default().build()?);
let table_ctx: Arc<dyn TableContext> = ctx;
let loc_generator = TableMetaLocationGenerator::with_prefix("/".to_owned());

for item in blocks {
let block = item?;
let block_statistics = BlockStatistics::from(&block, "does_not_matter".to_owned(), None)?;
let block_writer = BlockWriter::new(&table_ctx, &operator, &loc_generator);
let block_writer = BlockWriter::new(&operator, &loc_generator);
let block_meta = block_writer.write(block, None).await?;
stats_acc.add_with_block_meta(block_meta, block_statistics)?;
}
Expand Down
17 changes: 10 additions & 7 deletions src/query/service/tests/it/storages/fuse/table_test_fixture.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ use common_pipeline_core::SourcePipeBuilder;
use common_planner::plans::CreateDatabasePlan;
use common_storage::StorageFsConfig;
use common_storage::StorageParams;
use common_storages_fuse::FUSE_TBL_XOR_BLOOM_INDEX_PREFIX;
use common_streams::SendableDataBlockStream;
use databend_query::interpreters::append2table;
use databend_query::interpreters::execute_pipeline;
Expand All @@ -44,7 +45,6 @@ use databend_query::sql::Planner;
use databend_query::sql::OPT_KEY_DATABASE_ID;
use databend_query::storages::fuse::table_functions::ClusteringInformationTable;
use databend_query::storages::fuse::table_functions::FuseSnapshotTable;
use databend_query::storages::fuse::FUSE_TBL_BLOCK_INDEX_PREFIX;
use databend_query::storages::fuse::FUSE_TBL_BLOCK_PREFIX;
use databend_query::storages::fuse::FUSE_TBL_SEGMENT_PREFIX;
use databend_query::storages::fuse::FUSE_TBL_SNAPSHOT_PREFIX;
Expand Down Expand Up @@ -454,18 +454,21 @@ pub async fn check_data_dir(
let prefix_snapshot = FUSE_TBL_SNAPSHOT_PREFIX;
let prefix_segment = FUSE_TBL_SEGMENT_PREFIX;
let prefix_block = FUSE_TBL_BLOCK_PREFIX;
let prefix_index = FUSE_TBL_BLOCK_INDEX_PREFIX;
let prefix_index = FUSE_TBL_XOR_BLOOM_INDEX_PREFIX;
for entry in WalkDir::new(root) {
let entry = entry.unwrap();
if entry.file_type().is_file() {
// here, by checking if "contains" the prefix is enough
if entry.path().to_str().unwrap().contains(prefix_snapshot) {
let (_, entry_path) = entry.path().to_str().unwrap().split_at(root.len());
// trim the leading prefix, e.g. "/db_id/table_id/"
let path = entry_path.split('/').skip(3).collect::<Vec<_>>();
let path = path[0];
if path.starts_with(prefix_snapshot) {
ss_count += 1;
} else if entry.path().to_str().unwrap().contains(prefix_segment) {
} else if path.starts_with(prefix_segment) {
sg_count += 1;
} else if entry.path().to_str().unwrap().contains(prefix_block) {
} else if path.starts_with(prefix_block) {
b_count += 1;
} else if entry.path().to_str().unwrap().contains(prefix_index) {
} else if path.starts_with(prefix_index) {
i_count += 1;
}
}
Expand Down
Loading