Removing conflicting crates (#1)
* remove zstd

* remove lz4

* remove gz and fix others

* remove filetime/tar

* remove perf from runtime

* fix accountsdb const

* comment out solana perf

* remove symlink

* update cargo lock

* remove flate2 again

* return bz instead of hz

* try brackets

* revert

return removed and comment
DudessaPr committed Jul 10, 2024
1 parent 279da70 commit fdbfe54
Showing 24 changed files with 737 additions and 727 deletions.
20 changes: 0 additions & 20 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion account-decoder/Cargo.toml
@@ -26,7 +26,7 @@ spl-token-2022 = { workspace = true, features = ["no-entrypoint"] }
spl-token-group-interface = { workspace = true }
spl-token-metadata-interface = { workspace = true }
thiserror = { workspace = true }
zstd = { workspace = true }
# zstd = { workspace = true }

[dev-dependencies]
assert_matches = { workspace = true }
54 changes: 27 additions & 27 deletions account-decoder/src/lib.rs
@@ -27,7 +27,7 @@ use {
pubkey::Pubkey,
},
std::{
io::{Read, Write},
// io::{Read, Write},
str::FromStr,
},
};
@@ -65,15 +65,15 @@ impl UiAccountData {
UiAccountData::Binary(blob, encoding) => match encoding {
UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
UiAccountEncoding::Base64 => BASE64_STANDARD.decode(blob).ok(),
UiAccountEncoding::Base64Zstd => {
BASE64_STANDARD.decode(blob).ok().and_then(|zstd_data| {
let mut data = vec![];
zstd::stream::read::Decoder::new(zstd_data.as_slice())
.and_then(|mut reader| reader.read_to_end(&mut data))
.map(|_| data)
.ok()
})
}
// UiAccountEncoding::Base64Zstd => {
// BASE64_STANDARD.decode(blob).ok().and_then(|zstd_data| {
// let mut data = vec![];
// zstd::stream::read::Decoder::new(zstd_data.as_slice())
// .and_then(|mut reader| reader.read_to_end(&mut data))
// .map(|_| data)
// .ok()
// })
// }
UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
},
}
@@ -87,8 +87,8 @@ pub enum UiAccountEncoding {
Base58,
Base64,
JsonParsed,
#[serde(rename = "base64+zstd")]
Base64Zstd,
// #[serde(rename = "base64+zstd")]
// Base64Zstd, // Unsupported by svm-rollup
}

impl UiAccount {
@@ -125,21 +125,21 @@ impl UiAccount {
BASE64_STANDARD.encode(slice_data(account.data(), data_slice_config)),
encoding,
),
UiAccountEncoding::Base64Zstd => {
let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
match encoder
.write_all(slice_data(account.data(), data_slice_config))
.and_then(|()| encoder.finish())
{
Ok(zstd_data) => {
UiAccountData::Binary(BASE64_STANDARD.encode(zstd_data), encoding)
}
Err(_) => UiAccountData::Binary(
BASE64_STANDARD.encode(slice_data(account.data(), data_slice_config)),
UiAccountEncoding::Base64,
),
}
}
// UiAccountEncoding::Base64Zstd => {
// let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
// match encoder
// .write_all(slice_data(account.data(), data_slice_config))
// .and_then(|()| encoder.finish())
// {
// Ok(zstd_data) => {
// UiAccountData::Binary(BASE64_STANDARD.encode(zstd_data), encoding)
// }
// Err(_) => UiAccountData::Binary(
// BASE64_STANDARD.encode(slice_data(account.data(), data_slice_config)),
// UiAccountEncoding::Base64,
// ),
// }
// }
UiAccountEncoding::JsonParsed => {
if let Ok(parsed_data) =
parse_account_data_v2(pubkey, account.owner(), account.data(), additional_data)
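
With the base64+zstd arm commented out, `UiAccountData::decode` can only recover raw bytes from base58 and base64 blobs; a `base64+zstd` payload now falls through to `None`, and encoding always stays on plain base64. A minimal standalone sketch of the surviving decode path, assuming the same `bs58` and `base64` crates used above (`Encoding` is a stand-in for `UiAccountEncoding`, not part of the commit):

// Sketch of the decode arms that remain after this commit; Base64Zstd is gone,
// so only plain base58 and base64 blobs can be turned back into bytes.
use base64::prelude::*; // BASE64_STANDARD, as in account-decoder/src/lib.rs

enum Encoding {
    Base58,
    Base64,
    // Base64Zstd removed: the crate no longer links zstd
}

fn decode_blob(blob: &str, encoding: Encoding) -> Option<Vec<u8>> {
    match encoding {
        Encoding::Base58 => bs58::decode(blob).into_vec().ok(),
        Encoding::Base64 => BASE64_STANDARD.decode(blob).ok(),
    }
}

fn main() {
    // "hello" encoded as base64
    assert_eq!(decode_blob("aGVsbG8=", Encoding::Base64), Some(b"hello".to_vec()));
}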
6 changes: 3 additions & 3 deletions accounts-db/Cargo.toml
@@ -14,15 +14,15 @@ bincode = { workspace = true }
blake3 = { workspace = true }
bv = { workspace = true, features = ["serde"] }
bytemuck = { workspace = true }
bzip2 = { workspace = true }
# bzip2 = { workspace = true }
crossbeam-channel = { workspace = true }
dashmap = { workspace = true, features = ["rayon", "raw-api"] }
index_list = { workspace = true }
indexmap = { workspace = true }
itertools = { workspace = true }
lazy_static = { workspace = true }
log = { workspace = true }
lz4 = { workspace = true }
# lz4 = { workspace = true }
memmap2 = { workspace = true }
modular-bitfield = { workspace = true }
num_cpus = { workspace = true }
@@ -47,7 +47,7 @@ solana-stake-program = { workspace = true, optional = true }
solana-svm = { workspace = true }
solana-vote-program = { workspace = true, optional = true }
static_assertions = { workspace = true }
tar = { workspace = true }
# tar = { workspace = true }
tempfile = { workspace = true }
thiserror = { workspace = true }

1 change: 1 addition & 0 deletions accounts-db/src/accounts_cache.rs
@@ -176,6 +176,7 @@ impl AccountsCache {
is_frozen: AtomicBool::default(),
})
}
#[allow(dead_code)]
fn unique_account_writes_size(&self) -> u64 {
self.cache
.iter()
28 changes: 16 additions & 12 deletions accounts-db/src/accounts_db.rs
@@ -22,6 +22,7 @@ mod geyser_plugin_utils

#[cfg(feature = "dev-context-only-utils")]
use qualifier_attr::qualifiers;
#[allow(unused_imports)]
use {
crate::{
account_info::{AccountInfo, StorageLocation},
@@ -666,6 +667,7 @@ struct SlotIndexGenerationInfo {
}

#[derive(Default, Debug)]
#[allow(dead_code)]
struct GenerateIndexTimings {
pub total_time_us: u64,
pub index_time: u64,
@@ -696,7 +698,7 @@ struct StorageSizeAndCount {
type StorageSizeAndCountMap = DashMap<AccountsFileId, StorageSizeAndCount>;

impl GenerateIndexTimings {
pub fn report(&self, startup_stats: &StartupStats) {
pub fn report(&self, _startup_stats: &StartupStats) {
// datapoint_info!(
// "generate_index",
// ("overall_us", self.total_time_us, i64),
@@ -1503,6 +1505,7 @@ pub struct AccountsDb {
}

#[derive(Debug, Default)]
#[allow(dead_code)]
pub struct AccountsStats {
delta_hash_scan_time_total_us: AtomicU64,
delta_hash_accumulate_time_total_us: AtomicU64,
@@ -1547,7 +1550,7 @@ pub struct PurgeStats {
}

impl PurgeStats {
fn report(&self, metric_name: &'static str, report_interval_ms: Option<u64>) {
fn report(&self, _metric_name: &'static str, report_interval_ms: Option<u64>) {
let should_report = report_interval_ms
.map(|report_interval_ms| self.last_report.should_update(report_interval_ms))
.unwrap_or(true);
@@ -3258,7 +3261,7 @@ impl AccountsDb {
}
sort.stop();

let total_keys_count = pubkeys.len();
let _total_keys_count = pubkeys.len();
let mut accounts_scan = Measure::start("accounts_scan");
let uncleaned_roots = self.accounts_index.clone_uncleaned_roots();
let found_not_zero_accum = AtomicU64::new(0);
@@ -4293,6 +4296,7 @@ impl AccountsDb {
/// first tuple element: the filtered-down candidates and
/// second duple element: the candidates which
/// are skipped in this round and might be eligible for the future shrink.
#[allow(unused_variables)]
fn select_candidates_by_total_usage(
&self,
shrink_slots: &ShrinkCandidates,
@@ -4825,7 +4829,7 @@ impl AccountsDb {

let mut measure_shrink_all_candidates = Measure::start("shrink_all_candidate_slots-ms");
let num_candidates = shrink_slots.len();
let shrink_candidates_count = shrink_slots.len();
let _shrink_candidates_count = shrink_slots.len();
self.thread_pool_clean.install(|| {
shrink_slots
.into_par_iter()
@@ -5399,7 +5403,7 @@ impl AccountsDb {
// The latest version of the account existed in the index, but could not be
// fetched from storage. This means a race occurred between this function and clean
// accounts/purge_slots
let message = format!(
let _message = format!(
"do_load() failed to get key: {pubkey} from storage, latest attempt was for \
slot: {slot}, storage_location: {storage_location:?}, load_hint: {load_hint:?}",
);
@@ -6307,7 +6311,7 @@ impl AccountsDb {
// Note even if force_flush is false, we will still flush all roots <= the
// given `requested_flush_root`, even if some of the later roots cannot be used for
// cleaning due to an ongoing scan
let (total_new_cleaned_roots, num_cleaned_roots_flushed, mut flush_stats) = self
let (_total_new_cleaned_roots, _num_cleaned_roots_flushed, mut flush_stats) = self
.flush_rooted_accounts_cache(
requested_flush_root,
Some((&mut account_bytes_saved, &mut num_accounts_saved)),
@@ -6319,7 +6323,7 @@ impl AccountsDb {
// banks

// If 'should_aggressively_flush_cache', then flush the excess ones to storage
let (total_new_excess_roots, num_excess_roots_flushed, flush_stats_aggressively) =
let (_total_new_excess_roots, _num_excess_roots_flushed, flush_stats_aggressively) =
if self.should_aggressively_flush_cache() {
// Start by flushing the roots
//
@@ -6332,12 +6336,12 @@ impl AccountsDb {
};
flush_stats.accumulate(&flush_stats_aggressively);

let mut excess_slot_count = 0;
let mut _excess_slot_count = 0;
let mut unflushable_unrooted_slot_count = 0;
let max_flushed_root = self.accounts_cache.fetch_max_flush_root();
if self.should_aggressively_flush_cache() {
let old_slots = self.accounts_cache.cached_frozen_slots();
excess_slot_count = old_slots.len();
_excess_slot_count = old_slots.len();
let mut flush_stats = FlushStats::default();
old_slots.into_iter().for_each(|old_slot| {
// Don't flush slots that are known to be unrooted
@@ -6721,7 +6725,7 @@ impl AccountsDb {
"total_stores: {total_count}, newest_slot: {newest_slot}, oldest_slot: {oldest_slot}"
);

let total_alive_ratio = if total_bytes > 0 {
let _total_alive_ratio = if total_bytes > 0 {
total_alive_bytes as f64 / total_bytes as f64
} else {
0.
@@ -6855,7 +6859,7 @@ impl AccountsDb {
let total_lamports = *total_lamports.lock().unwrap();

let mut hash_time = Measure::start("hash");
let (accumulated_hash, hash_total) = AccountsHasher::calculate_hash(account_hashes);
let (accumulated_hash, _hash_total) = AccountsHasher::calculate_hash(account_hashes);
hash_time.stop();

// datapoint_info!(
@@ -8414,7 +8418,7 @@ impl AccountsDb {

fn report_store_timings(&self) {
if self.stats.last_store_report.should_update(1000) {
let read_cache_stats = self.read_only_accounts_cache.get_and_reset_stats();
let _read_cache_stats = self.read_only_accounts_cache.get_and_reset_stats();
// datapoint_info!(
// "accounts_db_store_timings",
// (
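
A recurring pattern across the accounts_db.rs hunks above: the datapoint_info! metrics calls are commented out, so the values that fed them become unused, and the commit silences the resulting warnings by prefixing bindings with an underscore and adding #[allow(dead_code)] to stats structs. A minimal illustration of that pattern, using hypothetical names rather than types from the commit:

// Hypothetical stats struct: its fields were only read by the metrics macro,
// so once that call is commented out they would otherwise trigger dead-code lints.
#[derive(Default, Debug)]
#[allow(dead_code)]
struct FlushStatsSketch {
    num_flushed: u64,
    num_purged: u64,
}

fn report(stats: &FlushStatsSketch) {
    // Keep the computation but prefix the binding with `_` so the
    // unused-variable lint stays quiet while the datapoint is disabled.
    let _total = stats.num_flushed + stats.num_purged;
    // datapoint_info!("flush_stats", ("total", _total, i64));
}

fn main() {
    report(&FlushStatsSketch::default());
}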
1 change: 1 addition & 0 deletions accounts-db/src/accounts_index.rs
@@ -991,6 +991,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
// Scan accounts and return latest version of each account that is either:
// 1) rooted or
// 2) present in ancestors
#[allow(unused_variables)]
fn do_scan_accounts<F, R>(
&self,
metric_name: &'static str,
2 changes: 1 addition & 1 deletion accounts-db/src/active_stats.rs
@@ -64,7 +64,7 @@ impl ActiveStats {
ActiveStatItem::HashMerkleTree => &self.hash_merkle,
ActiveStatItem::HashScan => &self.hash_scan,
};
let value = modify_stat(stat);
let _value = modify_stat(stat);
// match item {
// ActiveStatItem::Clean => datapoint_info!("accounts_db_active", ("clean", value, i64)),
// ActiveStatItem::SquashAncient => {
7 changes: 4 additions & 3 deletions accounts-db/src/bucket_map_holder_stats.rs
@@ -154,7 +154,7 @@ impl BucketMapHolderStats {
)
}
}

#[allow(dead_code)]
fn calc_percent(ms: u64, elapsed_ms: u64) -> f32 {
if elapsed_ms == 0 {
0.0
@@ -185,7 +185,8 @@ impl BucketMapHolderStats {
+ self.held_in_mem.ref_count.load(Ordering::Relaxed);
in_mem.saturating_sub(held_in_mem) as usize
}

#[allow(unused_variables)]
#[allow(unused_mut)]
pub fn report_stats<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>>(
&self,
storage: &BucketMapHolder<T, U>,
@@ -216,7 +217,7 @@ impl BucketMapHolderStats {
.unwrap_or_default();
let in_mem_stats = Self::get_stats(in_mem_per_bucket_counts);
let disk_stats = Self::get_stats(disk_per_bucket_counts);

#[allow(dead_code)]
const US_PER_MS: u64 = 1_000;

// all metrics during startup are written to a different data point
1 change: 1 addition & 0 deletions accounts-db/src/cache_hash_data_stats.rs
@@ -1,4 +1,5 @@
//! Cached data for hashing accounts
#[allow(unused_imports)]
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};

#[derive(Default, Debug)]