From 732e7bfc5c6c0e53daf96eea31b72857d3fa2d84 Mon Sep 17 00:00:00 2001 From: Max Tropets Date: Wed, 26 Jun 2024 12:56:27 +0000 Subject: [PATCH 1/9] Historical cache auto-shrink --- .daily_canary | 9 +- doc/build_apps/api.rst | 2 +- include/ccf/historical_queries_interface.h | 9 + samples/apps/logging/logging.cpp | 9 + src/node/historical_queries.h | 186 +++++++++++++++++++-- src/node/rpc/test/node_stub.h | 2 + src/node/test/historical_queries.cpp | 76 +++++++++ 7 files changed, 278 insertions(+), 15 deletions(-) diff --git a/.daily_canary b/.daily_canary index a02a115d02e2..a05873eea162 100644 --- a/.daily_canary +++ b/.daily_canary @@ -3,4 +3,11 @@ ( V ) / . \ | +---=---' /--x-m- /--n-n---xXx--/--yY------>>>----<<<>>]]{{}}---||-/\---.. 2024__ -!..! \ No newline at end of file +!..! + + /""\ , + <<>^ L____/| + `) /` , / + \ `---' / + `'";\)` + _/_Y \ No newline at end of file diff --git a/doc/build_apps/api.rst b/doc/build_apps/api.rst index 3383e92df673..2fd3bd237d9f 100644 --- a/doc/build_apps/api.rst +++ b/doc/build_apps/api.rst @@ -125,7 +125,7 @@ Historical Queries .. doxygenclass:: ccf::historical::AbstractStateCache :project: CCF - :members: set_default_expiry_duration, get_state_at, get_store_at, get_store_range, drop_cached_states + :members: set_default_expiry_duration, set_soft_cache_limit, get_state_at, get_store_at, get_store_range, drop_cached_states .. doxygenstruct:: ccf::historical::State :project: CCF diff --git a/include/ccf/historical_queries_interface.h b/include/ccf/historical_queries_interface.h index 32a2a4e8d91e..482694588a7a 100644 --- a/include/ccf/historical_queries_interface.h +++ b/include/ccf/historical_queries_interface.h @@ -49,6 +49,8 @@ namespace ccf::historical using ExpiryDuration = std::chrono::seconds; + using CacheSize = size_t; + /** Stores the progress of historical query requests. * * A request will generally need to be made multiple times (with the same @@ -79,6 +81,13 @@ namespace ccf::historical virtual void set_default_expiry_duration( ExpiryDuration seconds_until_expiry) = 0; + /** Set the cache limit (in bytes) to evict least recently used requests + * from the cache after it's size grows beyond this limit. The limit is not + * strict. It is estimated based on serialized states' sizes approximation + * and is checked once per tick, and so it can overflow for a short time. + */ + virtual void set_soft_cache_limit(CacheSize cache_limit) = 0; + /** EXPERIMENTAL: Set the tracking of deletes on missing keys for historical * queries. * diff --git a/samples/apps/logging/logging.cpp b/samples/apps/logging/logging.cpp index d11a1cf490e0..0057c167f7ec 100644 --- a/samples/apps/logging/logging.cpp +++ b/samples/apps/logging/logging.cpp @@ -460,6 +460,15 @@ namespace loggingapp PUBLIC_RECORDS, context, 10000, 20); context.get_indexing_strategies().install_strategy(index_per_public_key); + // According to manual obvervation it's enough to start evicting old + // requests on historical perf test, but not to small to get stuck because + // of a single request being larget than the cache. + // + // A much better approach would be to create a new endpoint for + // configuring the limit online, but I won't do it now. 
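    // For scale: the cache divides this soft limit by its internal
    // soft-to-raw ratio (currently 5), so a 10 MiB limit starts evicting
    // least-recently-used requests once roughly 2 MiB of raw serialised
    // ledger entries are held; the check runs once per tick, so the limit
    // can be exceeded briefly.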
+ constexpr size_t cache_limit = 1024 * 1024 * 10; // MB + context.get_historical_state().set_soft_cache_limit(cache_limit); + const ccf::AuthnPolicies auth_policies = { ccf::jwt_auth_policy, ccf::user_cert_auth_policy, diff --git a/src/node/historical_queries.h b/src/node/historical_queries.h index bfc30b5be321..452766601b97 100644 --- a/src/node/historical_queries.h +++ b/src/node/historical_queries.h @@ -64,6 +64,7 @@ FMT_END_NAMESPACE namespace ccf::historical { static constexpr auto slow_fetch_threshold = std::chrono::milliseconds(1000); + static constexpr size_t soft_to_raw_ratio{5}; static std::optional get_signature( const kv::StorePtr& sig_store) @@ -95,10 +96,9 @@ namespace ccf::historical // whether to keep all the writes so that we can build a diff later bool track_deletes_on_missing_keys_v = false; - enum class RequestStage + enum class StoreStage { Fetching, - Untrusted, Trusted, }; @@ -132,7 +132,7 @@ namespace ccf::historical struct StoreDetails { std::chrono::milliseconds time_until_fetch = {}; - RequestStage current_stage = RequestStage::Fetching; + StoreStage current_stage = StoreStage::Fetching; ccf::crypto::Sha256Hash entry_digest = {}; ccf::ClaimsDigest claims_digest = {}; kv::StorePtr store = nullptr; @@ -234,11 +234,13 @@ namespace ccf::historical return {}; } - void adjust_ranges( + std::pair, std::vector> adjust_ranges( const SeqNoCollection& new_seqnos, bool should_include_receipts, SeqNo earliest_ledger_secret_seqno) { + std::vector removed{}, probably_added{}; + bool any_diff = false; // If a seqno is earlier than the earliest known ledger secret, we will @@ -266,6 +268,7 @@ namespace ccf::historical { // No longer looking for a seqno which was previously requested. // Remove it from my_stores + removed.push_back(prev_it->first); prev_it = my_stores.erase(prev_it); any_diff |= true; } @@ -279,6 +282,7 @@ namespace ccf::historical { // If this is too early for known secrets, just record that it // was requested but don't add it to all_stores yet + probably_added.push_back(*new_it); prev_it = my_stores.insert_or_assign(prev_it, *new_it, nullptr); any_too_early = true; } @@ -293,6 +297,7 @@ namespace ccf::historical details = std::make_shared(); all_stores.insert_or_assign(all_it, *new_it, details); } + probably_added.push_back(*new_it); prev_it = my_stores.insert_or_assign(prev_it, *new_it, details); } any_diff |= true; @@ -311,7 +316,7 @@ namespace ccf::historical if (!any_diff && (should_include_receipts == include_receipts)) { HISTORICAL_LOG("Identical to previous request"); - return; + return {removed, probably_added}; } include_receipts = should_include_receipts; @@ -331,6 +336,7 @@ namespace ccf::historical populate_receipts(seqno); } } + return {removed, probably_added}; } void populate_receipts(ccf::SeqNo new_seqno) @@ -493,6 +499,135 @@ namespace ccf::historical ExpiryDuration default_expiry_duration = std::chrono::seconds(1800); + // These two combine into an effective O(1) lookup/add/remove by handle. + std::list lru_requests; + std::map::iterator> lru_lookup; + + // To maintain the estimated size consumed by all requests. Gets updated + // when ledger entries are fetched, and when requests are dropped. 
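    // store_to_requests: which request handles still reference each fetched
    // seqno. raw_store_sizes: serialised ledger entry size per seqno.
    // estimated_store_cache_size: sum of raw sizes for seqnos referenced by
    // at least one live request, compared against the raw limit every tick.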
+ std::unordered_map> store_to_requests; + std::unordered_map raw_store_sizes{}; + + CacheSize soft_store_cache_limit{1ll * 1024 * 1024 * 512 /*512 MB*/}; + CacheSize soft_store_cache_limit_raw = + soft_store_cache_limit / soft_to_raw_ratio; + CacheSize estimated_store_cache_size{0}; + + void add_request_ref(SeqNo seq, CompoundHandle handle) + { + auto it = store_to_requests.find(seq); + + if (it == store_to_requests.end()) + { + store_to_requests.insert({seq, {handle}}); + auto size = raw_store_sizes.find(seq); + if (size != raw_store_sizes.end()) + { + estimated_store_cache_size += size->second; + } + } + else + { + it->second.insert(handle); + } + } + + void add_request_refs(CompoundHandle handle) + { + for (const auto& [seq, _] : requests.at(handle).my_stores) + { + add_request_ref(seq, handle); + } + } + + void remove_request_ref(SeqNo seq, CompoundHandle handle) + { + auto it = store_to_requests.find(seq); + assert(it != store_to_requests.end()); + + it->second.erase(handle); + if (it->second.empty()) + { + store_to_requests.erase(it); + auto size = raw_store_sizes.find(seq); + if (size != raw_store_sizes.end()) + { + estimated_store_cache_size -= size->second; + raw_store_sizes.erase(size); + } + } + } + + void remove_request_refs(CompoundHandle handle) + { + for (const auto& [seq, _] : requests.at(handle).my_stores) + { + remove_request_ref(seq, handle); + } + } + + void lru_promote(CompoundHandle handle) + { + auto it = lru_lookup.find(handle); + if (it != lru_lookup.end()) + { + lru_requests.erase(it->second); + it->second = lru_requests.insert(lru_requests.begin(), handle); + } + else + { + lru_lookup[handle] = lru_requests.insert(lru_requests.begin(), handle); + add_request_refs(handle); + } + } + + void lru_shrink_to_fit(size_t threshold) + { + while (estimated_store_cache_size > threshold) + { + if (lru_requests.empty()) + { + LOG_FAIL_FMT( + "LRU shrink to {} requested but cache is already empty", threshold); + return; + } + + const auto handle = lru_requests.back(); + LOG_DEBUG_FMT( + "Cache size shrinking (reached {} / {}). Dropping {}", + estimated_store_cache_size, + threshold, + handle); + + remove_request_refs(handle); + lru_lookup.erase(handle); + + requests.erase(handle); + lru_requests.pop_back(); + } + } + + void lru_evict(CompoundHandle handle) + { + auto it = lru_lookup.find(handle); + if (it != lru_lookup.end()) + { + remove_request_refs(handle); + lru_requests.erase(it->second); + lru_lookup.erase(it); + } + } + + void update_store_raw_size(SeqNo seq, size_t new_size) + { + auto& stored_size = raw_store_sizes[seq]; + assert(!stored_size || stored_size == new_size); + + estimated_store_cache_size -= stored_size; + estimated_store_cache_size += new_size; + stored_size = new_size; + } + void fetch_entry_at(ccf::SeqNo seqno) { fetch_entries_range(seqno, seqno); @@ -577,7 +712,7 @@ namespace ccf::historical { // Deserialisation includes a GCM integrity check, so all entries // have been verified by the time we get here. 
- details->current_stage = RequestStage::Trusted; + details->current_stage = StoreStage::Trusted; details->has_commit_evidence = has_commit_evidence; details->entry_digest = entry_digest; @@ -760,6 +895,8 @@ namespace ccf::historical HISTORICAL_LOG("First time I've seen handle {}", handle); } + lru_promote(handle); + Request& request = it->second; update_earliest_known_ledger_secret(); @@ -772,9 +909,18 @@ namespace ccf::historical seqnos.size(), *seqnos.begin(), include_receipts); - request.adjust_ranges( + auto [removed, probably_added] = request.adjust_ranges( seqnos, include_receipts, earliest_secret_.valid_from); + for (auto seq : removed) + { + remove_request_ref(seq, handle); + } + for (auto seq : probably_added) + { + add_request_ref(seq, handle); + } + // If the earliest target entry cannot be deserialised with the earliest // known ledger secret, record the target seqno and begin fetching the // previous historical ledger secret. @@ -789,10 +935,9 @@ namespace ccf::historical for (auto seqno : seqnos) { auto target_details = request.get_store_details(seqno); - if ( target_details != nullptr && - target_details->current_stage == RequestStage::Trusted && + target_details->current_stage == StoreStage::Trusted && (!request.include_receipts || target_details->receipt != nullptr)) { // Have this store, associated txid and receipt and trust it - add @@ -823,6 +968,7 @@ namespace ccf::historical { if (request_it->second.get_store_details(seqno) != nullptr) { + lru_evict(request_it->first); request_it = requests.erase(request_it); } else @@ -977,6 +1123,12 @@ namespace ccf::historical default_expiry_duration = duration; } + void set_soft_cache_limit(CacheSize cache_limit) + { + soft_store_cache_limit = cache_limit; + soft_store_cache_limit_raw = soft_store_cache_limit / soft_to_raw_ratio; + } + void track_deletes_on_missing_keys(bool track) { track_deletes_on_missing_keys_v = track; @@ -985,8 +1137,8 @@ namespace ccf::historical bool drop_cached_states(const CompoundHandle& handle) { std::lock_guard guard(requests_lock); + lru_evict(handle); const auto erased_count = requests.erase(handle); - HISTORICAL_LOG("Dropping historical request {}", handle); return erased_count > 0; } @@ -1000,8 +1152,7 @@ namespace ccf::historical std::lock_guard guard(requests_lock); const auto it = all_stores.find(seqno); auto details = it == all_stores.end() ? 
nullptr : it->second.lock(); - if ( - details == nullptr || details->current_stage != RequestStage::Fetching) + if (details == nullptr || details->current_stage != StoreStage::Fetching) { // Unexpected entry, we already have it or weren't asking for it - // ignore this resubmission @@ -1094,6 +1245,7 @@ namespace ccf::historical std::move(claims_digest), has_commit_evidence); + update_store_raw_size(seqno, size); return true; } @@ -1245,6 +1397,7 @@ namespace ccf::historical { LOG_DEBUG_FMT( "Dropping expired historical query with handle {}", it->first); + lru_evict(it->first); it = requests.erase(it); } else @@ -1255,6 +1408,8 @@ namespace ccf::historical } } + lru_shrink_to_fit(soft_store_cache_limit_raw); + { auto it = all_stores.begin(); std::optional> range_to_request = @@ -1268,7 +1423,7 @@ namespace ccf::historical } else { - if (details->current_stage == RequestStage::Fetching) + if (details->current_stage == StoreStage::Fetching) { details->time_until_fetch -= elapsed_ms; if (details->time_until_fetch.count() <= 0) @@ -1434,6 +1589,11 @@ namespace ccf::historical StateCacheImpl::set_default_expiry_duration(duration); } + void set_soft_cache_limit(CacheSize cache_limit) override + { + StateCacheImpl::set_soft_cache_limit(cache_limit); + } + void track_deletes_on_missing_keys(bool track) override { StateCacheImpl::track_deletes_on_missing_keys(track); diff --git a/src/node/rpc/test/node_stub.h b/src/node/rpc/test/node_stub.h index e96efb672750..4168545c7dc7 100644 --- a/src/node/rpc/test/node_stub.h +++ b/src/node/rpc/test/node_stub.h @@ -161,6 +161,8 @@ namespace ccf historical::ExpiryDuration seconds_until_expiry) {} + void set_soft_cache_limit(historical::CacheSize cache_limit){}; + void track_deletes_on_missing_keys(bool track) {} kv::ReadOnlyStorePtr get_store_at( diff --git a/src/node/test/historical_queries.cpp b/src/node/test/historical_queries.cpp index cfd1dd3fceb6..f16b6069537b 100644 --- a/src/node/test/historical_queries.cpp +++ b/src/node/test/historical_queries.cpp @@ -948,6 +948,82 @@ TEST_CASE("Incremental progress") } } +TEST_CASE("StateCache soft zero limit with increasing") +{ + // Try get two stores. Shouldn't be able to retrieve anything with 0 cache + // limit. After increasing to the size of first store only that one is + // available, but later overwritten by the second one, and finally all are + // evicted after setting again to 0. + // + // If you change this bare in mind that each attempt to get the store promotes + // the handle, and so requests eviction order changes, therefore this test in + // particular is quite fragile. 
+ + auto state = create_and_init_state(); + auto& kv_store = *state.kv_store; + + kv::Version signature_transaction; + + { + signature_transaction = write_transactions_and_signature(kv_store, 1); + signature_transaction = write_transactions_and_signature(kv_store, 1); + REQUIRE(kv_store.current_version() == signature_transaction); + } + + auto ledger = construct_host_ledger(state.kv_store->get_consensus()); + REQUIRE(ledger.size() == signature_transaction); + + auto seq_high = signature_transaction; + auto seq_low = seq_high - 1; + + auto stub_writer = std::make_shared(); + ccf::historical::StateCache cache( + kv_store, state.ledger_secrets, stub_writer); + + cache.set_soft_cache_limit(0); + + REQUIRE(!cache.get_store_at(0, seq_low)); + cache.tick(std::chrono::milliseconds(100)); + + cache.handle_ledger_entry(seq_low, ledger.at(seq_low)); + cache.tick(std::chrono::milliseconds(100)); + + // Ledger entry fecthed and instantly removed because of the parent request + // eviction due to zero cache limit. + REQUIRE(!cache.get_store_at(0, seq_low)); + + const size_t limit_for_one_entry_only = + (ledger.at(seq_low).size() + ledger.at(seq_high).size()) * + ccf::historical::soft_to_raw_ratio - + 1; + cache.set_soft_cache_limit(limit_for_one_entry_only); + + cache.handle_ledger_entry(5, ledger.at(seq_low)); + cache.tick(std::chrono::milliseconds(100)); + + REQUIRE(cache.get_store_at(0, seq_low)); + REQUIRE(!cache.get_store_at(1, seq_high)); + + cache.tick(std::chrono::milliseconds(100)); + + cache.handle_ledger_entry(seq_high, ledger.at(seq_high)); + + // Both available because tick hasn't been called yet. + REQUIRE(cache.get_store_at(0, seq_low)); + REQUIRE(cache.get_store_at(1, seq_high)); + + cache.tick(std::chrono::milliseconds(100)); + + REQUIRE(!cache.get_store_at(0, seq_low)); + REQUIRE(cache.get_store_at(1, seq_high)); + + cache.set_soft_cache_limit(0); + cache.tick(std::chrono::milliseconds(100)); + + REQUIRE(!cache.get_store_at(0, seq_low)); + REQUIRE(!cache.get_store_at(1, seq_high)); +} + TEST_CASE("StateCache sparse queries") { auto state = create_and_init_state(); From 2876204ea0c536980b941517544a626edb5fa8a9 Mon Sep 17 00:00:00 2001 From: Max Tropets Date: Wed, 26 Jun 2024 20:34:21 +0000 Subject: [PATCH 2/9] Better tests --- .daily_canary | 2 +- src/node/test/historical_queries.cpp | 129 ++++++++++++++++++++------- 2 files changed, 98 insertions(+), 33 deletions(-) diff --git a/.daily_canary b/.daily_canary index a05873eea162..06781fc17c93 100644 --- a/.daily_canary +++ b/.daily_canary @@ -10,4 +10,4 @@ `) /` , / \ `---' / `'";\)` - _/_Y \ No newline at end of file + _/_/ \ No newline at end of file diff --git a/src/node/test/historical_queries.cpp b/src/node/test/historical_queries.cpp index f16b6069537b..ee675a0e43c2 100644 --- a/src/node/test/historical_queries.cpp +++ b/src/node/test/historical_queries.cpp @@ -236,6 +236,19 @@ std::map> construct_host_ledger( return ledger; } +size_t get_cache_limit_for_entries( + const std::vector>& entries) +{ + return ccf::historical::soft_to_raw_ratio * + std::accumulate( + entries.begin(), + entries.end(), + 0ll, + [&](size_t prev, const std::vector& entry) { + return prev + entry.size(); + }); +} + TEST_CASE("StateCache point queries") { auto state = create_and_init_state(); @@ -950,31 +963,24 @@ TEST_CASE("Incremental progress") TEST_CASE("StateCache soft zero limit with increasing") { - // Try get two stores. Shouldn't be able to retrieve anything with 0 cache - // limit. 
After increasing to the size of first store only that one is + // Try get two states. Shouldn't be able to retrieve anything with 0 cache + // limit. After increasing to the size of first state only that one is // available, but later overwritten by the second one, and finally all are // evicted after setting again to 0. // - // If you change this bare in mind that each attempt to get the store promotes - // the handle, and so requests eviction order changes, therefore this test in - // particular is quite fragile. + // ! DISCLAIMER ! If you change this bare in mind that each attempt to get the + // store promotes the handle, and so requests eviction order changes, + // therefore this test in particular is quite fragile. auto state = create_and_init_state(); auto& kv_store = *state.kv_store; - kv::Version signature_transaction; - - { - signature_transaction = write_transactions_and_signature(kv_store, 1); - signature_transaction = write_transactions_and_signature(kv_store, 1); - REQUIRE(kv_store.current_version() == signature_transaction); - } + auto seq_low = write_transactions_and_signature(kv_store, 1); + auto seq_high = write_transactions_and_signature(kv_store, 1); + REQUIRE(kv_store.current_version() == seq_high); auto ledger = construct_host_ledger(state.kv_store->get_consensus()); - REQUIRE(ledger.size() == signature_transaction); - - auto seq_high = signature_transaction; - auto seq_low = seq_high - 1; + REQUIRE(ledger.size() == seq_high); auto stub_writer = std::make_shared(); ccf::historical::StateCache cache( @@ -982,7 +988,7 @@ TEST_CASE("StateCache soft zero limit with increasing") cache.set_soft_cache_limit(0); - REQUIRE(!cache.get_store_at(0, seq_low)); + REQUIRE(!cache.get_state_at(0, seq_low)); cache.tick(std::chrono::milliseconds(100)); cache.handle_ledger_entry(seq_low, ledger.at(seq_low)); @@ -990,38 +996,97 @@ TEST_CASE("StateCache soft zero limit with increasing") // Ledger entry fecthed and instantly removed because of the parent request // eviction due to zero cache limit. - REQUIRE(!cache.get_store_at(0, seq_low)); + REQUIRE(!cache.get_state_at(0, seq_low)); - const size_t limit_for_one_entry_only = - (ledger.at(seq_low).size() + ledger.at(seq_high).size()) * - ccf::historical::soft_to_raw_ratio - - 1; - cache.set_soft_cache_limit(limit_for_one_entry_only); + cache.set_soft_cache_limit( + get_cache_limit_for_entries({ledger.at(seq_low), ledger.at(seq_high)}) - 1); - cache.handle_ledger_entry(5, ledger.at(seq_low)); + cache.handle_ledger_entry(seq_low, ledger.at(seq_low)); cache.tick(std::chrono::milliseconds(100)); - REQUIRE(cache.get_store_at(0, seq_low)); - REQUIRE(!cache.get_store_at(1, seq_high)); + REQUIRE(cache.get_state_at(0, seq_low)); + REQUIRE(!cache.get_state_at(1, seq_high)); cache.tick(std::chrono::milliseconds(100)); cache.handle_ledger_entry(seq_high, ledger.at(seq_high)); // Both available because tick hasn't been called yet. 
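  // (Eviction only happens inside tick(), which is when the LRU shrink
  // against the soft limit runs.)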
- REQUIRE(cache.get_store_at(0, seq_low)); - REQUIRE(cache.get_store_at(1, seq_high)); + REQUIRE(cache.get_state_at(0, seq_low)); + REQUIRE(cache.get_state_at(1, seq_high)); cache.tick(std::chrono::milliseconds(100)); - REQUIRE(!cache.get_store_at(0, seq_low)); - REQUIRE(cache.get_store_at(1, seq_high)); + REQUIRE(!cache.get_state_at(0, seq_low)); + REQUIRE(cache.get_state_at(1, seq_high)); cache.set_soft_cache_limit(0); cache.tick(std::chrono::milliseconds(100)); - REQUIRE(!cache.get_store_at(0, seq_low)); - REQUIRE(!cache.get_store_at(1, seq_high)); + REQUIRE(!cache.get_state_at(0, seq_low)); + REQUIRE(!cache.get_state_at(1, seq_high)); +} + +TEST_CASE("StateCache dropping state explicitly") +{ + // Adding two states to the cache (limit is hit). Drop state2 and add state3. + // State1 should remain available, as well as state3, state2 was force-evicted + // from LRU because it's been dropped explicitly. + // + // ! DISCLAIMER ! If you change this bare in mind that each attempt to get the + // store promotes the handle, and so requests eviction order changes, + // therefore this test in particular is quite fragile. + + auto state = create_and_init_state(); + auto& kv_store = *state.kv_store; + + auto seq_low = write_transactions_and_signature(kv_store, 1); + auto seq_mid = write_transactions_and_signature(kv_store, 1); + auto seq_high = write_transactions_and_signature(kv_store, 1); + REQUIRE(kv_store.current_version() == seq_high); + + auto ledger = construct_host_ledger(state.kv_store->get_consensus()); + REQUIRE(ledger.size() == seq_high); + + auto stub_writer = std::make_shared(); + ccf::historical::StateCache cache( + kv_store, state.ledger_secrets, stub_writer); + + REQUIRE(!cache.get_state_at(0, seq_low)); + REQUIRE(!cache.get_state_at(1, seq_mid)); + + cache.tick(std::chrono::milliseconds(100)); + + cache.handle_ledger_entry(seq_low, ledger.at(seq_low)); + cache.handle_ledger_entry(seq_mid, ledger.at(seq_mid)); + + REQUIRE(cache.get_state_at(0, seq_low)); + REQUIRE(cache.get_state_at(1, seq_mid)); + + cache.set_soft_cache_limit( + get_cache_limit_for_entries( + {ledger.at(seq_low), ledger.at(seq_mid), ledger.at(seq_high)}) - + 1); + + cache.tick(std::chrono::milliseconds(100)); + + REQUIRE(!cache.get_state_at(2, seq_high)); + cache.tick(std::chrono::milliseconds(100)); + + // Ledger entries not provided yet, so previous are not evicted. 
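  // (The size estimate only grows once handle_ledger_entry supplies the
  // serialised entry, so the pending seq_high fetch does not yet count
  // against the limit.)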
+ REQUIRE(cache.get_state_at(0, seq_low)); + REQUIRE(cache.get_state_at(1, seq_mid)); + REQUIRE(!cache.get_state_at(2, seq_high)); + + cache.drop_cached_states(1); + cache.tick(std::chrono::milliseconds(100)); + + cache.handle_ledger_entry(seq_high, ledger.at(seq_high)); + cache.tick(std::chrono::milliseconds(100)); + + REQUIRE(cache.get_state_at(0, seq_low)); + REQUIRE(!cache.get_state_at(1, seq_mid)); + REQUIRE(cache.get_state_at(2, seq_high)); } TEST_CASE("StateCache sparse queries") From c576d2339e5aa57208edb201cfa967bb0c6c63af Mon Sep 17 00:00:00 2001 From: Max Date: Thu, 27 Jun 2024 16:54:10 +0100 Subject: [PATCH 3/9] Update include/ccf/historical_queries_interface.h Co-authored-by: Amaury Chamayou --- include/ccf/historical_queries_interface.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/ccf/historical_queries_interface.h b/include/ccf/historical_queries_interface.h index 482694588a7a..d09b09641c7a 100644 --- a/include/ccf/historical_queries_interface.h +++ b/include/ccf/historical_queries_interface.h @@ -82,7 +82,7 @@ namespace ccf::historical ExpiryDuration seconds_until_expiry) = 0; /** Set the cache limit (in bytes) to evict least recently used requests - * from the cache after it's size grows beyond this limit. The limit is not + * from the cache after its size grows beyond this limit. The limit is not * strict. It is estimated based on serialized states' sizes approximation * and is checked once per tick, and so it can overflow for a short time. */ From bc9011d7680f923ee783063cb9d7d5c13288aa29 Mon Sep 17 00:00:00 2001 From: Max Date: Thu, 27 Jun 2024 16:54:15 +0100 Subject: [PATCH 4/9] Update samples/apps/logging/logging.cpp Co-authored-by: Amaury Chamayou --- samples/apps/logging/logging.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/apps/logging/logging.cpp b/samples/apps/logging/logging.cpp index 0057c167f7ec..880ce9c54462 100644 --- a/samples/apps/logging/logging.cpp +++ b/samples/apps/logging/logging.cpp @@ -461,7 +461,7 @@ namespace loggingapp context.get_indexing_strategies().install_strategy(index_per_public_key); // According to manual obvervation it's enough to start evicting old - // requests on historical perf test, but not to small to get stuck because + // requests on historical perf test, but not too small to get stuck because // of a single request being larget than the cache. // // A much better approach would be to create a new endpoint for From 41b4de1019bab0580ccc4acff69055ce6fba11ee Mon Sep 17 00:00:00 2001 From: Max Date: Thu, 27 Jun 2024 16:54:23 +0100 Subject: [PATCH 5/9] Update src/node/test/historical_queries.cpp Co-authored-by: Amaury Chamayou --- src/node/test/historical_queries.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/node/test/historical_queries.cpp b/src/node/test/historical_queries.cpp index ee675a0e43c2..6c4838fa0dc2 100644 --- a/src/node/test/historical_queries.cpp +++ b/src/node/test/historical_queries.cpp @@ -968,7 +968,7 @@ TEST_CASE("StateCache soft zero limit with increasing") // available, but later overwritten by the second one, and finally all are // evicted after setting again to 0. // - // ! DISCLAIMER ! If you change this bare in mind that each attempt to get the + // ! DISCLAIMER ! If you change this bear in mind that each attempt to get the // store promotes the handle, and so requests eviction order changes, // therefore this test in particular is quite fragile. 
From 003c9348712cc6fdea799cda45e6c2eb5a4f6f82 Mon Sep 17 00:00:00 2001 From: Max Date: Thu, 27 Jun 2024 16:54:47 +0100 Subject: [PATCH 6/9] Update src/node/test/historical_queries.cpp Co-authored-by: Amaury Chamayou --- src/node/test/historical_queries.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/node/test/historical_queries.cpp b/src/node/test/historical_queries.cpp index 6c4838fa0dc2..d49240faef10 100644 --- a/src/node/test/historical_queries.cpp +++ b/src/node/test/historical_queries.cpp @@ -1033,7 +1033,7 @@ TEST_CASE("StateCache dropping state explicitly") // State1 should remain available, as well as state3, state2 was force-evicted // from LRU because it's been dropped explicitly. // - // ! DISCLAIMER ! If you change this bare in mind that each attempt to get the + // ! DISCLAIMER ! If you change this bear in mind that each attempt to get the // store promotes the handle, and so requests eviction order changes, // therefore this test in particular is quite fragile. From 83f186a6cebafc9853368f3c177b1d1d2a284889 Mon Sep 17 00:00:00 2001 From: Max Date: Thu, 27 Jun 2024 16:55:01 +0100 Subject: [PATCH 7/9] Update src/node/historical_queries.h Co-authored-by: Amaury Chamayou --- src/node/historical_queries.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/node/historical_queries.h b/src/node/historical_queries.h index 452766601b97..25ce2957bd78 100644 --- a/src/node/historical_queries.h +++ b/src/node/historical_queries.h @@ -499,7 +499,7 @@ namespace ccf::historical ExpiryDuration default_expiry_duration = std::chrono::seconds(1800); - // These two combine into an effective O(1) lookup/add/remove by handle. + // These two combine into an effective O(log(N)) lookup/add/remove by handle. std::list lru_requests; std::map::iterator> lru_lookup; From c613d1c2ffc3db00843086654726f5eb661d61e8 Mon Sep 17 00:00:00 2001 From: Max Tropets Date: Thu, 27 Jun 2024 16:02:17 +0000 Subject: [PATCH 8/9] CR minor cleanup --- samples/apps/logging/logging.cpp | 7 ++----- src/node/historical_queries.h | 17 +++++++++-------- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/samples/apps/logging/logging.cpp b/samples/apps/logging/logging.cpp index 880ce9c54462..e97bc96872c2 100644 --- a/samples/apps/logging/logging.cpp +++ b/samples/apps/logging/logging.cpp @@ -461,11 +461,8 @@ namespace loggingapp context.get_indexing_strategies().install_strategy(index_per_public_key); // According to manual obvervation it's enough to start evicting old - // requests on historical perf test, but not too small to get stuck because - // of a single request being larget than the cache. - // - // A much better approach would be to create a new endpoint for - // configuring the limit online, but I won't do it now. + // requests on historical perf test, but not too small to get stuck + // because of a single request being larget than the cache. 
constexpr size_t cache_limit = 1024 * 1024 * 10; // MB context.get_historical_state().set_soft_cache_limit(cache_limit); diff --git a/src/node/historical_queries.h b/src/node/historical_queries.h index 25ce2957bd78..406d0fd8d7fc 100644 --- a/src/node/historical_queries.h +++ b/src/node/historical_queries.h @@ -239,7 +239,7 @@ namespace ccf::historical bool should_include_receipts, SeqNo earliest_ledger_secret_seqno) { - std::vector removed{}, probably_added{}; + std::vector removed{}, added{}; bool any_diff = false; @@ -282,7 +282,7 @@ namespace ccf::historical { // If this is too early for known secrets, just record that it // was requested but don't add it to all_stores yet - probably_added.push_back(*new_it); + added.push_back(*new_it); prev_it = my_stores.insert_or_assign(prev_it, *new_it, nullptr); any_too_early = true; } @@ -297,7 +297,7 @@ namespace ccf::historical details = std::make_shared(); all_stores.insert_or_assign(all_it, *new_it, details); } - probably_added.push_back(*new_it); + added.push_back(*new_it); prev_it = my_stores.insert_or_assign(prev_it, *new_it, details); } any_diff |= true; @@ -316,7 +316,7 @@ namespace ccf::historical if (!any_diff && (should_include_receipts == include_receipts)) { HISTORICAL_LOG("Identical to previous request"); - return {removed, probably_added}; + return {removed, added}; } include_receipts = should_include_receipts; @@ -336,7 +336,7 @@ namespace ccf::historical populate_receipts(seqno); } } - return {removed, probably_added}; + return {removed, added}; } void populate_receipts(ccf::SeqNo new_seqno) @@ -499,7 +499,8 @@ namespace ccf::historical ExpiryDuration default_expiry_duration = std::chrono::seconds(1800); - // These two combine into an effective O(log(N)) lookup/add/remove by handle. + // These two combine into an effective O(log(N)) lookup/add/remove by + // handle. std::list lru_requests; std::map::iterator> lru_lookup; @@ -909,14 +910,14 @@ namespace ccf::historical seqnos.size(), *seqnos.begin(), include_receipts); - auto [removed, probably_added] = request.adjust_ranges( + auto [removed, added] = request.adjust_ranges( seqnos, include_receipts, earliest_secret_.valid_from); for (auto seq : removed) { remove_request_ref(seq, handle); } - for (auto seq : probably_added) + for (auto seq : added) { add_request_ref(seq, handle); } From 30e97343bfe0e863e30493b3527a890fbce105a7 Mon Sep 17 00:00:00 2001 From: Max Tropets Date: Thu, 27 Jun 2024 16:02:39 +0000 Subject: [PATCH 9/9] Poke --- .daily_canary | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.daily_canary b/.daily_canary index 06781fc17c93..7408758c7576 100644 --- a/.daily_canary +++ b/.daily_canary @@ -10,4 +10,4 @@ `) /` , / \ `---' / `'";\)` - _/_/ \ No newline at end of file + _/_/_ \ No newline at end of file
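
For reference, a minimal sketch of how an application can opt into the soft
cache limit added by this series. Only set_soft_cache_limit, CacheSize and the
header path come from the patch; the helper name and the 64 MiB figure are
illustrative.

#include "ccf/historical_queries_interface.h"

// Pick a byte budget for cached historical states and hand it to the state
// cache. Least-recently-used requests are evicted once the estimated size
// exceeds the budget; the check runs once per tick, so the limit is soft.
void configure_historical_cache(ccf::historical::AbstractStateCache& cache)
{
  constexpr ccf::historical::CacheSize soft_limit = 64ull * 1024 * 1024;
  cache.set_soft_cache_limit(soft_limit);
}

Internally the byte limit is divided by soft_to_raw_ratio (5) before being
compared with the sum of raw serialised ledger-entry sizes, so the default
512 MB limit corresponds to roughly 102 MB of raw entries before eviction
starts.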