Skip to content

Commit

Permalink
Add trace logs to major public APIs and critical operations
Browse files Browse the repository at this point in the history
  • Loading branch information
umegane committed Dec 17, 2024
1 parent 27cd7df commit c5bcaf4
Show file tree
Hide file tree
Showing 2 changed files with 30 additions and 0 deletions.
9 changes: 9 additions & 0 deletions src/limestone/datastore.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,7 @@ log_channel& datastore::create_channel(const boost::filesystem::path& location)
epoch_id_type datastore::last_epoch() const noexcept { return static_cast<epoch_id_type>(epoch_id_informed_.load()); }

void datastore::switch_epoch(epoch_id_type new_epoch_id) {
VLOG(50) << "start switch_epoch() with new_epoch_id=" << new_epoch_id;
try {
check_after_ready(static_cast<const char*>(__func__));
auto neid = static_cast<std::uint64_t>(new_epoch_id);
Expand All @@ -224,14 +225,18 @@ void datastore::switch_epoch(epoch_id_type new_epoch_id) {
} catch (...) {
HANDLE_EXCEPTION_AND_ABORT();
}
VLOG(50) << "end switch_epoch() with new_epoch_id=" << new_epoch_id;
}

void datastore::update_min_epoch_id(bool from_switch_epoch) { // NOLINT(readability-function-cognitive-complexity)

VLOG(50) << "start update_min_epoch_id() with from_switch_epoch=" << from_switch_epoch;
auto upper_limit = epoch_id_switched_.load();
if (upper_limit == 0) {
return; // If epoch_id_switched_ is zero, it means no epoch has been switched, so updating epoch_id_to_be_recorded_ and epoch_id_informed_ is unnecessary.
}
upper_limit--;

epoch_id_type max_finished_epoch = 0;

for (const auto& e : log_channels_) {
Expand All @@ -247,6 +252,8 @@ void datastore::update_min_epoch_id(bool from_switch_epoch) { // NOLINT(readabi
}
}

VLOG(50) << "epoch_id_switched_ = " << epoch_id_switched_.load() << ", upper_limit = " << upper_limit << ", max_finished_epoch = " << max_finished_epoch;

// update recorded_epoch_
auto to_be_epoch = upper_limit;
if (from_switch_epoch && (to_be_epoch > static_cast<std::uint64_t>(max_finished_epoch))) {
Expand Down Expand Up @@ -285,7 +292,9 @@ void datastore::update_min_epoch_id(bool from_switch_epoch) { // NOLINT(readabi
break;
}
if (persistent_callback_) {
VLOG(50) << "start calling persistent callback to " << to_be_epoch;
persistent_callback_(to_be_epoch);
VLOG(50) << "end calling persistent callback to " << to_be_epoch;
}
}
{
Expand Down
21 changes: 21 additions & 0 deletions src/limestone/log_channel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ void log_channel::begin_session() {
current_epoch_id_.store(envelope_.epoch_id_switched_.load());
std::atomic_thread_fence(std::memory_order_acq_rel);
} while (current_epoch_id_.load() != envelope_.epoch_id_switched_.load());
VLOG(50) << "start begin_session() with current_epoch_id_=" << current_epoch_id_.load();

auto log_file = file_path();
strm_ = fopen(log_file.c_str(), "a"); // NOLINT(*-owning-memory)
Expand All @@ -71,13 +72,16 @@ void log_channel::begin_session() {
registered_ = true;
}
log_entry::begin_session(strm_, static_cast<epoch_id_type>(current_epoch_id_.load()));
VLOG(50) << "end begin_session() with current_epoch_id_=" << current_epoch_id_.load();
} catch (...) {
VLOG(50) << "abort begin_session() with current_epoch_id_=" << current_epoch_id_.load();
HANDLE_EXCEPTION_AND_ABORT();
}
}

void log_channel::end_session() {
try {
VLOG(50) << "start end_session() with current_epoch_id_=" << current_epoch_id_.load();
if (fflush(strm_) != 0) {
LOG_AND_THROW_IO_EXCEPTION("fflush failed", errno);
}
Expand All @@ -91,7 +95,9 @@ void log_channel::end_session() {
if (fclose(strm_) != 0) { // NOLINT(*-owning-memory)
LOG_AND_THROW_IO_EXCEPTION("fclose failed", errno);
}
VLOG(50) << "end end_session() with current_epoch_id_=" << current_epoch_id_.load();
} catch (...) {
VLOG(50) << "abort end_session() with current_epoch_id_=" << current_epoch_id_.load();
HANDLE_EXCEPTION_AND_ABORT();
}
}
Expand All @@ -106,52 +112,67 @@ void log_channel::abort_session([[maybe_unused]] status status_code, [[maybe_unu
}

// Appends a single key/value entry to this channel's log stream and, on
// success, records the entry's write version in write_version_.
// Emits VLOG(50) traces on entry, exit, and failure. Any exception is routed
// through HANDLE_EXCEPTION_AND_ABORT — judging by its name it terminates the
// process, so the trailing "end" trace should be unreachable on the error
// path (TODO confirm the macro does not return).
// NOTE(review): `key` is logged verbatim; if keys may contain binary data the
// trace output could be unreadable — confirm against callers.
void log_channel::add_entry(storage_id_type storage_id, std::string_view key, std::string_view value, write_version_type write_version) {
VLOG(50) << "start add_entry() with storage_id=" << storage_id << ", key=" << key << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
try {
// Serialize first; write_version_ is updated only if the write does not
// throw, so a failed write leaves the previously recorded version intact.
log_entry::write(strm_, storage_id, key, value, write_version);
write_version_ = write_version;
} catch (...) {
VLOG(50) << "abort add_entry() with storage_id=" << storage_id << ", key=" << key << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
HANDLE_EXCEPTION_AND_ABORT();
}
VLOG(50) << "end add_entry() with storage_id=" << storage_id << ", key=" << key << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
}

// Overload of add_entry() that additionally accepts large (BLOB-like)
// objects. Not yet implemented: always throws via LOG_AND_THROW_EXCEPTION,
// so all parameters are deliberately [[maybe_unused]].
// @throws whatever LOG_AND_THROW_EXCEPTION raises ("not implemented").
void log_channel::add_entry([[maybe_unused]] storage_id_type storage_id, [[maybe_unused]] std::string_view key, [[maybe_unused]] std::string_view value, [[maybe_unused]] write_version_type write_version, [[maybe_unused]] const std::vector<large_object_input>& large_objects) {
    LOG_AND_THROW_EXCEPTION("not implemented"); // FIXME
}

// Appends a "remove" record for `key` to this channel's log stream and, on
// success, records the write version in write_version_.
// VLOG(50) traces mark entry/exit/failure; exceptions are funneled into
// HANDLE_EXCEPTION_AND_ABORT (presumably process-terminating — TODO confirm).
void log_channel::remove_entry(storage_id_type storage_id, std::string_view key, write_version_type write_version) {
VLOG(50) << "start remove_entry() with storage_id=" << storage_id << ", key=" << key << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
try {
// Serialize first; only a successful write updates write_version_.
log_entry::write_remove(strm_, storage_id, key, write_version);
write_version_ = write_version;
} catch (...) {
VLOG(50) << "abort remove_entry() with storage_id=" << storage_id << ", key=" << key << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
HANDLE_EXCEPTION_AND_ABORT();
}
VLOG(50) << "end remove_entry() with storage_id=" << storage_id << ", key=" << key << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
}

// Appends an "add storage" record for `storage_id` to this channel's log
// stream and, on success, records the write version in write_version_.
// VLOG(50) traces mark entry/exit/failure; exceptions go to
// HANDLE_EXCEPTION_AND_ABORT (presumably process-terminating — TODO confirm).
void log_channel::add_storage(storage_id_type storage_id, write_version_type write_version) {
VLOG(50) << "start add_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
try {
// Serialize first; only a successful write updates write_version_.
log_entry::write_add_storage(strm_, storage_id, write_version);
write_version_ = write_version;
} catch (...) {
VLOG(50) << "abort add_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
HANDLE_EXCEPTION_AND_ABORT();
}
VLOG(50) << "end add_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
}

// Appends a "remove storage" record for `storage_id` to this channel's log
// stream and, on success, records the write version in write_version_.
// VLOG(50) traces mark entry/exit/failure; exceptions go to
// HANDLE_EXCEPTION_AND_ABORT (presumably process-terminating — TODO confirm).
void log_channel::remove_storage(storage_id_type storage_id, write_version_type write_version) {
VLOG(50) << "start remove_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
try {
// Serialize first; only a successful write updates write_version_.
log_entry::write_remove_storage(strm_, storage_id, write_version);
write_version_ = write_version;
} catch (...) {
VLOG(50) << "abort remove_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
HANDLE_EXCEPTION_AND_ABORT();
}
VLOG(50) << "end remove_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
}

// Appends a "clear storage" record for `storage_id` (truncation) to this
// channel's log stream and, on success, records the write version in
// write_version_. Note the serialized record type is write_clear_storage,
// while the public API name is truncate_storage.
// VLOG(50) traces mark entry/exit/failure; exceptions go to
// HANDLE_EXCEPTION_AND_ABORT (presumably process-terminating — TODO confirm).
void log_channel::truncate_storage(storage_id_type storage_id, write_version_type write_version) {
VLOG(50) << "start truncate_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
try {
// Serialize first; only a successful write updates write_version_.
log_entry::write_clear_storage(strm_, storage_id, write_version);
write_version_ = write_version;
} catch (...) {
VLOG(50) << "abort truncate_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
HANDLE_EXCEPTION_AND_ABORT();
}
VLOG(50) << "end truncate_storage() with storage_id=" << storage_id << ", write_version.epoch_number_=" << write_version.epoch_number_ << ", write_version.minor_write_version_=" << write_version.minor_write_version_;
}

boost::filesystem::path log_channel::file_path() const noexcept {
Expand Down

0 comments on commit c5bcaf4

Please sign in to comment.