diff --git a/xmrstak/backend/amd/minethd.cpp b/xmrstak/backend/amd/minethd.cpp
index 667144cdd..0a181154c 100644
--- a/xmrstak/backend/amd/minethd.cpp
+++ b/xmrstak/backend/amd/minethd.cpp
@@ -172,7 +172,6 @@ void minethd::work_main()
 	lck.release();
 	std::this_thread::yield();
 
-	uint64_t iCount = 0;
 	cryptonight_ctx* cpu_ctx;
 	cpu_ctx = cpu::minethd::minethd_alloc_ctx();
 
@@ -288,10 +287,7 @@ void minethd::work_main()
 				executor::inst()->push_event(ex_event("AMD Invalid Result", pGpuCtx->deviceIdx, oWork.iPoolId));
 			}
 
-			iCount += pGpuCtx->rawIntensity;
-			uint64_t iStamp = get_timestamp_ms();
-			iHashCount.store(iCount, std::memory_order_relaxed);
-			iTimestamp.store(iStamp, std::memory_order_relaxed);
+			updateStats(pGpuCtx->rawIntensity, oWork.iPoolId);
 
 			accRuntime += updateTimings(pGpuCtx, t0);
 
diff --git a/xmrstak/backend/cpu/minethd.cpp b/xmrstak/backend/cpu/minethd.cpp
index 0229af0a7..463be1aab 100644
--- a/xmrstak/backend/cpu/minethd.cpp
+++ b/xmrstak/backend/cpu/minethd.cpp
@@ -833,6 +833,7 @@ void minethd::multiway_work_main()
 
 	cryptonight_ctx* ctx[MAX_N];
 	uint64_t iCount = 0;
+	uint64_t iLastCount = 0;
 	uint64_t* piHashVal[MAX_N];
 	uint32_t* piNonce[MAX_N];
 	uint8_t bHashOut[MAX_N * 32];
@@ -915,9 +916,8 @@ void minethd::multiway_work_main()
 		{
 			if((iCount++ & 0x7) == 0) //Store stats every 8*N hashes
 			{
-				uint64_t iStamp = get_timestamp_ms();
-				iHashCount.store(iCount * N, std::memory_order_relaxed);
-				iTimestamp.store(iStamp, std::memory_order_relaxed);
+				updateStats((iCount - iLastCount) * N, oWork.iPoolId);
+				iLastCount = iCount;
 			}
 
 			nonce_ctr -= N;
diff --git a/xmrstak/backend/iBackend.hpp b/xmrstak/backend/iBackend.hpp
index 1af42c248..dd59b6c52 100644
--- a/xmrstak/backend/iBackend.hpp
+++ b/xmrstak/backend/iBackend.hpp
@@ -1,6 +1,7 @@
 #pragma once
 
 #include "xmrstak/backend/globalStates.hpp"
+#include "xmrstak/net/msgstruct.hpp"
 
 #include <atomic>
 #include <cstdint>
@@ -46,6 +47,29 @@ struct iBackend
 	std::atomic<uint64_t> iTimestamp;
 	uint32_t iThreadNo;
 	BackendType backendType = UNKNOWN;
+	uint64_t iLastStamp = get_timestamp_ms();
+	double avgHashPerMsec = 0.0;
+
+	void updateStats(uint64_t numNewHashes, size_t poolId)
+	{
+		uint64_t iStamp = get_timestamp_ms();
+		double timeDiff = static_cast<double>(iStamp - iLastStamp);
+		iLastStamp = iStamp;
+
+		if(poolId == 0)
+		{
+			// if dev pool is active interpolate the number of shares (avoid hash rate drops)
+			numNewHashes = static_cast<uint64_t>(avgHashPerMsec * timeDiff);
+		}
+		else
+		{
+			const double hashRatePerMs = static_cast<double>(numNewHashes) / timeDiff;
+			constexpr double averagingBias = 0.1;
+			avgHashPerMsec = avgHashPerMsec * (1.0 - averagingBias) + hashRatePerMs * averagingBias;
+		}
+		iHashCount.fetch_add(numNewHashes, std::memory_order_relaxed);
+		iTimestamp.store(iStamp, std::memory_order_relaxed);
+	}
 
 	iBackend() :
 		iHashCount(0),
diff --git a/xmrstak/backend/nvidia/minethd.cpp b/xmrstak/backend/nvidia/minethd.cpp
index 4506faed6..32b21dc71 100644
--- a/xmrstak/backend/nvidia/minethd.cpp
+++ b/xmrstak/backend/nvidia/minethd.cpp
@@ -198,7 +198,6 @@ void minethd::work_main()
 	// wait until all NVIDIA devices are initialized
 	thread_work_guard.wait();
 
-	uint64_t iCount = 0;
 	cryptonight_ctx* cpu_ctx;
 	cpu_ctx = cpu::minethd::minethd_alloc_ctx();
 
@@ -297,13 +296,8 @@ void minethd::work_main()
 				executor::inst()->push_event(ex_event("NVIDIA Invalid Result", ctx.device_id, oWork.iPoolId));
 			}
 
-			iCount += h_per_round;
 			iNonce += h_per_round;
-
-			using namespace std::chrono;
-			uint64_t iStamp = get_timestamp_ms();
-			iHashCount.store(iCount, std::memory_order_relaxed);
-			iTimestamp.store(iStamp, std::memory_order_relaxed);
+			updateStats(h_per_round, oWork.iPoolId);
 
 			std::this_thread::yield();
 		}
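
Not part of the patch: a minimal, standalone sketch of the exponential-moving-average smoothing that updateStats applies above. The 0.1 averagingBias and the hashes-per-millisecond units mirror the diff; the sample intervals and the main() driver are made up purely for illustration.

// Illustrative only: mirrors the smoothing step in iBackend::updateStats.
// Each reporting interval contributes 10% of its instantaneous rate, so a
// brief stall or burst moves the reported hash rate only gradually.
#include <cstdint>
#include <iostream>

int main()
{
	double avgHashPerMsec = 0.0;
	constexpr double averagingBias = 0.1;

	// hypothetical (hashes completed, elapsed milliseconds) reporting intervals
	const uint64_t samples[][2] = {{2048, 500}, {2048, 520}, {0, 510}, {2048, 490}};

	for(const auto& s : samples)
	{
		const double hashRatePerMs = static_cast<double>(s[0]) / static_cast<double>(s[1]);
		avgHashPerMsec = avgHashPerMsec * (1.0 - averagingBias) + hashRatePerMs * averagingBias;
		std::cout << "smoothed rate: " << avgHashPerMsec * 1000.0 << " H/s\n";
	}
	return 0;
}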