diff --git a/src/engine/allocators.cpp b/src/engine/allocators.cpp
index 7ea2fad7e3..323532144e 100644
--- a/src/engine/allocators.cpp
+++ b/src/engine/allocators.cpp
@@ -215,8 +215,8 @@ namespace Lumix
 BaseProxyAllocator::BaseProxyAllocator(IAllocator& source)
 	: m_source(source)
+	, m_allocation_count(0)
 {
-	m_allocation_count = 0;
 }
 
 BaseProxyAllocator::~BaseProxyAllocator() { ASSERT(m_allocation_count == 0); }
@@ -224,7 +224,7 @@ BaseProxyAllocator::~BaseProxyAllocator() { ASSERT(m_allocation_count == 0); }
 
 void* BaseProxyAllocator::allocate(size_t size, size_t align)
 {
-	atomicIncrement(&m_allocation_count);
+	m_allocation_count.inc();
 	return m_source.allocate(size, align);
 }
 
@@ -233,7 +233,7 @@ void BaseProxyAllocator::deallocate(void* ptr)
 {
 	if(ptr)
 	{
-		atomicDecrement(&m_allocation_count);
+		m_allocation_count.dec();
 		m_source.deallocate(ptr);
 	}
 }
@@ -241,14 +241,13 @@ void BaseProxyAllocator::deallocate(void* ptr)
 
 void* BaseProxyAllocator::reallocate(void* ptr, size_t new_size, size_t old_size, size_t align)
 {
-	if (!ptr) atomicIncrement(&m_allocation_count);
-	if (new_size == 0) atomicDecrement(&m_allocation_count);
+	if (!ptr) m_allocation_count.inc();
+	if (new_size == 0) m_allocation_count.dec();
 	return m_source.reallocate(ptr, new_size, old_size, align);
 }
 
-LinearAllocator::LinearAllocator(u32 reserved) {
-	m_end = 0;
-	m_commited_bytes = 0;
+LinearAllocator::LinearAllocator(u32 reserved)
+{
 	m_reserved = reserved;
 	m_mem = (u8*)os::memReserve(reserved);
 }
@@ -256,7 +255,7 @@ LinearAllocator::LinearAllocator(u32 reserved) {
 LinearAllocator::~LinearAllocator() {
 	ASSERT(m_end == 0);
 	os::memRelease(m_mem, m_reserved);
-	atomicSubtract(&g_total_commited_bytes, m_commited_bytes);
+	g_total_commited_bytes.subtract(m_commited_bytes);
 }
 
 void LinearAllocator::reset() {
@@ -274,7 +273,7 @@ void* LinearAllocator::allocate(size_t size, size_t align) {
 	for (;;) {
 		const u32 end = m_end;
 		start = roundUp(end, (u32)align);
-		if (compareAndExchange(&m_end, u32(start + size), end)) break;
+		if (m_end.compareExchange(u32(start + size), end)) break;
 	}
 
 	if (start + size <= m_commited_bytes) return m_mem + start;
@@ -285,13 +284,13 @@ void* LinearAllocator::allocate(size_t size, size_t align) {
 	const u32 commited = roundUp(start + (u32)size, 4096);
 	ASSERT(commited < m_reserved);
 	os::memCommit(m_mem + m_commited_bytes, commited - m_commited_bytes);
-	atomicAdd(&g_total_commited_bytes, commited - m_commited_bytes);
+	g_total_commited_bytes.add(commited - m_commited_bytes);
 	m_commited_bytes = commited;
 	return m_mem + start;
 }
 
-volatile i64 LinearAllocator::g_total_commited_bytes = 0;
+AtomicI64 LinearAllocator::g_total_commited_bytes = 0;
 
 void LinearAllocator::deallocate(void* ptr) { /*everything should be "deallocated" with reset()*/ }
 
 void* LinearAllocator::reallocate(void* ptr, size_t new_size, size_t old_size, size_t align) {
diff --git a/src/engine/allocators.h b/src/engine/allocators.h
index c2e03bf10d..f2fe638d34 100644
--- a/src/engine/allocators.h
+++ b/src/engine/allocators.h
@@ -1,6 +1,7 @@
 #pragma once
 
 #include "allocator.h"
+#include "atomic.h"
 #include "crt.h"
 #include "sync.h"
 
@@ -58,7 +59,7 @@ struct LUMIX_ENGINE_API BaseProxyAllocator final : IAllocator {
 
 private:
 	IAllocator& m_source;
-	volatile i32 m_allocation_count;
+	AtomicI32 m_allocation_count;
};
 
// allocations in a row one after another, deallocate everything at once
@@ -76,13 +77,13 @@ struct LUMIX_ENGINE_API LinearAllocator : IAllocator {
 	static size_t getTotalCommitedBytes() { return g_total_commited_bytes; }
 
 private:
-	u32 m_commited_bytes;
+	u32 m_commited_bytes = 0;
 	u32 m_reserved;
-	volatile i32 m_end;
+	AtomicI32 m_end = 0;
 	u8* m_mem;
 	Mutex m_mutex;
 
-	static volatile i64 g_total_commited_bytes;
+	static AtomicI64 g_total_commited_bytes;
 };
 
 // one allocation from local memory backing (m_mem), use fallback allocator otherwise
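Note: LinearAllocator::allocate() above is a lock-free bump allocator: read the current end, round it up to the requested alignment, then publish the new end with a compare-exchange and retry on contention. The same pattern in isolation, as a minimal sketch built on the AtomicI32 this patch introduces (illustrative names, not engine API):

struct BumpRange {
	AtomicI32 end = 0;

	// atomically claims `size` bytes aligned to `align`, returns the start offset
	i32 claim(i32 size, i32 align) {
		for (;;) {
			const i32 old_end = end; // atomic load via operator i32()
			const i32 start = (old_end + align - 1) & ~(align - 1);
			// succeeds only if no other thread moved `end` since our load
			if (end.compareExchange(start + size, old_end)) return start;
		}
	}
};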
diff --git a/src/engine/atomic.h b/src/engine/atomic.h
index 315a180d3d..1456902cda 100644
--- a/src/engine/atomic.h
+++ b/src/engine/atomic.h
@@ -5,17 +5,41 @@ namespace Lumix
 {
 
-LUMIX_ENGINE_API i64 atomicIncrement(i64 volatile* value);
-LUMIX_ENGINE_API i32 atomicIncrement(i32 volatile* value);
-// returns the resulting value
-LUMIX_ENGINE_API i32 atomicDecrement(i32 volatile* value);
-// returns the initial value
-LUMIX_ENGINE_API i32 atomicAdd(i32 volatile* addend, i32 value);
-LUMIX_ENGINE_API i64 atomicAdd(i64 volatile* addend, i64 value);
-LUMIX_ENGINE_API i32 atomicSubtract(i32 volatile* addend, i32 value);
-LUMIX_ENGINE_API i64 atomicSubtract(i64 volatile* addend, i64 value);
-LUMIX_ENGINE_API bool compareAndExchange(i32 volatile* dest, i32 exchange, i32 comperand);
-LUMIX_ENGINE_API bool compareAndExchange64(i64 volatile* dest, i64 exchange, i64 comperand);
+struct LUMIX_ENGINE_API AtomicI32 {
+	AtomicI32(i32 v) : value(v) {}
+
+	void operator =(i32 v);
+	operator i32() const;
+
+	// inc/dec/add/subtract return the initial value of the variable
+	i32 inc();
+	i32 dec();
+	i32 add(i32 v);
+	i32 subtract(i32 v);
+
+	bool compareExchange(i32 exchange, i32 comparand);
+private:
+	volatile i32 value;
+};
+
+struct LUMIX_ENGINE_API AtomicI64 {
+	AtomicI64(i64 v) : value(v) {}
+
+	void operator =(i64 v);
+	operator i64() const;
+
+	// inc/dec/add/subtract return the initial value of the variable
+	i64 inc();
+	i64 dec();
+	i64 add(i64 v);
+	i64 subtract(i64 v);
+
+	bool compareExchange(i64 exchange, i64 comparand);
+private:
+	volatile i64 value;
+};
+
+LUMIX_ENGINE_API bool compareExchangePtr(volatile void** value, void* exchange, void* comparand);
 
 LUMIX_ENGINE_API void memoryBarrier();
 
 } // namespace Lumix
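Note: inc(), dec(), add() and subtract() are fetch-and-op, i.e. they return the value the variable held before the operation. That is why call sites throughout this patch change from `atomicIncrement(&x) - 1` to plain `x.inc()`, and why comparisons against the result shift by one. A quick sketch of the semantics (assumes only engine/atomic.h):

void atomicSemanticsDemo() {
	AtomicI32 counter = 0;
	const i32 a = counter.inc();   // a == 0, counter is now 1
	const i32 b = counter.add(10); // b == 1, counter is now 11
	const i32 c = counter.dec();   // c == 11, counter is now 10
	const i32 n = counter;         // atomic read via operator i32()
	counter = 0;                   // atomic store via operator =
	(void)a; (void)b; (void)c; (void)n;
}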
diff --git a/src/engine/debug.h b/src/engine/debug.h
index 194336ded8..0506a93f79 100644
--- a/src/engine/debug.h
+++ b/src/engine/debug.h
@@ -2,6 +2,7 @@
 
 #include "engine/allocator.h"
+#include "engine/atomic.h"
 #include "engine/lumix.h"
 #include "engine/sync.h"
 
@@ -41,7 +42,7 @@ struct LUMIX_ENGINE_API StackTree
 
 private:
 	StackNode* m_root;
-	static i32 s_instances;
+	static AtomicI32 s_instances;
 };
 
 #ifdef _WIN32
diff --git a/src/engine/job_system.cpp b/src/engine/job_system.cpp
index 5b8f08cbf8..b979d31c73 100644
--- a/src/engine/job_system.cpp
+++ b/src/engine/job_system.cpp
@@ -78,7 +78,7 @@ struct System {
 
 static Local<System> g_system;
-static volatile i32 g_generation = 0;
+static AtomicI32 g_generation = 0;
 static thread_local WorkerTask* g_worker = nullptr;
 
 #ifndef _WIN32
@@ -166,9 +166,9 @@ LUMIX_FORCE_INLINE static bool trigger(Signal* signal)
 		signal->counter = 0;
 	}
 	else {
-		--signal->counter;
-		ASSERT(signal->counter >= 0);
-		if (signal->counter > 0) return false;
+		i32 counter = signal->counter.dec();
+		ASSERT(counter > 0);
+		if (counter > 1) return false;
 	}
 
 	waitor = signal->waitor;
@@ -223,9 +223,9 @@ void enableBackupWorker(bool enable)
 LUMIX_FORCE_INLINE static bool setRedEx(Signal* signal) {
 	ASSERT(signal);
 	ASSERT(signal->counter <= 1);
-	bool res = compareAndExchange(&signal->counter, 1, 0);
+	bool res = signal->counter.compareExchange(1, 0);
 	if (res) {
-		signal->generation = atomicIncrement(&g_generation);
+		signal->generation = g_generation.inc();
 	}
 	return res;
 }
@@ -260,9 +260,8 @@ void runEx(void* data, void(*task)(void*), Signal* on_finished, u8 worker_index)
 {
 	if (on_finished) {
 		Lumix::MutexGuard guard(g_system->m_sync);
-		++on_finished->counter;
-		if (on_finished->counter == 1) {
-			on_finished->generation = atomicIncrement(&g_generation);
+		if (on_finished->counter.inc() == 0) {
+			on_finished->generation = g_generation.inc();
 		}
 	}
diff --git a/src/engine/job_system.h b/src/engine/job_system.h
index 9ca6116d45..68e0932a47 100644
--- a/src/engine/job_system.h
+++ b/src/engine/job_system.h
@@ -1,8 +1,6 @@
 #pragma once
 
 #include "lumix.h"
-#ifndef _WIN32
-	#include "atomic.h"
-#endif
+#include "atomic.h"
 
 namespace Lumix {
@@ -64,7 +62,7 @@ struct Signal {
 	~Signal() { ASSERT(!waitor); ASSERT(!counter); }
 	struct Waitor* waitor = nullptr;
-	volatile i32 counter = 0;
+	AtomicI32 counter = 0;
 	i32 generation; // identify different red-green pairs on the same signal, used by profiler
 };
@@ -95,11 +93,11 @@ void forEach(i32 count, i32 step, const F& f)
 		return;
 	}
 
-	volatile i32 offset = 0;
+	AtomicI32 offset = 0;
 
 	jobs::runOnWorkers([&](){
 		for(;;) {
-			const i32 idx = atomicAdd(&offset, step);
+			const i32 idx = offset.add(step);
 			if (idx >= count) break;
 			i32 to = idx + step;
 			to = to > count ? count : to;
diff --git a/src/engine/page_allocator.cpp b/src/engine/page_allocator.cpp
index d02600e11d..222c4d2491 100644
--- a/src/engine/page_allocator.cpp
+++ b/src/engine/page_allocator.cpp
@@ -43,7 +43,7 @@ void PageAllocator::unlock()
 
 void* PageAllocator::allocate(bool lock)
 {
-	atomicIncrement(&allocated_count);
+	allocated_count.inc();
 	void* p;
 	if (free_pages.pop(p)) return p;
 
@@ -65,7 +65,7 @@ void* PageAllocator::allocate(bool lock)
 
 void PageAllocator::deallocate(void* mem, bool lock)
 {
-	atomicDecrement(&allocated_count);
+	allocated_count.dec();
 	free_pages.push(mem, lock ? &mutex : nullptr);
 }
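Note: because dec() returns the pre-decrement value, trigger() above maps `--counter; ASSERT(counter >= 0); if (counter > 0)` onto `i32 c = counter.dec(); ASSERT(c > 0); if (c > 1)`, and runEx() maps `++counter; if (counter == 1)` onto `if (counter.inc() == 0)`. The same first/last-reference idiom as a tiny standalone sketch (illustrative, not engine API):

struct RefCounted {
	AtomicI32 ref_count = 0;

	void addRef() {
		// inc() returning 0 means this call created the first reference
		if (ref_count.inc() == 0) { /* first user: acquire the resource */ }
	}

	void release() {
		// dec() returning 1 means this call dropped the last reference
		if (ref_count.dec() == 1) { /* last user: free the resource */ }
	}
};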
diff --git a/src/engine/page_allocator.h b/src/engine/page_allocator.h
index 2d874543da..3dcaba3545 100644
--- a/src/engine/page_allocator.h
+++ b/src/engine/page_allocator.h
@@ -28,7 +28,7 @@ struct LUMIX_ENGINE_API PageAllocator final
 	void unlock();
 
 private:
-	volatile i32 allocated_count = 0;
+	AtomicI32 allocated_count = 0;
 	u32 reserved_count = 0;
 	RingBuffer free_pages;
 	Mutex mutex;
@@ -47,10 +47,11 @@ struct PagedListIterator
 		for (;;) {
 			volatile T* tmp = value;
 			if(!tmp) return nullptr;
-			if (compareAndExchange64((volatile i64*)&value, (i64)tmp->header.next, (i64)tmp)) return (T*)tmp;
+			if (compareExchangePtr((volatile void**)&value, (void*)tmp->header.next, (void*)tmp)) return (T*)tmp;
 		}
 	}
 
+private:
 	volatile T* value;
 };
diff --git a/src/engine/profiler.cpp b/src/engine/profiler.cpp
index a9b37c862d..f5d1f87ec2 100644
--- a/src/engine/profiler.cpp
+++ b/src/engine/profiler.cpp
@@ -6,6 +6,7 @@
 	#include <intrin.h>
 #endif
 
 #include "engine/array.h"
+#include "engine/atomic.h"
 #include "engine/crt.h"
 #include "engine/hash_map.h"
@@ -182,7 +183,7 @@ static struct Instance
 	u64 paused_time = 0;
 	u64 last_frame_duration = 0;
 	u64 last_frame_time = 0;
-	volatile i32 fiber_wait_id = 0;
+	AtomicI32 fiber_wait_id = 0;
 	TraceTask trace_task;
 	ThreadContext global_context;
 } g_instance;
@@ -363,18 +364,18 @@ void blockColor(u8 r, u8 g, u8 b)
 	write(*ctx, EventType::BLOCK_COLOR, color);
 }
 
-static volatile i32 last_block_id = 0;
-
 static void continueBlock(i32 block_id)
 {
 	ThreadContext* ctx = g_instance.getThreadContext();
 	ctx->open_blocks.push(block_id);
 	write(*ctx, EventType::CONTINUE_BLOCK, block_id);
 }
 
+static AtomicI32 last_block_id = 0;
+
 void beginBlock(const char* name)
 {
 	BlockRecord r;
-	r.id = atomicIncrement(&last_block_id);
+	r.id = last_block_id.inc();
 	r.name = name;
 	ThreadContext* ctx = g_instance.getThreadContext();
 	ctx->open_blocks.push(r.id);
@@ -402,8 +403,8 @@ void endGPUBlock(u64 timestamp)
 
 i64 createNewLinkID()
 {
-	static i64 counter = 0;
-	return atomicIncrement(&counter);
+	static AtomicI64 counter = 0;
+	return counter.inc();
 }
 
 
@@ -447,7 +448,7 @@ void signalTriggered(i32 job_system_signal) {
 
 FiberSwitchData beginFiberWait(i32 job_system_signal, bool is_mutex) {
 	FiberWaitRecord r;
-	r.id = atomicIncrement(&g_instance.fiber_wait_id);
+	r.id = g_instance.fiber_wait_id.inc();
 	r.job_system_signal = job_system_signal;
 	r.is_mutex = is_mutex;
diff --git a/src/engine/ring_buffer.h b/src/engine/ring_buffer.h
index d70c1f5d2c..e3f45e8fea 100644
--- a/src/engine/ring_buffer.h
+++ b/src/engine/ring_buffer.h
@@ -32,7 +32,7 @@ struct RingBuffer
 			return false;
 		}
 		else if (seq == pos + 1) {
-			if (compareAndExchange(&rd, pos + 1, pos)) break;
+			if (rd.compareExchange(pos + 1, pos)) break;
 		}
 		else {
 			pos = rd;
@@ -58,7 +58,7 @@ struct RingBuffer
 		}
 		else if (seq == pos) {
 			// we can try to push
-			if (compareAndExchange(&wr, pos + 1, pos)) break;
+			if (wr.compareExchange(pos + 1, pos)) break;
 		}
 		else {
 			// somebody pushed before us, try again
@@ -77,8 +77,8 @@ struct RingBuffer
 	}
 
 	Item objects[CAPACITY];
-	volatile i32 rd = 0;
-	volatile i32 wr = 0;
+	AtomicI32 rd = 0;
+	AtomicI32 wr = 0;
 	Array m_fallback;
 };
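Note: PagedListIterator::next() above is a lock-free pop from a singly linked list: snapshot the head, then use compareExchangePtr() to swing it to head->next; if another thread popped first, the CAS fails and the loop retries with a fresh snapshot. The pattern in isolation, on a hypothetical node type (a sketch assuming only engine/atomic.h):

struct Node { Node* next; };

Node* pop(volatile Node*& head) {
	for (;;) {
		volatile Node* tmp = head;
		if (!tmp) return nullptr; // list is empty
		// succeeds only if `head` still equals the snapshot we read above
		if (compareExchangePtr((volatile void**)&head, (void*)tmp->next, (void*)tmp)) return (Node*)tmp;
	}
}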
diff --git a/src/engine/win/atomic.cpp b/src/engine/win/atomic.cpp
index 4b94ae690c..38c1b640cf 100644
--- a/src/engine/win/atomic.cpp
+++ b/src/engine/win/atomic.cpp
@@ -4,52 +4,35 @@
 namespace Lumix
 {
 
-i64 atomicIncrement(i64 volatile* value)
-{
-	return _InterlockedIncrement64((volatile long long*)value);
-}
-
-i32 atomicIncrement(i32 volatile* value)
-{
-	return _InterlockedIncrement((volatile long*)value);
-}
+void AtomicI32::operator =(i32 v) { _InterlockedExchange((volatile long*)&value, v); }
+AtomicI32::operator i32() const { return _InterlockedExchangeAdd((volatile long*)&value, 0); }
 
-i32 atomicDecrement(i32 volatile* value)
-{
-	return _InterlockedDecrement((volatile long*)value);
-}
-
-i32 atomicAdd(i32 volatile* addend, i32 value)
-{
-	return _InterlockedExchangeAdd((volatile long*)addend, value);
-}
+i32 AtomicI32::inc() { return _InterlockedExchangeAdd((volatile long*)&value, 1); }
+i32 AtomicI32::dec() { return _InterlockedExchangeAdd((volatile long*)&value, -1); }
+i32 AtomicI32::add(i32 v) { return _InterlockedExchangeAdd((volatile long*)&value, v); }
+i32 AtomicI32::subtract(i32 v) { return _InterlockedExchangeAdd((volatile long*)&value, -v); }
 
-i32 atomicSubtract(i32 volatile* addend, i32 value)
-{
-	return _InterlockedExchangeAdd((volatile long*)addend, -value);
+bool AtomicI32::compareExchange(i32 exchange, i32 comparand) {
+	return _InterlockedCompareExchange((volatile long*)&value, exchange, comparand) == comparand;
 }
 
-i64 atomicAdd(i64 volatile* addend, i64 value)
-{
-	return _InterlockedExchangeAdd64((volatile long long*)addend, value);
-}
+void AtomicI64::operator =(i64 v) { _InterlockedExchange64((volatile long long*)&value, v); }
+AtomicI64::operator i64() const { return _InterlockedExchangeAdd64((volatile long long*)&value, 0); }
 
-i64 atomicSubtract(i64 volatile* addend, i64 value)
-{
-	return _InterlockedExchangeAdd64((volatile long long*)addend, -value);
-}
+i64 AtomicI64::inc() { return _InterlockedExchangeAdd64((volatile long long*)&value, 1); }
+i64 AtomicI64::dec() { return _InterlockedExchangeAdd64((volatile long long*)&value, -1); }
+i64 AtomicI64::add(i64 v) { return _InterlockedExchangeAdd64((volatile long long*)&value, v); }
+i64 AtomicI64::subtract(i64 v) { return _InterlockedExchangeAdd64((volatile long long*)&value, -v); }
 
-bool compareAndExchange(i32 volatile* dest, i32 exchange, i32 comperand)
-{
-	return _InterlockedCompareExchange((volatile long*)dest, exchange, comperand) == comperand;
+bool AtomicI64::compareExchange(i64 exchange, i64 comparand) {
+	return _InterlockedCompareExchange64((volatile long long*)&value, exchange, comparand) == comparand;
 }
 
-bool compareAndExchange64(i64 volatile* dest, i64 exchange, i64 comperand)
-{
-	return _InterlockedCompareExchange64(dest, exchange, comperand) == comperand;
+bool compareExchangePtr(volatile void** value, void* exchange, void* comparand) {
+	static_assert(sizeof(comparand) == sizeof(long long));
+	return _InterlockedCompareExchange64((volatile long long*)value, (long long)exchange, (long long)comparand) == (long long)comparand;
 }
 
-
 LUMIX_ENGINE_API void memoryBarrier()
 {
 #ifdef _M_AMD64
diff --git a/src/engine/win/debug.cpp b/src/engine/win/debug.cpp
index 6391868aaf..7c16d002a7 100644
--- a/src/engine/win/debug.cpp
+++ b/src/engine/win/debug.cpp
@@ -58,7 +58,7 @@ void debugBreak()
 }
 
 
-int StackTree::s_instances = 0;
+AtomicI32 StackTree::s_instances = 0;
 
 
 struct StackNode
@@ -79,7 +79,7 @@ struct StackNode
 StackTree::StackTree()
 {
 	m_root = nullptr;
-	if (atomicIncrement(&s_instances) == 1)
+	if (s_instances.inc() == 0)
 	{
 		HANDLE process = GetCurrentProcess();
 		SymInitialize(process, nullptr, TRUE);
@@ -90,7 +90,7 @@ StackTree::StackTree()
 StackTree::~StackTree()
 {
 	LUMIX_DELETE(getGlobalAllocator(), m_root);
-	if (atomicDecrement(&s_instances) == 0)
+	if (s_instances.dec() == 1)
 	{
 		HANDLE process = GetCurrentProcess();
 		SymCleanup(process);
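Note: only the Win32 backend is part of this patch; the non-Windows implementation is not shown here. For orientation, a counterpart could be built on the GCC/Clang __atomic builtins, which are also fetch-op style and therefore return the initial value, matching the _InterlockedExchangeAdd mapping above. This is a hypothetical sketch, not the engine's actual POSIX code:

// hypothetical non-Windows backend, assuming GCC/Clang __atomic builtins
i32 AtomicI32::inc() { return __atomic_fetch_add(&value, 1, __ATOMIC_SEQ_CST); }
i32 AtomicI32::dec() { return __atomic_fetch_sub(&value, 1, __ATOMIC_SEQ_CST); }
i32 AtomicI32::add(i32 v) { return __atomic_fetch_add(&value, v, __ATOMIC_SEQ_CST); }

bool AtomicI32::compareExchange(i32 exchange, i32 comparand) {
	// strong CAS; `comparand` is a by-value copy, so clobbering it on failure is harmless
	return __atomic_compare_exchange_n(&value, &comparand, exchange, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}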
diff --git a/src/navigation/navigation_module.cpp b/src/navigation/navigation_module.cpp
index 6f670f6667..1dc7e8230c 100644
--- a/src/navigation/navigation_module.cpp
+++ b/src/navigation/navigation_module.cpp
@@ -1313,16 +1313,16 @@ struct NavigationModuleImpl final : NavigationModule
 	void pushJob() {
 		jobs::runLambda([this](){
-			const i32 i = atomicIncrement(&counter) - 1;
+			const i32 i = counter.inc();
 			if (i >= total) {
 				return;
 			}
 
 			if (!module->generateTile(*zone, zone_entity, i % zone->m_num_tiles_x, i / zone->m_num_tiles_x, false, mutex)) {
-				atomicIncrement(&fail_counter);
+				fail_counter.inc();
 			}
 			else {
-				atomicIncrement(&done_counter);
+				done_counter.inc();
 			}
 
 			pushJob();
@@ -1337,9 +1337,9 @@ struct NavigationModuleImpl final : NavigationModule
 	}
 
 	i32 total;
-	volatile i32 counter = 0;
-	volatile i32 fail_counter = 0;
-	volatile i32 done_counter = 0;
+	AtomicI32 counter = 0;
+	AtomicI32 fail_counter = 0;
+	AtomicI32 done_counter = 0;
 	Mutex mutex;
 	RecastZone* zone;
 	EntityRef zone_entity;
diff --git a/src/renderer/culling_system.cpp b/src/renderer/culling_system.cpp
index 145d017988..863d4ed503 100644
--- a/src/renderer/culling_system.cpp
+++ b/src/renderer/culling_system.cpp
@@ -319,7 +319,7 @@ struct CullingSystemImpl final : CullingSystem
 	{
 		if (m_cells.empty()) return nullptr;
 
-		volatile i32 cell_idx = 0;
+		AtomicI32 cell_idx = 0;
 		PagedList<CullResult> list(m_page_allocator);
 
 		jobs::runOnWorkers([&](){
@@ -329,7 +329,7 @@ struct CullingSystemImpl final : CullingSystem
 			CullResult* result = nullptr;
 			u32 total_count = 0;
 			for(;;) {
-				const i32 idx = atomicIncrement(&cell_idx) - 1;
+				const i32 idx = cell_idx.inc();
 				if (idx >= m_cells.size()) return;
 
 				CellPage& cell = *m_cells[idx];
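Note: the navigation tile generator above, the culling job, and the FBX importer below all share one work-distribution idiom: every worker claims the next index from a shared AtomicI32 until the range runs out. With inc() returning the pre-increment value, the old `atomicIncrement(&c) - 1` collapses to `c.inc()`. The idiom in isolation (illustrative sketch):

AtomicI32 next_item = 0;

void workerLoop(i32 count) {
	for (;;) {
		const i32 idx = next_item.inc(); // each worker claims a unique index
		if (idx >= count) break;         // range exhausted, worker exits
		// process item `idx`...
	}
}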
diff --git a/src/renderer/editor/fbx_importer.cpp b/src/renderer/editor/fbx_importer.cpp
index f117625198..6556de02ed 100644
--- a/src/renderer/editor/fbx_importer.cpp
+++ b/src/renderer/editor/fbx_importer.cpp
@@ -767,14 +767,14 @@ static void computeBoundingShapes(FBXImporter::ImportMesh& mesh, u32 vertex_size
 
 // convert from ofbx to runtime vertex data, compute missing info (normals, tangents, ao, ...)
 void FBXImporter::postprocessMeshes(const ImportConfig& cfg, const Path& path)
 {
-	volatile i32 mesh_idx_getter = 0;
+	AtomicI32 mesh_idx_getter = 0;
 	jobs::runOnWorkers([&](){
 		Array skinning(m_allocator);
 		OutputMemoryStream unindexed_triangles(m_allocator);
 		Array tri_indices_tmp(m_allocator);
 		for (;;) {
-			i32 mesh_idx = atomicIncrement(&mesh_idx_getter) - 1;
+			i32 mesh_idx = mesh_idx_getter.inc();
 			if (mesh_idx >= m_meshes.size()) break;
 
 			ImportMesh& import_mesh = m_meshes[mesh_idx];
diff --git a/src/renderer/particle_system.cpp b/src/renderer/particle_system.cpp
index 69a2103182..3f9ff62246 100644
--- a/src/renderer/particle_system.cpp
+++ b/src/renderer/particle_system.cpp
@@ -342,7 +342,7 @@ void ParticleSystem::emit(u32 emitter_idx, Span<const float> emit_data, u32 count
 		++emitter.emit_index;
 		m_constants[1] += time_step;
 	}
-	m_last_update_stats.emitted += count;
+	m_last_update_stats.emitted.add(count);
 	m_constants[1] = c1;
 }
 
@@ -1089,7 +1089,7 @@ void ParticleSystem::update(float dt, u32 emitter_idx, const Transform& delta_tr
 	OutputPagedStream emit_stream(page_allocator);
 	jobs::Mutex emit_mutex;
 
-	volatile i32 counter = 0;
+	AtomicI32 counter = 0;
 	auto update = [&](){
 		PROFILE_BLOCK("update particles");
@@ -1101,7 +1101,7 @@ void ParticleSystem::update(float dt, u32 emitter_idx, const Transform& delta_tr
 		u32 processed = 0;
 		for (;;) {
-			ctx.from = atomicAdd(&counter, 1024);
+			ctx.from = counter.add(1024);
 			if (ctx.from >= (i32)emitter.particles_count) return;
 
 			processChunk(ctx);
@@ -1110,7 +1110,7 @@ void ParticleSystem::update(float dt, u32 emitter_idx, const Transform& delta_tr
 		profiler::pushInt("Total count", processed);
 	};
 
-	m_last_update_stats.processed += emitter.particles_count;
+	m_last_update_stats.processed.add(emitter.particles_count);
 
 	if (emitter.particles_count <= 4096) update();
 	else jobs::runOnWorkers(update);
@@ -1148,7 +1148,7 @@ void ParticleSystem::update(float dt, u32 emitter_idx, const Transform& delta_tr
 		}
 	}
 
-	m_last_update_stats.killed += total_killed;
+	m_last_update_stats.killed.add(total_killed);
 	emitter.particles_count -= total_killed;
 	profiler::pushInt("kill count", total_killed);
 	page_allocator.deallocate(kill_counter, true);
@@ -1231,7 +1231,7 @@ u32 ParticleSystem::Emitter::getParticlesDataSizeBytes() const {
 
 void ParticleSystem::Emitter::fillInstanceData(float* data, PageAllocator& page_allocator) const {
 	if (particles_count == 0) return;
 
-	volatile i32 counter = 0;
+	AtomicI32 counter = 0;
 	auto fill = [&](){
 		PROFILE_BLOCK("fill particle gpu data");
@@ -1240,7 +1240,7 @@ void ParticleSystem::Emitter::fillInstanceData(float* data, PageAllocator& page_
 		ctx.output_memory = data;
 		for (;;) {
-			ctx.from = atomicAdd(&counter, 1024);
+			ctx.from = counter.add(1024);
 			if (ctx.from >= (i32)particles_count) return;
 
 			system.processChunk(ctx);
diff --git a/src/renderer/particle_system.h b/src/renderer/particle_system.h
index 7767cf8411..40fee7ef23 100644
--- a/src/renderer/particle_system.h
+++ b/src/renderer/particle_system.h
@@ -3,6 +3,7 @@
 
 #include "engine/lumix.h"
 #include "engine/array.h"
+#include "engine/atomic.h"
 #include "engine/math.h"
 #include "engine/resource.h"
 #include "engine/resource_manager.h"
 
@@ -147,9 +148,9 @@ struct LUMIX_RENDERER_API ParticleSystem {
 	};
 
 	struct Stats {
-		i32 emitted = 0;
-		i32 killed = 0;
-		i32 processed = 0;
+		AtomicI32 emitted = 0;
+		AtomicI32 killed = 0;
+		AtomicI32 processed = 0;
 	};
 
 	struct Emitter {
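Note: the particle update claims work in chunks of 1024 rather than one index at a time; add(1024) returns the start of the claimed range, so the shared counter is touched once per thousand particles instead of once per particle. The batching idiom as a sketch (illustrative):

AtomicI32 cursor = 0;

void chunkedWorker(i32 total) {
	for (;;) {
		const i32 from = cursor.add(1024); // claim the range [from, from + 1024)
		if (from >= total) return;
		const i32 to = from + 1024 < total ? from + 1024 : total;
		// process items in [from, to)...
	}
}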
diff --git a/src/renderer/pipeline.cpp b/src/renderer/pipeline.cpp
index b11434f92d..2b8a6e9176 100644
--- a/src/renderer/pipeline.cpp
+++ b/src/renderer/pipeline.cpp
@@ -2462,7 +2462,7 @@ struct PipelineImpl final : Pipeline
 		lod_indices.w = maximum(lod_indices.z, m->getLODIndices()[3].to);
 
 		const u32 instance_count = im.instances.size();
-		const u32 indirect_offset = atomicAdd(&m_indirect_buffer_offset, m->getMeshCount());
+		const u32 indirect_offset = m_indirect_buffer_offset.add(m->getMeshCount());
 
 		ub_values.camera_offset = Vec4(Vec3(origin.pos - view.cp.pos), 1);
 		ub_values.lod_distances = lod_distances;
@@ -3508,7 +3508,7 @@ struct PipelineImpl final : Pipeline
 		const float global_lod_multiplier = m_renderer.getLODMultiplier();
 		const float global_lod_multiplier_rcp = 1 / global_lod_multiplier;
 		const float time_delta = m_renderer.getEngine().getLastTimeDelta();
-		volatile i32 worker_idx = 0;
+		AtomicI32 worker_idx = 0;
 		u32 bucket_map[255];
 		for (u32 i = 0; i < 255; ++i) {
@@ -3529,7 +3529,7 @@ struct PipelineImpl final : Pipeline
 			const DVec3 lod_ref_point = m_viewport.pos;
 			Sorter::Inserter inserter(view.sorter);
 
-			const i32 instancer_idx = atomicIncrement(&worker_idx) - 1;
+			const i32 instancer_idx = worker_idx.inc();
 
 			AutoInstancer& instancer = view.instancers[instancer_idx];
 			instancer.init(m_renderer.getMaxSortKey() + 1);
@@ -3757,14 +3757,14 @@ struct PipelineImpl final : Pipeline
 		memset(m_histogram, 0, sizeof(m_histogram));
 		m_sorted = true;
 
-		volatile i32 counter = 0;
+		AtomicI32 counter = 0;
 		auto work = [&](){
 			PROFILE_BLOCK("compute histogram");
 			u32 histogram[SIZE];
 			bool sorted = true;
 			memset(histogram, 0, sizeof(histogram));
 
-			i32 begin = atomicAdd(&counter, STEP);
+			i32 begin = counter.add(STEP);
 			while (begin < size) {
 				const i32 end = minimum(size, begin + STEP);
@@ -3778,7 +3778,7 @@ struct PipelineImpl final : Pipeline
 					sorted &= prev_key <= key;
 					prev_key = key;
 				}
-				begin = atomicAdd(&counter, STEP);
+				begin = counter.add(STEP);
 			}
 
 			jobs::MutexGuard lock(m_cs);
@@ -4116,7 +4116,7 @@ struct PipelineImpl final : Pipeline
 	Array m_textures;
 	Array m_buffers;
 	os::Timer m_timer;
-	volatile i32 m_indirect_buffer_offset;
+	AtomicI32 m_indirect_buffer_offset = 0;
 	gpu::BufferHandle m_instanced_meshes_buffer;
 	gpu::BufferHandle m_indirect_buffer;
 	gpu::VertexDecl m_base_vertex_decl;
diff --git a/src/renderer/render_module.cpp b/src/renderer/render_module.cpp
index 3ea92a6cff..72a47957b0 100644
--- a/src/renderer/render_module.cpp
+++ b/src/renderer/render_module.cpp
@@ -536,7 +536,7 @@ struct RenderModuleImpl final : RenderModule {
 		StackArray to_delete(m_allocator);
 		jobs::Mutex mutex;
-		volatile ParticleSystem::Stats stats = {};
+		ParticleSystem::Stats stats = {};
 		jobs::forEach(m_particle_emitters.capacity(), 1, [&](i32 idx, i32){
 			ParticleSystem* ps = m_particle_emitters.getFromIndex(idx);
 			if (!ps) return;
@@ -547,9 +547,9 @@ struct RenderModuleImpl final : RenderModule {
 				jobs::exit(&mutex);
 			}
 
-			atomicAdd(&stats.emitted, ps->m_last_update_stats.emitted);
-			atomicAdd(&stats.killed, ps->m_last_update_stats.killed);
-			atomicAdd(&stats.processed, ps->m_last_update_stats.processed);
+			stats.emitted.add(ps->m_last_update_stats.emitted);
+			stats.killed.add(ps->m_last_update_stats.killed);
+			stats.processed.add(ps->m_last_update_stats.processed);
 		});
 
 		static u32 emitted_particles_stat = profiler::createCounter("Emitted particles", 0);
diff --git a/src/renderer/renderer.cpp b/src/renderer/renderer.cpp
index 8991fd8e70..cff5896538 100644
--- a/src/renderer/renderer.cpp
+++ b/src/renderer/renderer.cpp
@@ -51,7 +51,7 @@ struct TransientBuffer {
 	Renderer::TransientSlice alloc(u32 size) {
 		Renderer::TransientSlice slice;
 		size = (size + (ALIGN - 1)) & ~(ALIGN - 1);
-		slice.offset = atomicAdd(&m_offset, size);
+		slice.offset = m_offset.add(size);
 		slice.size = size;
 		if (slice.offset + size <= m_size) {
 			slice.buffer = m_buffer;
@@ -111,7 +111,7 @@ struct TransientBuffer {
 	}
 
 	gpu::BufferHandle m_buffer = gpu::INVALID_BUFFER;
-	i32 m_offset = 0;
+	AtomicI32 m_offset = 0;
 	u32 m_size = 0;
 	u8* m_ptr = nullptr;
 	jobs::Mutex m_mutex;
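Note: TransientBuffer::alloc() is the fetch-add bump pattern once more, with an overflow check: add() claims the range unconditionally, and the returned old offset tells the caller whether the claim still fits in the buffer; offsets landing past m_size presumably take the slower path guarded by m_mutex, which this hunk truncates. The shape of that check as a sketch (illustrative, not engine API):

struct Transient {
	AtomicI32 offset = 0;
	u32 size = 0;

	// claims n bytes; returns false if the claim landed past the end
	bool tryAlloc(u32 n, u32& out_offset) {
		out_offset = (u32)offset.add((i32)n); // old offset == start of our claim
		return out_offset + n <= size;        // valid only if the whole range fits
	}
};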