diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp
index 98e81eddd2c51..37705d3f5a92a 100644
--- a/src/coreclr/gc/gc.cpp
+++ b/src/coreclr/gc/gc.cpp
@@ -5660,8 +5660,7 @@ void* virtual_alloc (size_t size, bool use_large_pages_p, uint16_t numa_node)
 
     if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
     {
-        gc_heap::reserved_memory_limit =
-            GCScan::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size);
+        gc_heap::reserved_memory_limit = gc_heap::reserved_memory_limit + requested_size;
         if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
         {
             return 0;
@@ -7275,23 +7274,16 @@ void gc_heap::gc_thread_function ()
 
 bool gc_heap::virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number)
 {
-#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_NATIVEAOT)
-    // Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
-    // a host. This will need to be added later.
-#if !defined(FEATURE_CORECLR) && !defined(BUILD_AS_STANDALONE)
-    if (!CLRMemoryHosted())
-#endif
+#ifdef MULTIPLE_HEAPS
+    if (GCToOSInterface::CanEnableGCNumaAware())
     {
-        if (GCToOSInterface::CanEnableGCNumaAware())
-        {
-            uint16_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
-            if (GCToOSInterface::VirtualCommit (addr, size, numa_node))
-                return true;
-        }
+        uint16_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
+        if (GCToOSInterface::VirtualCommit (addr, size, numa_node))
+            return true;
     }
-#else //MULTIPLE_HEAPS && !FEATURE_NATIVEAOT
+#else //MULTIPLE_HEAPS
     UNREFERENCED_PARAMETER(h_number);
-#endif //MULTIPLE_HEAPS && !FEATURE_NATIVEAOT
+#endif //MULTIPLE_HEAPS
 
     //numa aware not enabled, or call failed --> fallback to VirtualCommit()
     return GCToOSInterface::VirtualCommit(addr, size);
@@ -43942,11 +43934,7 @@ void gc_heap::decommit_ephemeral_segment_pages()
 #ifdef HOST_64BIT
         max(min(min(soh_segment_size/32, dd_max_size (dd0)), (generation_size (max_generation) / 10)), (size_t)desired_allocation);
 #else
-#ifdef FEATURE_CORECLR
         desired_allocation;
-#else
-        dd_max_size (dd0);
-#endif //FEATURE_CORECLR
 #endif // HOST_64BIT
 
     uint8_t *decommit_target = heap_segment_allocated (ephemeral_heap_segment) + slack_space;
diff --git a/src/coreclr/gc/gcee.cpp b/src/coreclr/gc/gcee.cpp
index b0b1b02021aad..b48632cf06923 100644
--- a/src/coreclr/gc/gcee.cpp
+++ b/src/coreclr/gc/gcee.cpp
@@ -308,16 +308,12 @@ void gc_heap::fire_etw_allocation_event (size_t allocation_amount,
                                          uint8_t* object_address,
                                          size_t object_size)
 {
-#ifdef FEATURE_NATIVEAOT
-    FIRE_EVENT(GCAllocationTick_V1, (uint32_t)allocation_amount, (uint32_t)gen_to_oh (gen_number));
-#else
     FIRE_EVENT(GCAllocationTick_V4,
         allocation_amount,
         (uint32_t)gen_to_oh (gen_number),
         heap_number,
         object_address,
         object_size);
-#endif //FEATURE_NATIVEAOT
 }
 
 void gc_heap::fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject)
diff --git a/src/coreclr/gc/gcscan.cpp b/src/coreclr/gc/gcscan.cpp
index 6f200810fed59..314857130f471 100644
--- a/src/coreclr/gc/gcscan.cpp
+++ b/src/coreclr/gc/gcscan.cpp
@@ -233,28 +233,6 @@ void GCScan::GcPromotionsGranted (int condemned, int max_gen, ScanContext* sc)
 
     GCToEEInterface::SyncBlockCachePromotionsGranted(max_gen);
 }
-
-size_t GCScan::AskForMoreReservedMemory (size_t old_size, size_t need_size)
-{
-    LIMITED_METHOD_CONTRACT;
-
-#if !defined(FEATURE_CORECLR) && !defined(FEATURE_NATIVEAOT)
-    // call the host....
-
-    IGCHostControl *pGCHostControl = CorHost::GetGCHostControl();
-
-    if (pGCHostControl)
-    {
-        size_t new_max_limit_size = need_size;
-        pGCHostControl->RequestVirtualMemLimit (old_size,
-                                                (SIZE_T*)&new_max_limit_size);
-        return new_max_limit_size;
-    }
-#endif
-
-    return old_size + need_size;
-}
-
 void GCScan::VerifyHandleTable(int condemned, int max_gen, ScanContext* sc)
 {
     LIMITED_METHOD_CONTRACT;
diff --git a/src/coreclr/gc/gcscan.h b/src/coreclr/gc/gcscan.h
index 009241251a3bb..82c687b0c2d9f 100644
--- a/src/coreclr/gc/gcscan.h
+++ b/src/coreclr/gc/gcscan.h
@@ -81,8 +81,6 @@ class GCScan
     // post-promotions callback some roots were demoted
     static void GcDemote (int condemned, int max_gen, ScanContext* sc);
 
-    static size_t AskForMoreReservedMemory (size_t old_size, size_t need_size);
-
     static void VerifyHandleTable(int condemned, int max_gen, ScanContext* sc);
 
     static VOLATILE(int32_t) m_GcStructuresInvalidCnt;
diff --git a/src/coreclr/gc/handletableconstants.h b/src/coreclr/gc/handletableconstants.h
index 9ad57eb8fd961..0f64c3112d91b 100644
--- a/src/coreclr/gc/handletableconstants.h
+++ b/src/coreclr/gc/handletableconstants.h
@@ -7,7 +7,7 @@
 #define FEATURE_REFCOUNTED_HANDLES
 
 // Build support for obsolete handles types into standalone GC to make it usable with older runtimes
-#if defined(BUILD_AS_STANDALONE) && !defined(FEATURE_NATIVEAOT)
+#ifdef BUILD_AS_STANDALONE
 #define FEATURE_ASYNC_PINNED_HANDLES // No longer used in .NET 8+
 #define FEATURE_WEAK_NATIVE_COM_HANDLES // No longer used in .NET 8+
 #endif
diff --git a/src/coreclr/gc/objecthandle.cpp b/src/coreclr/gc/objecthandle.cpp
index 5374229b5b067..0da8c8bb259fe 100644
--- a/src/coreclr/gc/objecthandle.cpp
+++ b/src/coreclr/gc/objecthandle.cpp
@@ -736,7 +736,6 @@ void Ref_Shutdown()
     }
 }
 
-#ifndef FEATURE_NATIVEAOT
 bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket)
 {
     CONTRACTL
@@ -825,7 +824,6 @@ bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket)
         offset = last->dwMaxIndex;
     }
 }
-#endif // !FEATURE_NATIVEAOT
 
 void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket)
 {
diff --git a/src/coreclr/nativeaot/Runtime/gctoclreventsink.cpp b/src/coreclr/nativeaot/Runtime/gctoclreventsink.cpp
index 16ef566053046..b8ffaad1ffe88 100644
--- a/src/coreclr/nativeaot/Runtime/gctoclreventsink.cpp
+++ b/src/coreclr/nativeaot/Runtime/gctoclreventsink.cpp
@@ -161,6 +161,7 @@ void GCToCLREventSink::FireGCGlobalHeapHistory_V4(uint64_t finalYoungestDesired,
 
 void GCToCLREventSink::FireGCAllocationTick_V1(uint32_t allocationAmount, uint32_t allocationKind)
 {
+    ASSERT(!"Superseded by FireGCAllocationTick_V4");
 }
 
 MethodTable* GetLastAllocEEType();
@@ -273,7 +274,7 @@ void GCToCLREventSink::FireBGC1stConEnd()
 
 void GCToCLREventSink::FireBGC1stSweepEnd(uint32_t genNumber)
 {
-    //FireEtwBGC1stSweepEnd(genNumber, GetClrInstanceId()); TODO
+    FireEtwBGC1stSweepEnd(genNumber, GetClrInstanceId());
 }
 
 void GCToCLREventSink::FireBGC2ndNonConBegin()
diff --git a/src/coreclr/vm/gctoclreventsink.cpp b/src/coreclr/vm/gctoclreventsink.cpp
index 5eb30d8210d75..fff929d51567a 100644
--- a/src/coreclr/vm/gctoclreventsink.cpp
+++ b/src/coreclr/vm/gctoclreventsink.cpp
@@ -151,7 +151,7 @@ void GCToCLREventSink::FireGCAllocationTick_V1(uint32_t allocationAmount, uint32
 {
     LIMITED_METHOD_CONTRACT;
 
-    FireEtwGCAllocationTick_V1(allocationAmount, allocationKind, GetClrInstanceId());
+    _ASSERTE(!"Superseded by FireGCAllocationTick_V4");
 }
 
 void GCToCLREventSink::FireGCAllocationTick_V4(uint64_t allocationAmount,