[WIP] Disallow gen1 - code by @maoni0 as a PR with the Fragmentation Fix Changes #105166

Closed · wants to merge 2 commits

Changes from all commits
237 changes: 209 additions & 28 deletions src/coreclr/gc/gc.cpp
@@ -2859,6 +2859,7 @@ gc_heap::dynamic_heap_count_data_t SVR::gc_heap::dynamic_heap_count_data;
size_t gc_heap::current_total_soh_stable_size = 0;
uint64_t gc_heap::last_suspended_end_time = 0;
size_t gc_heap::gc_index_full_gc_end = 0;
bool gc_heap::trigger_bgc_for_rethreading_p = false;

#ifdef STRESS_DYNAMIC_HEAP_COUNT
int gc_heap::heaps_in_this_gc = 0;
@@ -3326,7 +3327,7 @@ gc_heap::dt_high_frag_p (gc_tuning_point tp,
}
}
#endif //!MULTIPLE_HEAPS
size_t fr = generation_unusable_fragmentation (generation_of (gen_number));
size_t fr = generation_unusable_fragmentation (generation_of (gen_number), heap_number);
ret = (fr > dd_fragmentation_limit(dd));
if (ret)
{
@@ -15855,7 +15856,6 @@ void allocator::unlink_item (unsigned int bn, uint8_t* item, uint8_t* prev_item,
void allocator::unlink_item_no_undo (unsigned int bn, uint8_t* item)
{
alloc_list* al = &alloc_list_of (bn);

uint8_t* next_item = free_list_slot (item);
uint8_t* prev_item = free_list_prev (item);

@@ -15926,7 +15926,6 @@ void allocator::unlink_item_no_undo (uint8_t* item, size_t size)
void allocator::unlink_item_no_undo_added (unsigned int bn, uint8_t* item, uint8_t* previous_item)
{
alloc_list* al = &alloc_list_of (bn);

uint8_t* next_item = free_list_slot (item);
uint8_t* prev_item = free_list_prev (item);

@@ -20957,6 +20956,31 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
#endif //STRESS_HEAP

#ifdef BACKGROUND_GC
#ifdef DYNAMIC_HEAP_COUNT
if (trigger_bgc_for_rethreading_p)
{
if (background_running_p())
{
// trigger_bgc_for_rethreading_p being true indicates we did not rethread gen2 FL items when we changed HC.
// So some heaps could have no gen2 FL at all, which means that if we did a gen1 GC during this BGC, we would
// increase gen2 size. We chose to prioritize not increasing gen2 size, so we disallow gen1 GCs.
n = 0;
}
else
{
dprintf (6666, ("was going to be g%d %s GC, HC change request this GC to be a BGC unless it's an NGC2",
n, (*blocking_collection_p ? "blocking" : "non blocking")));

// If we already decided to do a blocking gen2, which would also achieve the purpose of building up a new
// gen2 FL, let it happen; otherwise we want to trigger a BGC.
if (!((n == max_generation) && *blocking_collection_p))
{
n = max_generation;
}
}
}
else
#endif //DYNAMIC_HEAP_COUNT
if ((n == max_generation) && background_running_p())
{
n = max_generation - 1;
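
For reference, a minimal sketch of the decision this hunk adds, with the GC state reduced to plain parameters; `select_condemned_generation` and its signature are hypothetical stand-ins for illustration, not the PR's API:

```cpp
// Sketch: the generation-selection rule from the hunk above, as a pure function.
int select_condemned_generation (int n, bool bgc_running, bool blocking_collection,
                                 bool trigger_bgc_for_rethreading)
{
    const int max_generation = 2;
    if (trigger_bgc_for_rethreading)
    {
        if (bgc_running)
            return 0;               // gen2 FL not rebuilt yet; disallow gen1 GCs
        if (!((n == max_generation) && blocking_collection))
            return max_generation;  // escalate so this GC can become a BGC
        return n;                   // a blocking gen2 also rebuilds the gen2 FL
    }
    if ((n == max_generation) && bgc_running)
        return max_generation - 1;  // pre-existing rule: no blocking gen2 during a BGC
    return n;
}
```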
@@ -21264,6 +21288,68 @@ bool gc_heap::init_table_for_region (int gen_number, heap_segment* region)
}
#endif //USE_REGIONS

// The following 2 methods use integer division to prevent a potential floating point exception.
// An FPE may occur if we use floating point division, because of speculative execution.
//
// Return the percentage of efficiency (between 0 and 100) of the allocator.
inline
size_t gc_heap::generation_allocator_efficiency_percent (generation* inst)
{
#ifdef DYNAMIC_HEAP_COUNT
if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes)
{
uint64_t total_plan_allocated = generation_total_plan_allocated (inst);
uint64_t condemned_allocated = generation_condemned_allocated (inst);
return ((total_plan_allocated == 0) ? 0 : (100 * (total_plan_allocated - condemned_allocated) / total_plan_allocated));
}
else
#endif //DYNAMIC_HEAP_COUNT
{
uint64_t free_obj_space = generation_free_obj_space (inst);
uint64_t free_list_allocated = generation_free_list_allocated (inst);
if ((free_list_allocated + free_obj_space) == 0)
return 0;
return (size_t)((100 * free_list_allocated) / (free_list_allocated + free_obj_space));
}
}

inline
size_t gc_heap::generation_unusable_fragmentation (generation* inst, int hn)
{
#ifdef DYNAMIC_HEAP_COUNT
if (dynamic_adaptation_mode == dynamic_adaptation_to_application_sizes)
{
uint64_t total_plan_allocated = generation_total_plan_allocated (inst);
uint64_t condemned_allocated = generation_condemned_allocated (inst);
uint64_t unusable_frag = 0;
size_t fo_space = (((ptrdiff_t)generation_free_obj_space (inst) < 0) ? 0 : generation_free_obj_space (inst));

if (total_plan_allocated != 0)
{
unusable_frag = fo_space + (condemned_allocated * generation_free_list_space (inst) / total_plan_allocated);
}

dprintf (6666, ("h%d g%d FLa: %Id, ESa: %Id, Ca: %Id | FO: %Id, FL %Id, fl effi %.3f, unusable fl is %Id",
hn, inst->gen_num,
generation_free_list_allocated (inst), generation_end_seg_allocated (inst), (size_t)condemned_allocated,
fo_space, generation_free_list_space (inst),
((total_plan_allocated == 0) ? 1.0 : ((float)(total_plan_allocated - condemned_allocated) / (float)total_plan_allocated)),
(size_t)unusable_frag));

return (size_t)unusable_frag;
}
else
#endif //DYNAMIC_HEAP_COUNT
{
uint64_t free_obj_space = generation_free_obj_space (inst);
uint64_t free_list_allocated = generation_free_list_allocated (inst);
uint64_t free_list_space = generation_free_list_space (inst);
if ((free_list_allocated + free_obj_space) == 0)
return 0;
return (size_t)(free_obj_space + (free_obj_space * free_list_space) / (free_list_allocated + free_obj_space));
}
}

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function.
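
A worked example of the two integer-division formulas above, using made-up numbers (a standalone illustration, not GC code):

```cpp
#include <cstdint>
#include <cstdio>

int main ()
{
    // hypothetical counters for one generation on one heap
    uint64_t total_plan_allocated = 1000;  // FL + end-of-seg + condemned allocations
    uint64_t condemned_allocated  = 250;   // portion that landed in condemned space
    uint64_t free_list_space      = 400;
    uint64_t free_obj_space       = 32;

    // efficiency: share of planned allocation NOT satisfied from condemned space
    uint64_t efficiency_percent = (total_plan_allocated == 0) ? 0 :
        (100 * (total_plan_allocated - condemned_allocated) / total_plan_allocated);

    // unusable fragmentation: free objects, plus the slice of the free list we
    // expect to go unused, scaled by the condemned share of plan allocation
    uint64_t unusable_frag = (total_plan_allocated == 0) ? free_obj_space :
        (free_obj_space + (condemned_allocated * free_list_space) / total_plan_allocated);

    printf ("efficiency: %llu%%, unusable fragmentation: %llu\n",
            (unsigned long long)efficiency_percent,  // 75
            (unsigned long long)unusable_frag);      // 32 + 100 = 132
    return 0;
}
```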
@@ -22780,6 +22866,10 @@ void gc_heap::gc1()

#ifdef DYNAMIC_HEAP_COUNT
update_total_soh_stable_size();
if ((settings.condemned_generation == max_generation) && trigger_bgc_for_rethreading_p)
{
trigger_bgc_for_rethreading_p = false;
}
#endif //DYNAMIC_HEAP_COUNT

fire_pevents();
@@ -24276,6 +24366,14 @@ void gc_heap::garbage_collect (int n)

settings.init_mechanisms();
settings.condemned_generation = gen;

#ifdef DYNAMIC_HEAP_COUNT
if (trigger_bgc_for_rethreading_p)
{
settings.condemned_generation = 0;
}
#endif //DYNAMIC_HEAP_COUNT

settings.gc_index = (size_t)dd_collection_count (dynamic_data_of (0)) + 2;
do_pre_gc();

@@ -25133,7 +25231,7 @@ void gc_heap::recommission_heap()
// mark_stack_array_length = 0;
// mark_stack_array = nullptr;

generation_skip_ratio = 0;
generation_skip_ratio = 100;
gen0_must_clear_bricks = 0;

freeable_uoh_segment = nullptr;
@@ -26120,25 +26218,35 @@ bool gc_heap::change_heap_count (int new_n_heaps)
gc_t_join.join (this, gc_join_merge_temp_fl);
if (gc_t_join.joined ())
{
#ifdef BACKGROUND_GC
// For now I'm always setting it to true. This should be set based on heuristics like the number of
// FL items. I'm currently rethreading all generations' FL except gen2's. When the next GC happens,
// it will be a BGC (unless it's a blocking gen2 which also works). And when BGC sweep starts we will
// build the gen2 FL from scratch.
trigger_bgc_for_rethreading_p = true;
#endif //BACKGROUND_GC
gc_t_join.restart ();
}

// rethread the free lists
for (int gen_idx = 0; gen_idx < total_generation_count; gen_idx++)
{
if (heap_number < old_n_heaps)
if (gen_idx != max_generation)
{
dprintf (3, ("h%d calling per heap work!", heap_number));
rethread_fl_items (gen_idx);
}
if (heap_number < old_n_heaps)
{
dprintf (3, ("h%d calling per heap work!", heap_number));
rethread_fl_items (gen_idx);
}

// join for merging the free lists
gc_t_join.join (this, gc_join_merge_temp_fl);
if (gc_t_join.joined ())
{
merge_fl_from_other_heaps (gen_idx, new_n_heaps, old_n_heaps);
// join for merging the free lists
gc_t_join.join (this, gc_join_merge_temp_fl);
if (gc_t_join.joined ())
{
merge_fl_from_other_heaps (gen_idx, new_n_heaps, old_n_heaps);

gc_t_join.restart ();
gc_t_join.restart ();
}
}
}
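
The loop above leans on the `gc_t_join` barrier idiom: every heap thread blocks in `join()`, exactly one thread observes `joined()` returning true and runs the serial merge, and `restart()` then releases the rest. A schematic of that pattern re-expressed with `std::barrier` purely for illustration (the thread count and printouts are invented):

```cpp
// Schematic only: one serial step per phase, run by exactly one thread,
// with all "heap threads" blocked until it finishes.
#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

int main ()
{
    const int n_heaps = 4, total_generation_count = 5, max_generation = 2;

    for (int gen_idx = 0; gen_idx < total_generation_count; gen_idx++)
    {
        if (gen_idx == max_generation)
            continue;  // gen2's FL is deliberately left alone (the BGC rebuilds it)

        // the completion function plays the role of the joined()/restart() block
        std::barrier sync (n_heaps, [gen_idx]() noexcept {
            printf ("merging temp FLs for gen %d (serial, one thread)\n", gen_idx);
        });

        std::vector<std::jthread> heaps;
        for (int h = 0; h < n_heaps; h++)
            heaps.emplace_back ([&sync, gen_idx, h] {
                printf ("h%d rethreading FL items for gen %d\n", h, gen_idx);
                sync.arrive_and_wait ();  // the "join"
            });
        // jthreads join on destruction at the end of each iteration
    }
    return 0;
}
```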

@@ -26196,8 +26304,17 @@ generation* gen = hp->generation_of (gen_idx);
generation* gen = hp->generation_of (gen_idx);
size_t gen_size = hp->generation_size (gen_idx);
dd_fragmentation (dd) = generation_free_list_space (gen);
assert (gen_size >= dd_fragmentation (dd));
dd_current_size (dd) = gen_size - dd_fragmentation (dd);
if (gen_idx == max_generation)
{
// Just set it to 0 so it doesn't cause any problems. The next GC, which will be a gen2, will update it to the correct value.
dd_current_size (dd) = 0;
}
else
{
// We cannot assert this for gen2 because we didn't actually rethread the gen2 FL.
assert (gen_size >= dd_fragmentation (dd));
dd_current_size (dd) = gen_size - dd_fragmentation (dd);
}

dprintf (3, ("h%d g%d: budget: %zd, left in budget: %zd, generation_size: %zd fragmentation: %zd current_size: %zd",
i,
@@ -39380,6 +39497,13 @@ void gc_heap::bgc_thread_function()
fire_pevents();
#endif //MULTIPLE_HEAPS

#ifdef DYNAMIC_HEAP_COUNT
if (trigger_bgc_for_rethreading_p)
{
trigger_bgc_for_rethreading_p = false;
}
#endif //DYNAMIC_HEAP_COUNT

c_write (settings.concurrent, FALSE);
gc_background_running = FALSE;
keep_bgc_threads_p = FALSE;
@@ -43736,6 +43860,24 @@ void gc_heap::compute_new_dynamic_data (int gen_number)
//keep track of fragmentation
dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen);

// We need to reset the condemned alloc for the condemned generation because it participates in the free list
// efficiency calculation. If a generation is condemned, all the allocations into it during that GC are themselves
// condemned, so it wouldn't make sense to use this value to calculate the FL efficiency: at that point the FL
// hasn't been built yet.
generation_condemned_allocated (gen) = 0;

if (settings.concurrent)
{
// For BGC we could have non-zero values due to gen1 FGCs. We reset all 3 allocs to start anew.
generation_free_list_allocated (gen) = 0;
generation_end_seg_allocated (gen) = 0;
}
else
{
assert (generation_free_list_allocated (gen) == 0);
assert (generation_end_seg_allocated (gen) == 0);
}

// make sure the subtraction below doesn't overflow
if (dd_fragmentation (dd) <= total_gen_size)
dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
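
To make the comment above concrete, here is a small illustration of how a stale condemned alloc would distort the efficiency number; `efficiency_percent` below just mirrors the DATAS-mode formula from `generation_allocator_efficiency_percent`, and the numbers are invented:

```cpp
#include <cassert>
#include <cstdint>

// Mirrors 100 * (total_plan - condemned) / total_plan from the method above.
static uint64_t efficiency_percent (uint64_t total_plan, uint64_t condemned)
{
    return (total_plan == 0) ? 0 : (100 * (total_plan - condemned) / total_plan);
}

int main ()
{
    // In a GC that condemns this generation, allocations into it count as
    // condemned, and its FL hasn't been built yet. Without the reset, the
    // metric would read 0% ("the FL was useless") even though there was no
    // FL to allocate from:
    assert (efficiency_percent (800, 800) == 0);

    // After zeroing condemned_allocated at GC end, the next GC's sample
    // starts clean:
    assert (efficiency_percent (800, 0) == 100);
    return 0;
}
```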
@@ -45648,20 +45790,41 @@ void gc_heap::background_sweep()
concurrent_print_time_delta ("Sw");
dprintf (2, ("---- (GC%zu)Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));

bool rebuild_maxgen_fl_p = true;

#ifdef DOUBLY_LINKED_FL
#ifdef DYNAMIC_HEAP_COUNT
rebuild_maxgen_fl_p = trigger_bgc_for_rethreading_p;
#else
rebuild_maxgen_fl_p = false;
#endif //DYNAMIC_HEAP_COUNT
#endif //DOUBLY_LINKED_FL

for (int i = 0; i <= max_generation; i++)
{
generation* gen_to_reset = generation_of (i);

bool clear_fl_p = true;

#ifdef DOUBLY_LINKED_FL
if (i == max_generation)
{
dprintf (2, ("h%d: gen2 still has FL: %zd, FO: %zd",
clear_fl_p = rebuild_maxgen_fl_p;

dprintf (6666, ("h%d: gen2 still has FL: %zd, FO: %zd, clear gen2 FL %s",
heap_number,
generation_free_list_space (gen_to_reset),
generation_free_obj_space (gen_to_reset)));
generation_free_obj_space (gen_to_reset),
(clear_fl_p ? "yes" : "no")));
}
else
#endif //DOUBLY_LINKED_FL

if (clear_fl_p)
{
if (i == max_generation)
{
dprintf (6666, ("clearing g2 FL for h%d!", heap_number));
}
generation_allocator (gen_to_reset)->clear();
generation_free_list_space (gen_to_reset) = 0;
generation_free_obj_space (gen_to_reset) = 0;
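
The `#ifdef` ladder above collapses to a small decision table; a sketch with plain bools standing in for the build flags (the function name is hypothetical):

```cpp
// Sketch: when does background sweep clear and rebuild the gen2 free list?
bool should_rebuild_maxgen_fl (bool doubly_linked_fl,     // DOUBLY_LINKED_FL
                               bool dynamic_heap_count,   // DYNAMIC_HEAP_COUNT
                               bool trigger_bgc_for_rethreading_p)
{
    if (!doubly_linked_fl)
        return true;   // without a doubly linked FL, BGC always rebuilds gen2's FL
    if (dynamic_heap_count)
        return trigger_bgc_for_rethreading_p;  // only when a heap count change
                                               // deferred the gen2 rethread
    return false;      // doubly linked FL: gen2's FL survives the BGC sweep
}
```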
@@ -45769,6 +45932,10 @@

for (int i = max_generation; i < total_generation_count; i++)
{
// TEMP BEG!!!!!!!!!!!
bool printed_p = false;
// TEMP END!!!!!!!!!!!

generation* gen = generation_of (i);
heap_segment* gen_start_seg = heap_segment_rw (generation_start_segment(gen));
heap_segment* next_seg = 0;
@@ -45922,7 +46089,7 @@ next_sweep_obj = o + size_o;
next_sweep_obj = o + size_o;

#ifdef DOUBLY_LINKED_FL
if (gen != large_object_generation)
if ((i == max_generation) && !rebuild_maxgen_fl_p)
{
if (method_table (o) == g_gc_pFreeObjectMethodTable)
{
@@ -45938,13 +46105,17 @@
assert ((ptrdiff_t)generation_free_list_space (gen) >= 0);
generation_free_obj_space (gen) += size_o;

dprintf (3333, ("[h%d] gen2F-: %p->%p(%zd) FL: %zd",
heap_number, o, (o + size_o), size_o,
generation_free_list_space (gen)));
dprintf (3333, ("h%d: gen2FO+: %p(%zx)->%zd (g: %zd)",
heap_number, o, size_o,
generation_free_obj_space (gen),
free_obj_size_last_gap));
if (!printed_p)
{
dprintf (6666, ("[h%d] gen2F-: %p->%p(%zd) FL: %zd",
heap_number, o, (o + size_o), size_o,
generation_free_list_space (gen)));
dprintf (6666, ("h%d: gen2FO+: %p(%zx)->%zd (g: %zd)",
heap_number, o, size_o,
generation_free_obj_space (gen),
free_obj_size_last_gap));
printed_p = true;
}
remove_gen_free (max_generation, size_o);
}
else
@@ -47463,7 +47634,8 @@ void gc_heap::verify_regions (int gen_number, bool can_verify_gen_num, bool can_
// 2) the tail region is the same as the last region if we follow the list of regions
// in that generation.
// 3) no region is pointing to itself.
// 4) if we can verify gen num, each region's gen_num and plan_gen_num are the same and
// 4) the region's heap is correct.
// 5) if we can verify gen num, each region's gen_num and plan_gen_num are the same and
// they are the right generation.
generation* gen = generation_of (gen_number);
int num_regions_in_gen = 0;
Expand Down Expand Up @@ -47499,6 +47671,15 @@ void gc_heap::verify_regions (int gen_number, bool can_verify_gen_num, bool can_
FATAL_GC_ERROR();
}

#ifdef MULTIPLE_HEAPS
if (heap_segment_heap (seg_in_gen) != __this)
{
dprintf (6666, ("h%d gen%d region %p heap is %Ix, diff from the heap it's on %Ix!",
heap_number, gen_number, heap_segment_heap (seg_in_gen), __this));
FATAL_GC_ERROR();
}
#endif //MULTIPLE_HEAPS

prev_region_in_gen = seg_in_gen;
num_regions_in_gen++;
heap_segment* next_region = heap_segment_next (seg_in_gen);