src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 2591 : 6814390: G1: remove the concept of non-generational G1
Summary: Removed the possibility to turn off generational mode for G1.
Reviewed-by: johnc, ysr, tonyp

--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
1246 
1247     // Make sure we'll choose a new allocation region afterwards.
1248     release_mutator_alloc_region();
1249     abandon_gc_alloc_regions();
1250     g1_rem_set()->cleanupHRRS();
1251     tear_down_region_lists();
1252 
1253     // We should call this after we retire any currently active alloc
1254     // regions so that all the ALLOC / RETIRE events are generated
1255     // before the start GC event.
1256     _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1257 
1258     // We may have added regions to the current incremental collection
1259     // set between the last GC or pause and now. We need to clear the
1260     // incremental collection set and then start rebuilding it afresh
1261     // after this full GC.
1262     abandon_collection_set(g1_policy()->inc_cset_head());
1263     g1_policy()->clear_incremental_cset();
1264     g1_policy()->stop_incremental_cset_building();
1265 
1266     if (g1_policy()->in_young_gc_mode()) {
1267       empty_young_list();
1268       g1_policy()->set_full_young_gcs(true);
1269     }
1270 
1271     // See the comment in G1CollectedHeap::ref_processing_init() about
1272     // how reference processing currently works in G1.
1273 
1274     // Temporarily make reference _discovery_ single threaded (non-MT).
1275     ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
1276 
1277     // Temporarily make refs discovery atomic
1278     ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
1279 
1280     // Temporarily clear _is_alive_non_header
1281     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
1282 
1283     ref_processor()->enable_discovery();
1284     ref_processor()->setup_policy(do_clear_all_soft_refs);
1285     // Do collection work
1286     {
1287       HandleMark hm;  // Discard invalid handles created during gc
1288       G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
1289     }


1370     // evacuation pause.
1371     clear_cset_fast_test();
1372 
1373     init_mutator_alloc_region();
1374 
1375     double end = os::elapsedTime();
1376     g1_policy()->record_full_collection_end();
1377 
1378 #ifdef TRACESPINNING
1379     ParallelTaskTerminator::print_termination_counts();
1380 #endif
1381 
1382     gc_epilogue(true);
1383 
1384     // Discard all rset updates
1385     JavaThread::dirty_card_queue_set().abandon_logs();
1386     assert(!G1DeferredRSUpdate
1387            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1388   }
1389 
1390   if (g1_policy()->in_young_gc_mode()) {
1391     _young_list->reset_sampled_info();
1392     // At this point there should be no regions in the
1393     // entire heap tagged as young.
1394     assert( check_young_list_empty(true /* check_heap */),
1395             "young list should be empty at this point");
1396   }
1397 
1398   // Update the number of full collections that have been completed.
1399   increment_full_collections_completed(false /* concurrent */);
1400 
1401   _hrs.verify_optional();
1402   verify_region_sets_optional();
1403 
1404   if (PrintHeapAtGC) {
1405     Universe::print_heap_after_gc();
1406   }
1407   g1mm()->update_counters();
1408 
1409   return true;
1410 }
1411 
1412 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1413   // do_collection() will return whether it succeeded in performing
1414   // the GC. Currently, there is no facility in the
1415   // do_full_collection() API to notify the caller that the collection
1416   // did not succeed (e.g., because it was locked out by the GC


3144 
3145   HeapWord* result = op.result();
3146   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3147   assert(result == NULL || ret_succeeded,
3148          "the result should be NULL if the VM did not succeed");
3149   *succeeded = ret_succeeded;
3150 
3151   assert_heap_not_locked();
3152   return result;
3153 }
3154 
3155 void
3156 G1CollectedHeap::doConcurrentMark() {
3157   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3158   if (!_cmThread->in_progress()) {
3159     _cmThread->set_started();
3160     CGC_lock->notify();
3161   }
3162 }
3163 
3164 void G1CollectedHeap::do_sync_mark() {
3165   _cm->checkpointRootsInitial();
3166   _cm->markFromRoots();
3167   _cm->checkpointRootsFinal(false);
3168 }
3169 
3170 // <NEW PREDICTION>
3171 
3172 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
3173                                                        bool young) {
3174   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
3175 }
3176 
3177 void G1CollectedHeap::check_if_region_is_too_expensive(double
3178                                                            predicted_time_ms) {
3179   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
3180 }
3181 
3182 size_t G1CollectedHeap::pending_card_num() {
3183   size_t extra_cards = 0;
3184   JavaThread *curr = Threads::first();
3185   while (curr != NULL) {
3186     DirtyCardQueue& dcq = curr->dirty_card_queue();
3187     extra_cards += dcq.size();
3188     curr = curr->next();
3189   }


3300   }
3301 
3302   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3303   ResourceMark rm;
3304 
3305   if (PrintHeapAtGC) {
3306     Universe::print_heap_before_gc();
3307   }
3308 
3309   verify_region_sets_optional();
3310   verify_dirty_young_regions();
3311 
3312   {
3313     // This call will decide whether this pause is an initial-mark
3314     // pause. If it is, during_initial_mark_pause() will return true
3315     // for the duration of this pause.
3316     g1_policy()->decide_on_conc_mark_initiation();
3317 
3318     char verbose_str[128];
3319     sprintf(verbose_str, "GC pause ");
3320     if (g1_policy()->in_young_gc_mode()) {
3321       if (g1_policy()->full_young_gcs())
3322         strcat(verbose_str, "(young)");
3323       else
3324         strcat(verbose_str, "(partial)");
3325     }
3326     if (g1_policy()->during_initial_mark_pause()) {
3327       strcat(verbose_str, " (initial-mark)");
3328       // We are about to start a marking cycle, so we increment the
3329       // full collection counter.
3330       increment_total_full_collections();
3331     }
3332 
3333     // if PrintGCDetails is on, we'll print long statistics information
3334     // in the collector policy code, so let's not print this as the output
3335     // is messy if we do.
3336     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
3337     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3338     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
3339 
3340     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3341     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3342 
3343     // If the secondary_free_list is not empty, append it to the
3344     // free_list. No need to wait for the cleanup operation to finish;
3345     // the region allocation code will check the secondary_free_list
3346     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3347     // set, skip this step so that the region allocation code has to
3348     // get entries from the secondary_free_list.
3349     if (!G1StressConcRegionFreeing) {
3350       append_secondary_free_list_if_not_empty_with_lock();
3351     }
3352 
3353     if (g1_policy()->in_young_gc_mode()) {
3354       assert(check_young_list_well_formed(),
3355              "young list should be well formed");
3356     }
3357 
3358     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3359       IsGCActiveMark x;
3360 
3361       gc_prologue(false);
3362       increment_total_collections(false /* full gc */);
3363       increment_gc_time_stamp();
3364 
3365       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
3366         HandleMark hm;  // Discard invalid handles created during verification
3367         gclog_or_tty->print(" VerifyBeforeGC:");
3368         prepare_for_verify();
3369         Universe::verify(/* allow dirty */ false,
3370                          /* silent      */ false,
3371                          /* option      */ VerifyOption_G1UsePrevMarking);
3372 
3373       }
3374 
3375       COMPILER2_PRESENT(DerivedPointerTable::clear());
3376 


3477 
3478       // Initialize the GC alloc regions.
3479       init_gc_alloc_regions();
3480 
3481       // Actually do the work...
3482       evacuate_collection_set();
3483 
3484       free_collection_set(g1_policy()->collection_set());
3485       g1_policy()->clear_collection_set();
3486 
3487       cleanup_surviving_young_words();
3488 
3489       // Start a new incremental collection set for the next pause.
3490       g1_policy()->start_incremental_cset_building();
3491 
3492       // Clear the _cset_fast_test bitmap in anticipation of adding
3493       // regions to the incremental collection set for the next
3494       // evacuation pause.
3495       clear_cset_fast_test();
3496 
3497       if (g1_policy()->in_young_gc_mode()) {
3498         _young_list->reset_sampled_info();
3499 
3500         // Don't check the whole heap at this point as the
3501         // GC alloc regions from this pause have been tagged
3502         // as survivors and moved on to the survivor list.
3503         // Survivor regions will fail the !is_young() check.
3504         assert(check_young_list_empty(false /* check_heap */),
3505                "young list should be empty");
3506 
3507 #if YOUNG_LIST_VERBOSE
3508         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3509         _young_list->print();
3510 #endif // YOUNG_LIST_VERBOSE
3511 
3512         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3513                                           _young_list->first_survivor_region(),
3514                                           _young_list->last_survivor_region());
3515 
3516         _young_list->reset_auxilary_lists();
3517       }
3518 
3519       if (evacuation_failed()) {
3520         _summary_bytes_used = recalculate_used();
3521       } else {
3522         // The "used" of the collection set have already been subtracted
3523         // when they were freed.  Add in the bytes evacuated.
3524         _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3525       }
3526 
3527       if (g1_policy()->in_young_gc_mode() &&
3528           g1_policy()->during_initial_mark_pause()) {
3529         concurrent_mark()->checkpointRootsInitialPost();
3530         set_marking_started();
3531         // CAUTION: after the doConcurrentMark() call below,
3532         // the concurrent marking thread(s) could be running
3533         // concurrently with us. Make sure that anything after
3534         // this point does not assume that we are the only GC thread
3535         // running. Note: of course, the actual marking work will
3536         // not start until the safepoint itself is released in
3537         // ConcurrentGCThread::safepoint_desynchronize().
3538         doConcurrentMark();
3539       }
3540 
3541       allocate_dummy_regions();
3542 
3543 #if YOUNG_LIST_VERBOSE
3544       gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3545       _young_list->print();
3546       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3547 #endif // YOUNG_LIST_VERBOSE
3548 


5074     return false;
5075   }
5076   bool success() { return _success; }
5077 };
5078 
5079 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5080   bool ret = _young_list->check_list_empty(check_sample);
5081 
5082   if (check_heap) {
5083     NoYoungRegionsClosure closure;
5084     heap_region_iterate(&closure);
5085     ret = ret && closure.success();
5086   }
5087 
5088   return ret;
5089 }
5090 
5091 void G1CollectedHeap::empty_young_list() {
5092   assert(heap_lock_held_for_gc(),
5093               "the heap lock should already be held by or for this thread");
5094   assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
5095 
5096   _young_list->empty_list();
5097 }
5098 
5099 // Done at the start of full GC.
5100 void G1CollectedHeap::tear_down_region_lists() {
5101   _free_list.remove_all();
5102 }
5103 
5104 class RegionResetter: public HeapRegionClosure {
5105   G1CollectedHeap* _g1h;
5106   FreeRegionList _local_free_list;
5107 
5108 public:
5109   RegionResetter() : _g1h(G1CollectedHeap::heap()),
5110                      _local_free_list("Local Free List for RegionResetter") { }
5111 
5112   bool doHeapRegion(HeapRegion* r) {
5113     if (r->continuesHumongous()) return false;
5114     if (r->top() > r->bottom()) {


+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

1246 
1247     // Make sure we'll choose a new allocation region afterwards.
1248     release_mutator_alloc_region();
1249     abandon_gc_alloc_regions();
1250     g1_rem_set()->cleanupHRRS();
1251     tear_down_region_lists();
1252 
1253     // We should call this after we retire any currently active alloc
1254     // regions so that all the ALLOC / RETIRE events are generated
1255     // before the start GC event.
1256     _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1257 
1258     // We may have added regions to the current incremental collection
1259     // set between the last GC or pause and now. We need to clear the
1260     // incremental collection set and then start rebuilding it afresh
1261     // after this full GC.
1262     abandon_collection_set(g1_policy()->inc_cset_head());
1263     g1_policy()->clear_incremental_cset();
1264     g1_policy()->stop_incremental_cset_building();
1265 

1266     empty_young_list();
1267     g1_policy()->set_full_young_gcs(true);

1268 
1269     // See the comment in G1CollectedHeap::ref_processing_init() about
1270     // how reference processing currently works in G1.
1271 
1272     // Temporarily make reference _discovery_ single threaded (non-MT).
1273     ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
1274 
1275     // Temporarily make refs discovery atomic
1276     ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
1277 
1278     // Temporarily clear _is_alive_non_header
1279     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
1280 
1281     ref_processor()->enable_discovery();
1282     ref_processor()->setup_policy(do_clear_all_soft_refs);
1283     // Do collection work
1284     {
1285       HandleMark hm;  // Discard invalid handles created during gc
1286       G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
1287     }
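
// A minimal standalone sketch of the save/restore idiom the three
// ReferenceProcessor*Mutator stack objects above rely on: the constructor
// records the current setting and installs a temporary one, and the
// destructor restores the original when the scope ends. The names here
// (ProcessorSettings, ScopedFlagMutator) are illustrative, not HotSpot APIs.

#include <cassert>

struct ProcessorSettings {
  bool mt_discovery;   // stand-in for the multi-threaded discovery flag
};

class ScopedFlagMutator {
  ProcessorSettings* _p;
  bool               _saved;
public:
  ScopedFlagMutator(ProcessorSettings* p, bool temporary)
      : _p(p), _saved(p->mt_discovery) {
    _p->mt_discovery = temporary;    // install the temporary setting
  }
  ~ScopedFlagMutator() {
    _p->mt_discovery = _saved;       // restore the original on scope exit
  }
};

int main() {
  ProcessorSettings rp = { true };
  {
    ScopedFlagMutator rp_disc_ser(&rp, false);  // discovery is serial here
    assert(!rp.mt_discovery);
  }                                             // destructor restores the flag
  assert(rp.mt_discovery);
  return 0;
}
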


1368     // evacuation pause.
1369     clear_cset_fast_test();
1370 
1371     init_mutator_alloc_region();
1372 
1373     double end = os::elapsedTime();
1374     g1_policy()->record_full_collection_end();
1375 
1376 #ifdef TRACESPINNING
1377     ParallelTaskTerminator::print_termination_counts();
1378 #endif
1379 
1380     gc_epilogue(true);
1381 
1382     // Discard all rset updates
1383     JavaThread::dirty_card_queue_set().abandon_logs();
1384     assert(!G1DeferredRSUpdate
1385            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1386   }
1387 

1388   _young_list->reset_sampled_info();
1389   // At this point there should be no regions in the
1390   // entire heap tagged as young.
1391   assert( check_young_list_empty(true /* check_heap */),
1392           "young list should be empty at this point");

1393 
1394   // Update the number of full collections that have been completed.
1395   increment_full_collections_completed(false /* concurrent */);
1396 
1397   _hrs.verify_optional();
1398   verify_region_sets_optional();
1399 
1400   if (PrintHeapAtGC) {
1401     Universe::print_heap_after_gc();
1402   }
1403   g1mm()->update_counters();
1404 
1405   return true;
1406 }
1407 
1408 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1409   // do_collection() will return whether it succeeded in performing
1410   // the GC. Currently, there is no facility in the
1411   // do_full_collection() API to notify the caller that the collection
1412   // did not succeed (e.g., because it was locked out by the GC


3140 
3141   HeapWord* result = op.result();
3142   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3143   assert(result == NULL || ret_succeeded,
3144          "the result should be NULL if the VM did not succeed");
3145   *succeeded = ret_succeeded;
3146 
3147   assert_heap_not_locked();
3148   return result;
3149 }
3150 
3151 void
3152 G1CollectedHeap::doConcurrentMark() {
3153   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3154   if (!_cmThread->in_progress()) {
3155     _cmThread->set_started();
3156     CGC_lock->notify();
3157   }
3158 }
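
// doConcurrentMark() above only signals the marking thread; the sketch below
// shows the same lock-protected set-flag-and-notify handshake in portable
// C++11. std::mutex/std::condition_variable stand in for HotSpot's CGC_lock,
// and all names here are illustrative, not the real VM types.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex              cgc_lock;
std::condition_variable cgc_cv;
bool                    started = false;

void start_marking() {                 // plays the role of doConcurrentMark()
  std::lock_guard<std::mutex> x(cgc_lock);
  if (!started) {                      // only wake the thread if it is idle
    started = true;                    // cf. _cmThread->set_started()
    cgc_cv.notify_one();               // cf. CGC_lock->notify()
  }
}

void marking_thread() {
  std::unique_lock<std::mutex> x(cgc_lock);
  cgc_cv.wait(x, [] { return started; });   // sleep until started
  std::printf("concurrent marking begins\n");
}

int main() {
  std::thread t(marking_thread);
  start_marking();
  t.join();
  return 0;
}
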
3159 






3160 // <NEW PREDICTION>
3161 
3162 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
3163                                                        bool young) {
3164   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
3165 }
3166 
3167 void G1CollectedHeap::check_if_region_is_too_expensive(double
3168                                                            predicted_time_ms) {
3169   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
3170 }
3171 
3172 size_t G1CollectedHeap::pending_card_num() {
3173   size_t extra_cards = 0;
3174   JavaThread *curr = Threads::first();
3175   while (curr != NULL) {
3176     DirtyCardQueue& dcq = curr->dirty_card_queue();
3177     extra_cards += dcq.size();
3178     curr = curr->next();
3179   }
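
// The loop above walks the VM's intrusive, singly linked thread list and sums
// a per-thread counter. A self-contained sketch of the same traversal pattern
// (JavaThreadStub is a stand-in; the real list is Threads::first()/next()):

#include <cstddef>
#include <cstdio>

struct JavaThreadStub {
  size_t          dirty_cards;   // stand-in for dirty_card_queue().size()
  JavaThreadStub* next;          // intrusive link to the next thread
};

size_t pending_card_num(JavaThreadStub* first) {
  size_t extra_cards = 0;
  for (JavaThreadStub* curr = first; curr != NULL; curr = curr->next) {
    extra_cards += curr->dirty_cards;   // accumulate each thread's share
  }
  return extra_cards;
}

int main() {
  JavaThreadStub t2 = { 7, NULL };
  JavaThreadStub t1 = { 5, &t2 };
  std::printf("pending cards: %zu\n", pending_card_num(&t1));  // prints 12
  return 0;
}
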


3290   }
3291 
3292   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3293   ResourceMark rm;
3294 
3295   if (PrintHeapAtGC) {
3296     Universe::print_heap_before_gc();
3297   }
3298 
3299   verify_region_sets_optional();
3300   verify_dirty_young_regions();
3301 
3302   {
3303     // This call will decide whether this pause is an initial-mark
3304     // pause. If it is, during_initial_mark_pause() will return true
3305     // for the duration of this pause.
3306     g1_policy()->decide_on_conc_mark_initiation();
3307 
3308     char verbose_str[128];
3309     sprintf(verbose_str, "GC pause ");
3310     if (g1_policy()->full_young_gcs()) {

3311       strcat(verbose_str, "(young)");
3312     } else {
3313       strcat(verbose_str, "(partial)");
3314     }
3315     if (g1_policy()->during_initial_mark_pause()) {
3316       strcat(verbose_str, " (initial-mark)");
3317       // We are about to start a marking cycle, so we increment the
3318       // full collection counter.
3319       increment_total_full_collections();
3320     }
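
// The 128-byte buffer above comfortably holds the longest possible label
// ("GC pause (partial) (initial-mark)" is well under 40 characters). For
// reference, the same string can be assembled with explicit bounds checking;
// a standalone sketch, with the two bools standing in for the g1_policy()
// queries:

#include <cstddef>
#include <cstdio>

int main() {
  char verbose_str[128];
  bool full_young   = false;
  bool initial_mark = true;
  int n = std::snprintf(verbose_str, sizeof(verbose_str), "GC pause %s%s",
                        full_young ? "(young)" : "(partial)",
                        initial_mark ? " (initial-mark)" : "");
  if (n >= 0 && (size_t)n < sizeof(verbose_str)) {  // no truncation occurred
    std::printf("%s\n", verbose_str);  // GC pause (partial) (initial-mark)
  }
  return 0;
}
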
3321 
3322     // if PrintGCDetails is on, we'll print long statistics information
3323     // in the collector policy code, so let's not print this as the output
3324     // is messy if we do.
3325     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
3326     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3327     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
3328 
3329     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3330     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3331 
3332     // If the secondary_free_list is not empty, append it to the
3333     // free_list. No need to wait for the cleanup operation to finish;
3334     // the region allocation code will check the secondary_free_list
3335     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3336     // set, skip this step so that the region allocation code has to
3337     // get entries from the secondary_free_list.
3338     if (!G1StressConcRegionFreeing) {
3339       append_secondary_free_list_if_not_empty_with_lock();
3340     }
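
// A sketch of the "append the secondary list if not empty, under a lock" step
// described above, using std::list and std::mutex in place of HotSpot's
// FreeRegionList and lock types (all names here are illustrative):

#include <cstdio>
#include <list>
#include <mutex>

std::mutex     secondary_lock;
std::list<int> free_list;             // region indices as stand-ins
std::list<int> secondary_free_list;   // refilled concurrently by cleanup

void append_secondary_free_list_if_not_empty_with_lock() {
  std::lock_guard<std::mutex> g(secondary_lock);
  if (!secondary_free_list.empty()) {
    // splice() relinks the nodes in O(1) and leaves the source list empty.
    free_list.splice(free_list.end(), secondary_free_list);
  }
}

int main() {
  secondary_free_list.push_back(10);
  secondary_free_list.push_back(11);
  append_secondary_free_list_if_not_empty_with_lock();
  std::printf("free regions: %zu\n", free_list.size());  // prints 2
  return 0;
}
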
3341 

3342     assert(check_young_list_well_formed(),
3343            "young list should be well formed");

3344 
3345     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3346       IsGCActiveMark x;
3347 
3348       gc_prologue(false);
3349       increment_total_collections(false /* full gc */);
3350       increment_gc_time_stamp();
3351 
3352       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
3353         HandleMark hm;  // Discard invalid handles created during verification
3354         gclog_or_tty->print(" VerifyBeforeGC:");
3355         prepare_for_verify();
3356         Universe::verify(/* allow dirty */ false,
3357                          /* silent      */ false,
3358                          /* option      */ VerifyOption_G1UsePrevMarking);
3359 
3360       }
3361 
3362       COMPILER2_PRESENT(DerivedPointerTable::clear());
3363 


3464 
3465       // Initialize the GC alloc regions.
3466       init_gc_alloc_regions();
3467 
3468       // Actually do the work...
3469       evacuate_collection_set();
3470 
3471       free_collection_set(g1_policy()->collection_set());
3472       g1_policy()->clear_collection_set();
3473 
3474       cleanup_surviving_young_words();
3475 
3476       // Start a new incremental collection set for the next pause.
3477       g1_policy()->start_incremental_cset_building();
3478 
3479       // Clear the _cset_fast_test bitmap in anticipation of adding
3480       // regions to the incremental collection set for the next
3481       // evacuation pause.
3482       clear_cset_fast_test();
3483 

3484       _young_list->reset_sampled_info();
3485 
3486       // Don't check the whole heap at this point as the
3487       // GC alloc regions from this pause have been tagged
3488       // as survivors and moved on to the survivor list.
3489       // Survivor regions will fail the !is_young() check.
3490       assert(check_young_list_empty(false /* check_heap */),
3491              "young list should be empty");
3492 
3493 #if YOUNG_LIST_VERBOSE
3494       gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3495       _young_list->print();
3496 #endif // YOUNG_LIST_VERBOSE
3497 
3498       g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3499                                            _young_list->first_survivor_region(),
3500                                            _young_list->last_survivor_region());
3501 
3502       _young_list->reset_auxilary_lists();

3503 
3504       if (evacuation_failed()) {
3505         _summary_bytes_used = recalculate_used();
3506       } else {
3507         // The "used" of the collection set have already been subtracted
3508         // when they were freed.  Add in the bytes evacuated.
3509         _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3510       }
3511 
3512       if (g1_policy()->during_initial_mark_pause()) {

3513         concurrent_mark()->checkpointRootsInitialPost();
3514         set_marking_started();
3515         // CAUTION: after the doConcurrentMark() call below,
3516         // the concurrent marking thread(s) could be running
3517         // concurrently with us. Make sure that anything after
3518         // this point does not assume that we are the only GC thread
3519         // running. Note: of course, the actual marking work will
3520         // not start until the safepoint itself is released in
3521         // ConcurrentGCThread::safepoint_desynchronize().
3522         doConcurrentMark();
3523       }
3524 
3525       allocate_dummy_regions();
3526 
3527 #if YOUNG_LIST_VERBOSE
3528       gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3529       _young_list->print();
3530       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3531 #endif // YOUNG_LIST_VERBOSE
3532 


5058     return false;
5059   }
5060   bool success() { return _success; }
5061 };
5062 
5063 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5064   bool ret = _young_list->check_list_empty(check_sample);
5065 
5066   if (check_heap) {
5067     NoYoungRegionsClosure closure;
5068     heap_region_iterate(&closure);
5069     ret = ret && closure.success();
5070   }
5071 
5072   return ret;
5073 }
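
// check_young_list_empty() above combines a direct list check with a closure
// applied to every region. A compact sketch of that closure pattern (Region
// and the loop are stand-ins for HeapRegion and heap_region_iterate()):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Region { bool is_young; };

struct NoYoungRegionsCheck {          // cf. NoYoungRegionsClosure
  bool success;
  NoYoungRegionsCheck() : success(true) { }
  void do_region(const Region& r) {
    if (r.is_young) success = false;  // any young region fails the check
  }
};

bool check_young_list_empty(const std::vector<Region>& regions,
                            bool list_reports_empty) {
  NoYoungRegionsCheck closure;
  for (size_t i = 0; i < regions.size(); ++i) {   // heap_region_iterate()
    closure.do_region(regions[i]);
  }
  return list_reports_empty && closure.success;
}

int main() {
  std::vector<Region> heap(2);        // value-initialized: is_young == false
  std::printf("%s\n", check_young_list_empty(heap, true) ? "empty" : "not empty");
  return 0;
}
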
5074 
5075 void G1CollectedHeap::empty_young_list() {
5076   assert(heap_lock_held_for_gc(),
5077               "the heap lock should already be held by or for this thread");

5078 
5079   _young_list->empty_list();
5080 }
5081 
5082 // Done at the start of full GC.
5083 void G1CollectedHeap::tear_down_region_lists() {
5084   _free_list.remove_all();
5085 }
5086 
5087 class RegionResetter: public HeapRegionClosure {
5088   G1CollectedHeap* _g1h;
5089   FreeRegionList _local_free_list;
5090 
5091 public:
5092   RegionResetter() : _g1h(G1CollectedHeap::heap()),
5093                      _local_free_list("Local Free List for RegionResetter") { }
5094 
5095   bool doHeapRegion(HeapRegion* r) {
5096     if (r->continuesHumongous()) return false;
5097     if (r->top() > r->bottom()) {