src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

Print this page
rev 2591 : 6814390: G1: remove the concept of non-generational G1
Summary: Removed the possibility to turn off generational mode for G1.
Reviewed-by: johnc, ysr, tonyp

@@ -1261,14 +1261,12 @@
     // after this full GC.
     abandon_collection_set(g1_policy()->inc_cset_head());
     g1_policy()->clear_incremental_cset();
     g1_policy()->stop_incremental_cset_building();
 
-    if (g1_policy()->in_young_gc_mode()) {
       empty_young_list();
       g1_policy()->set_full_young_gcs(true);
-    }
 
     // See the comment in G1CollectedHeap::ref_processing_init() about
     // how reference processing currently works in G1.
 
     // Temporarily make reference _discovery_ single threaded (non-MT).

@@ -1385,17 +1383,15 @@
     JavaThread::dirty_card_queue_set().abandon_logs();
     assert(!G1DeferredRSUpdate
            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
   }
 
-  if (g1_policy()->in_young_gc_mode()) {
     _young_list->reset_sampled_info();
     // At this point there should be no regions in the
     // entire heap tagged as young.
     assert( check_young_list_empty(true /* check_heap */),
             "young list should be empty at this point");
-  }
 
   // Update the number of full collections that have been completed.
   increment_full_collections_completed(false /* concurrent */);
 
   _hrs.verify_optional();

@@ -3159,16 +3155,10 @@
     _cmThread->set_started();
     CGC_lock->notify();
   }
 }
 
-void G1CollectedHeap::do_sync_mark() {
-  _cm->checkpointRootsInitial();
-  _cm->markFromRoots();
-  _cm->checkpointRootsFinal(false);
-}
-
 // <NEW PREDICTION>
 
 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
                                                        bool young) {
   return _g1_policy->predict_region_elapsed_time_ms(hr, young);

@@ -3315,14 +3305,13 @@
     // for the duration of this pause.
     g1_policy()->decide_on_conc_mark_initiation();
 
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
-    if (g1_policy()->in_young_gc_mode()) {
-      if (g1_policy()->full_young_gcs())
+    if (g1_policy()->full_young_gcs()) {
         strcat(verbose_str, "(young)");
-      else
+    } else {
         strcat(verbose_str, "(partial)");
     }
     if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
       // We are about to start a marking cycle, so we increment the

@@ -3348,14 +3337,12 @@
     // get entries from the secondary_free_list.
     if (!G1StressConcRegionFreeing) {
       append_secondary_free_list_if_not_empty_with_lock();
     }
 
-    if (g1_policy()->in_young_gc_mode()) {
       assert(check_young_list_well_formed(),
              "young list should be well formed");
-    }
 
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
 
       gc_prologue(false);

@@ -3492,11 +3479,10 @@
       // Clear the _cset_fast_test bitmap in anticipation of adding
       // regions to the incremental collection set for the next
       // evacuation pause.
       clear_cset_fast_test();
 
-      if (g1_policy()->in_young_gc_mode()) {
         _young_list->reset_sampled_info();
 
         // Don't check the whole heap at this point as the
         // GC alloc regions from this pause have been tagged
         // as survivors and moved on to the survivor list.

@@ -3512,22 +3498,20 @@
         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
                                           _young_list->first_survivor_region(),
                                           _young_list->last_survivor_region());
 
         _young_list->reset_auxilary_lists();
-      }
 
       if (evacuation_failed()) {
         _summary_bytes_used = recalculate_used();
       } else {
        // The "used" of the collection set have already been subtracted
         // when they were freed.  Add in the bytes evacuated.
         _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
       }
 
-      if (g1_policy()->in_young_gc_mode() &&
-          g1_policy()->during_initial_mark_pause()) {
+      if (g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPost();
         set_marking_started();
         // CAUTION: after the doConcurrentMark() call below,
         // the concurrent marking thread(s) could be running
         // concurrently with us. Make sure that anything after

@@ -5089,11 +5073,10 @@
 }
 
 void G1CollectedHeap::empty_young_list() {
   assert(heap_lock_held_for_gc(),
               "the heap lock should already be held by or for this thread");
-  assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
 
   _young_list->empty_list();
 }
 
 // Done at the start of full GC.