
src/share/vm/gc/g1/g1CollectorPolicy.cpp

rev 9402 : dihop-changes
rev 9404 : [mq]: erik-jmasa-review

@@ -37,10 +37,11 @@
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/pair.hpp"
 
 // Different defaults for different number of GC threads
 // They were chosen by running GCOld and SPECjbb on debris with different
 //   numbers of GC threads and choosing them based on the results
 

@@ -292,13 +293,11 @@
 
   _collectionSetChooser = new CollectionSetChooser();
 }
 
 G1CollectorPolicy::~G1CollectorPolicy() {
-  if (_ihop_control != NULL) {
     delete _ihop_control;
-  }
 }
 
 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
   return _predictor.get_new_prediction(seq);
 }

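Note on the destructor hunk above: the removed NULL check was redundant, because deleting a null pointer is a well-defined no-op in C++. A minimal standalone sketch of why the unguarded delete is safe (types hypothetical, not the HotSpot classes):

#include <cstdio>

struct IHOPControl {
  ~IHOPControl() { std::puts("IHOPControl destroyed"); }
};

struct Policy {
  IHOPControl* _ihop_control;

  Policy() : _ihop_control(NULL) {}

  ~Policy() {
    // Equivalent to the guarded version: delete on a null pointer
    // is defined to do nothing, so no check is needed.
    delete _ihop_control;
  }
};

int main() {
  Policy with_control;
  with_control._ihop_control = new IHOPControl();
  Policy without_control; // member stays NULL; destruction is still safe
  return 0;
}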
@@ -533,29 +532,30 @@
   // constraints (i.e., user-defined minimum bound). Currently, we
   // effectively don't set this bound.
   return _young_gen_sizer->max_desired_young_length();
 }
 
-void G1CollectorPolicy::update_young_list_max_and_target_length(size_t* unbounded_target_length) {
-  update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq), unbounded_target_length);
+uint G1CollectorPolicy::update_young_list_max_and_target_length() {
+  return update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
 }
 
-void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths, size_t* unbounded_target_length) {
-  update_young_list_target_length(rs_lengths, unbounded_target_length);
+uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
+  uint unbounded_target_length = update_young_list_target_length(rs_lengths);
   update_max_gc_locker_expansion();
+  return unbounded_target_length;
 }
 
-void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length) {
-  _young_list_target_length = bounded_young_list_target_length(rs_lengths, unbounded_target_length);
+uint G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
+  YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
+  _young_list_target_length = young_lengths.first;
+  return young_lengths.second;
 }
 
-void G1CollectorPolicy::update_young_list_target_length() {
-  update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
-}
+G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengths(size_t rs_lengths) const {
+  YoungTargetLengths result;
 
-uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length) const {
-  // Calculate the absolute and desired min bounds.
+  // Calculate the absolute and desired min bounds first.
 
   // This is how many young regions we already have (currently: the survivors).
   uint base_min_length = recorded_survivor_regions();
   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
   // This is the absolute minimum young length. Ensure that we

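The hunk above replaces the unbounded_target_length out-parameter with a returned Pair (hence the new utilities/pair.hpp include): YoungTargetLengths carries the reserve-bounded target in first and the unbounded target in second. A standalone sketch of the pattern, substituting std::pair for HotSpot's Pair (bounds and values illustrative):

#include <algorithm>
#include <cstdio>
#include <utility>

// first = bounded target, second = target ignoring the heap reserve
typedef std::pair<unsigned, unsigned> YoungTargetLengths;

YoungTargetLengths young_list_target_lengths(unsigned desired_length,
                                             unsigned absolute_max_length) {
  YoungTargetLengths result;
  // Record the unbounded value before any reserve clamping...
  result.second = desired_length;
  // ...and clamp the actual target against the reserve-derived maximum.
  result.first = std::min(desired_length, absolute_max_length);
  return result;
}

int main() {
  YoungTargetLengths lengths = young_list_target_lengths(120, 100);
  std::printf("bounded=%u unbounded=%u\n", lengths.first, lengths.second);
  return 0;
}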
@@ -584,13 +584,11 @@
     // The user asked for a fixed young gen so we'll fix the young gen
     // whether the next GC is young or mixed.
     young_list_target_length = _young_list_fixed_length;
   }
 
-  if (unbounded_target_length != NULL) {
-    *unbounded_target_length = young_list_target_length;    
-  }
+  result.second = young_list_target_length;
   
   // We will try our best not to "eat" into the reserve.
   uint absolute_max_length = 0;
   if (_free_regions_at_end_of_collection > _reserve_regions) {
     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;

@@ -611,11 +609,12 @@
 
   assert(young_list_target_length > recorded_survivor_regions(),
          "we should be able to allocate at least one eden region");
   assert(young_list_target_length >= absolute_min_length, "post-condition");
 
-  return young_list_target_length;
+  result.first = young_list_target_length;
+  return result;
 }
 
 uint
 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,

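The reserve clamping that produces result.first above can be modeled on its own. A simplified sketch, assuming only the reserve bound applies (the real code also enforces the desired and absolute minimums and the fixed young gen case):

#include <algorithm>
#include <cstdio>

unsigned bounded_target(unsigned young_list_target_length,
                        unsigned free_regions_at_end_of_collection,
                        unsigned reserve_regions) {
  // We try not to "eat" into the reserve: only the free regions above
  // the reserve are available to the young gen.
  unsigned absolute_max_length = 0;
  if (free_regions_at_end_of_collection > reserve_regions) {
    absolute_max_length = free_regions_at_end_of_collection - reserve_regions;
  }
  return std::min(young_list_target_length, absolute_max_length);
}

int main() {
  // 50 free regions with 10 reserved: a desired 60-region young gen
  // is clamped to 40 regions.
  std::printf("%u\n", bounded_target(60, 50, 10));
  return 0;
}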
@@ -926,11 +925,11 @@
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
                                                               "skip last young-only gc");
   collector_state()->set_last_young_gc(should_continue_with_reclaim);
-  // We abort the marking phase.
+  // We skip the marking phase.
   if (!should_continue_with_reclaim) {
     abort_time_to_mixed_tracking();
   }
   collector_state()->set_in_marking_window(false);
 }

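The comment fix above ("skip" rather than "abort") matches the control flow: when mixed reclamation does not look worthwhile, the last young-only gc is skipped and the initial-mark-to-mixed timer is abandoned. A schematic stand-in for that flow (simplified types, not the HotSpot API):

#include <cstdio>

struct CollectorState {
  bool last_young_gc;
  bool in_marking_window;
};

void record_cleanup_completed(CollectorState* state,
                              bool should_continue_with_reclaim) {
  state->last_young_gc = should_continue_with_reclaim;
  // We skip the marking phase.
  if (!should_continue_with_reclaim) {
    std::puts("abort time-to-mixed tracking"); // stand-in for the real call
  }
  state->in_marking_window = false;
}

int main() {
  CollectorState state = { false, true };
  record_cleanup_completed(&state, false);
  return 0;
}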
@@ -1045,23 +1044,20 @@
     record_concurrent_mark_init_end(0.0);
   } else {
     maybe_start_marking();
   }
 
-  double app_time_ms = 1.0;
-
-  if (update_stats) {
-    _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
-    // this is where we update the allocation rate of the application
-    app_time_ms =
-      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
+  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
     if (app_time_ms < MIN_TIMER_GRANULARITY) {
       // This usually happens due to the timer not having the required
       // granularity. Some Linuxes are the usual culprits.
       // We'll just set it to something (arbitrarily) small.
       app_time_ms = 1.0;
     }
+
+  if (update_stats) {
+    _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
     // We maintain the invariant that all objects allocated by mutator
     // threads will be allocated out of eden regions. So, we can use
     // the eden region number allocated since the previous GC to
     // calculate the application's allocate rate. The only exception
     // to that is humongous objects that are allocated separately. But

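The hoisted app_time_ms computation above feeds the allocation-rate estimate described in the trailing comment: since mutator allocation (humongous objects aside) goes into eden, bytes per ms follows from the eden regions consumed since the previous GC. A simplified sketch (the granularity floor is illustrative, not HotSpot's MIN_TIMER_GRANULARITY value):

#include <cstddef>
#include <cstdio>

double allocation_rate_bytes_per_ms(unsigned eden_regions_allocated,
                                    size_t region_size_bytes,
                                    double app_time_ms) {
  const double MIN_TIMER_GRANULARITY = 0.001; // illustrative, in ms
  if (app_time_ms < MIN_TIMER_GRANULARITY) {
    // Coarse timers can report (almost) zero elapsed time; substitute
    // a small value rather than dividing by it.
    app_time_ms = 1.0;
  }
  return (double)(eden_regions_allocated * region_size_bytes) / app_time_ms;
}

int main() {
  // 8 eden regions of 1 MB allocated over 250 ms of mutator time.
  std::printf("%.1f bytes/ms\n",
              allocation_rate_bytes_per_ms(8, 1024u * 1024u, 250.0));
  return 0;
}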
@@ -1208,23 +1204,23 @@
   _free_regions_at_end_of_collection = _g1->num_free_regions();
   // IHOP control wants to know the expected young gen length if it were not
   // restrained by the heap reserve. Using the actual length would make the
  // prediction too small and limit the young gen every time we get to the
   // predicted target occupancy.
-  size_t last_unrestrained_young_length = 0;
-  update_young_list_max_and_target_length(&last_unrestrained_young_length);
+  size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
   update_rs_lengths_prediction();
 
+  // Only update IHOP information on regular GCs.
+  if (update_stats) {
   double marking_to_mixed_time = -1.0;
   if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
     assert(marking_to_mixed_time > 0.0,
            "Initial mark to mixed time must be larger than zero but is %.3f",
            marking_to_mixed_time);
   }
-  // Only update IHOP information on regular GCs.
-  if (update_stats) {
+
     update_ihop_statistics(marking_to_mixed_time,
                            app_time_ms / 1000.0,
                            _last_old_allocated_bytes,
                            last_unrestrained_young_length * HeapRegion::GrainBytes);
   }

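update_ihop_statistics above hands the IHOP control the last marking length, the mutator time, the old gen allocation, and the unrestrained young gen size. Conceptually, an adaptive IHOP backs the threshold off from the target occupancy by the space consumed while marking runs plus the young gen; a simplified model of that idea (not the exact G1AdaptiveIHOPControl arithmetic):

#include <algorithm>
#include <cstdio>

double adaptive_ihop_threshold(double target_occupancy_bytes,
                               double alloc_rate_bytes_per_sec,
                               double marking_time_sec,
                               double young_gen_bytes) {
  // Start marking early enough that old gen allocation during marking,
  // plus the young gen, still fits under the target occupancy.
  double headroom = alloc_rate_bytes_per_sec * marking_time_sec + young_gen_bytes;
  return std::max(0.0, target_occupancy_bytes - headroom);
}

int main() {
  // 1 GB target, 10 MB/s old gen allocation, 4 s marking, 100 MB young gen.
  std::printf("threshold = %.0f bytes\n",
              adaptive_ihop_threshold(1024.0 * 1024 * 1024,
                                      10.0 * 1024 * 1024, 4.0,
                                      100.0 * 1024 * 1024));
  return 0;
}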
@@ -1269,11 +1265,11 @@
   // To avoid using really small times that may be caused by e.g. back-to-back gcs
   // we filter them out.
   double const min_valid_time = 1e-6;
 
   if (marking_time > min_valid_time) {
-    _ihop_control->update_time_to_mixed(marking_time);
+    _ihop_control->update_marking_length(marking_time);
     report = true;
   }
 
   // As an approximation for the young gc promotion rates during marking we use
   // all of them. In many applications there are only a few if any young gcs during
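The rename to update_marking_length reflects what is actually recorded, and the min_valid_time filter above keeps degenerate samples (for example from back-to-back gcs) out of the statistics. A minimal sketch of that filtering:

#include <cstdio>
#include <vector>

bool record_marking_length(std::vector<double>* samples, double marking_time) {
  // Reject near-zero durations before they pollute the statistics.
  const double min_valid_time = 1e-6; // seconds, as in the hunk above
  if (marking_time > min_valid_time) {
    samples->push_back(marking_time);
    return true; // a report is warranted
  }
  return false;
}

int main() {
  std::vector<double> samples;
  std::printf("%d %d\n",
              record_marking_length(&samples, 3.5),   // accepted
              record_marking_length(&samples, 1e-9)); // filtered out
  return 0;
}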