
src/share/vm/gc/g1/g1CollectionSet.cpp

rev 11545 : [mq]: 8159978-collection-set-as-array
rev 11546 : [mq]: 8159978-erikh-review


   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1CollectedHeap.hpp"
  27 #include "gc/g1/g1CollectionSet.hpp"
  28 #include "gc/g1/g1CollectorState.hpp"
  29 #include "gc/g1/g1FixedSizeStack.inline.hpp"
  30 #include "gc/g1/g1Policy.hpp"
  31 #include "gc/g1/heapRegion.inline.hpp"
  32 #include "gc/g1/heapRegionRemSet.hpp"
  33 #include "gc/g1/heapRegionSet.hpp"
  34 #include "logging/logStream.hpp"
  35 #include "utilities/debug.hpp"
  36 
  37 G1CollectorState* G1CollectionSet::collector_state() {
  38   return _g1->collector_state();
  39 }
  40 
  41 G1GCPhaseTimes* G1CollectionSet::phase_times() {
  42   return _policy->phase_times();
  43 }
  44 
  45 CollectionSetChooser* G1CollectionSet::cset_chooser() {
  46   return _cset_chooser;
  47 }
  48 
  49 double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
  50   return _policy->predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
  51 }
  52 
  53 G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  54   _g1(g1h),
  55   _policy(policy),
  56   _cset_chooser(new CollectionSetChooser()),
  57   _eden_region_length(0),
  58   _survivor_region_length(0),
  59   _old_region_length(0),
  60   _collection_set_regions(),
  61   _bytes_used_before(0),
  62   _recorded_rs_lengths(0),



  63   // Incremental CSet attributes
  64   _inc_build_state(Inactive),
  65   _inc_bytes_used_before(0),
  66   _inc_recorded_rs_lengths(0),
  67   _inc_recorded_rs_lengths_diffs(0),
  68   _inc_predicted_elapsed_time_ms(0.0),
  69   _inc_predicted_elapsed_time_ms_diffs(0.0) {
  70 }
  71 
  72 G1CollectionSet::~G1CollectionSet() {



  73   delete _cset_chooser;
  74 }
  75 
  76 void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
  77                                           uint survivor_cset_region_length) {


  78   _eden_region_length     = eden_cset_region_length;
  79   _survivor_region_length = survivor_cset_region_length;
  80 
  81   assert((size_t) young_region_length() == _collection_set_regions.length(),
  82          "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_regions.length());
  83 
  84   _old_region_length      = 0;
  85 }
  86 
  87 void G1CollectionSet::set_max_length(uint max_region_length) {
  88   guarantee(_collection_set_regions.max_length() == 0, "Must only initialize once.");
  89   _collection_set_regions.initialize(max_region_length);

  90 }
  91 
  92 void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
  93   _recorded_rs_lengths = rs_lengths;
  94 }
  95 
  96 // Add the heap region at the head of the non-incremental collection set
  97 void G1CollectionSet::add_old_region(HeapRegion* hr) {


  98   assert(_inc_build_state == Active, "Precondition");
  99   assert(hr->is_old(), "the region should be old");
 100 
 101   assert(!hr->in_collection_set(), "should not already be in the CSet");
 102   _g1->register_old_region_with_cset(hr);
 103 
 104   _collection_set_regions.par_push(hr->hrm_index());

 105   
 106   _bytes_used_before += hr->used();
 107   size_t rs_length = hr->rem_set()->occupied();
 108   _recorded_rs_lengths += rs_length;
 109   _old_region_length += 1;
 110 }
 111 
 112 // Initialize the per-collection-set information
 113 void G1CollectionSet::start_incremental_building() {
 114   assert(_collection_set_regions.length() == 0, "Must be empty before starting a new collection set.");
 115   assert(_inc_build_state == Inactive, "Precondition");
 116 
 117   _collection_set_regions.clear();
 118 
 119   _inc_bytes_used_before = 0;
 120 
 121   _inc_recorded_rs_lengths = 0;
 122   _inc_recorded_rs_lengths_diffs = 0;
 123   _inc_predicted_elapsed_time_ms = 0.0;
 124   _inc_predicted_elapsed_time_ms_diffs = 0.0;
 125   _inc_build_state = Active;
 126 }
 127 
 128 void G1CollectionSet::finalize_incremental_building() {
 129   assert(_inc_build_state == Active, "Precondition");
 130   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
 131 
 132   // The two "main" fields, _inc_recorded_rs_lengths and
 133   // _inc_predicted_elapsed_time_ms, are updated by the thread
 134   // that adds a new region to the CSet. Further updates by the
 135   // concurrent refinement thread that samples the young RSet lengths
 136   // are accumulated in the *_diffs fields. Here we add the diffs to
 137   // the "main" fields.
 138 
 139   if (_inc_recorded_rs_lengths_diffs >= 0) {
 140     _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
 141   } else {
 142     // This is defensive. The diff should in theory be always positive
 143     // as RSets can only grow between GCs. However, given that we
 144     // sample their size concurrently with other threads updating them
 145     // it's possible that we might get the wrong size back, which
 146     // could make the calculations somewhat inaccurate.
 147     size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
 148     if (_inc_recorded_rs_lengths >= diffs) {
 149       _inc_recorded_rs_lengths -= diffs;
 150     } else {
 151       _inc_recorded_rs_lengths = 0;
 152     }
 153   }
 154   _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;
 155 
 156   _inc_recorded_rs_lengths_diffs = 0;
 157   _inc_predicted_elapsed_time_ms_diffs = 0.0;
 158 }
 159 
 160 void G1CollectionSet::iterate(HeapRegionClosure* cl) {
 161   iterate_from(cl, 0, 1, true);





 162 }
 163 
 164 void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers, bool may_be_aborted) {
 165   size_t len = _collection_set_regions.length();

 166   if (len == 0) {
 167     return;
 168   }
 169   size_t start_pos = (worker_id * len) / total_workers;
 170   size_t cur_pos = start_pos;
 171 
 172   do {
 173     HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions.get_by_index(cur_pos));
 174     bool result = cl->doHeapRegion(r);
 175     guarantee(may_be_aborted || !result, "This iteration should not abort.");
 176     if (result) {

 177       return;
 178     }
 179     cur_pos++;
 180     if (cur_pos == len) {
 181       cur_pos = 0;
 182     }
 183   } while (cur_pos != start_pos);
 184 }
 185 
 186 void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
 187                                                      size_t new_rs_length) {
 188   // Update the CSet information that is dependent on the new RS length
 189   assert(hr->is_young(), "Precondition");
 190   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");
 191 
 192   // We could have updated _inc_recorded_rs_lengths and
 193   // _inc_predicted_elapsed_time_ms directly but we'd need to do
 194   // that atomically, as this code is executed by a concurrent
 195   // refinement thread, potentially concurrently with a mutator thread
 196   // allocating a new region and also updating the same fields. To
 197   // avoid the atomic operation we accumulate these updates on two
 198   // separate fields (*_diffs) and we'll just add them to the "main"
 199   // fields at the start of a GC.
 200 
 201   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
 202   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
 203   _inc_recorded_rs_lengths_diffs += rs_lengths_diff;
 204 
 205   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
 206   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
 207   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
 208   _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
 209 
 210   hr->set_recorded_rs_length(new_rs_length);
 211   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
 212 }
 213 
 214 void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
 215   assert(hr->is_young(), "invariant");
 216   assert(_inc_build_state == Active, "Precondition");
 217 
 218   size_t collection_set_length = _collection_set_regions.length();
 219   assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
 220   hr->set_young_index_in_cset((int)collection_set_length);
 221   _collection_set_regions.par_push(hr->hrm_index());






 222 
 223   // This routine is used when:
 224   // * adding survivor regions to the incremental cset at the end of an
 225   //   evacuation pause or
 226   // * adding the current allocation region to the incremental cset
 227   //   when it is retired.
 228   // Therefore this routine may be called at a safepoint by the
 229   // VM thread, or in-between safepoints by mutator threads (when
 230   // retiring the current allocation region)
 231   // We need to clear and set the cached recorded/cached collection set
 232   // information in the heap region here (before the region gets added
 233   // to the collection set). An individual heap region's cached values
 234   // are calculated, aggregated with the policy collection set info,
 235   // and cached in the heap region here (initially) and (subsequently)
 236   // by the Young List sampling code.
 237 
 238   size_t rs_length = hr->rem_set()->occupied();
 239   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
 240 
 241   // Cache the values we have added to the aggregated information
 242   // in the heap region in case we have to remove this region from
 243   // the incremental collection set, or it is updated by the
 244   // rset sampling code
 245   hr->set_recorded_rs_length(rs_length);
 246   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
 247 
 248   size_t used_bytes = hr->used();
 249   _inc_recorded_rs_lengths += rs_length;
 250   _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
 251   _inc_bytes_used_before += used_bytes;
 252 
 253   assert(!hr->in_collection_set(), "invariant");
 254   _g1->register_young_region_with_cset(hr);
 255 }
 256 
 257 void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
 258   assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
 259   add_young_region_common(hr);
 260 }
 261 
 262 void G1CollectionSet::add_eden_region(HeapRegion* hr) {
 263   assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
 264   add_young_region_common(hr);
 265 }
 266 
 267 #ifndef PRODUCT
 268 bool G1CollectionSet::verify_young_ages() {


 269   bool ret = true;
 270 
 271   size_t length = _collection_set_regions.length();
 272   for (size_t i = 0; i < length; i++) {
 273     HeapRegion* curr = G1CollectedHeap::heap()->region_at(_collection_set_regions.get_by_index(i));
 274 
 275     guarantee(curr->is_young(), "Region must be young but is %s", curr->get_type_str());
 276 
 277     SurvRateGroup* group = curr->surv_rate_group();
 278 
 279     if (group == NULL) {
 280       log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
 281       ret = false;
 282     }
 283 
 284     if (curr->age_in_surv_rate_group() < 0) {
 285       log_error(gc, verify)("## encountered negative age in young region");
 286       ret = false;
 287     }
 288   }
 289 
 290   if (!ret) {
 291     LogStreamHandle(Error, gc, verify) log;
 292     print(&log);
 293   }


 449       // avoid generating output per region.
 450       log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
 451                                 "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
 452                                 old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
 453     }
 454 
 455     cset_chooser()->verify();
 456   }
 457 
 458   stop_incremental_building();
 459 
 460   log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
 461                             old_region_length(), predicted_old_time_ms, time_remaining_ms);
 462 
 463   double non_young_end_time_sec = os::elapsedTime();
 464   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
 465 }
 466 
 467 #ifdef ASSERT
 468 void G1CollectionSet::verify_young_cset_indices() const {


 469   ResourceMark rm;
 470   uint* heap_region_indices = NEW_RESOURCE_ARRAY(uint, young_region_length());
 471   for (uint i = 0; i < young_region_length(); ++i) {
 472     heap_region_indices[i] = (uint)-1;
 473   }
 474 
 475   size_t length = _collection_set_regions.length();
 476   for (size_t i = 0; i < length; i++) {
 477     HeapRegion* hr = G1CollectedHeap::heap()->region_at(_collection_set_regions.get_by_index(i));
 478 
 479     const int idx = hr->young_index_in_cset();
 480     assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", hr->hrm_index());
 481     assert((uint)idx < young_region_length(), "Young cset index too large for region %u", hr->hrm_index());
 482 
 483     assert(heap_region_indices[idx] == (uint)-1,
 484            "Index %d used by multiple regions, first use by region %u, second by region %u",
 485            idx, heap_region_indices[idx], hr->hrm_index());
 486 
 487     heap_region_indices[idx] = hr->hrm_index();
 488   }
 489 }
 490 #endif


   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1CollectedHeap.hpp"
  27 #include "gc/g1/g1CollectionSet.hpp"
  28 #include "gc/g1/g1CollectorState.hpp"

  29 #include "gc/g1/g1Policy.hpp"
  30 #include "gc/g1/heapRegion.inline.hpp"
  31 #include "gc/g1/heapRegionRemSet.hpp"
  32 #include "gc/g1/heapRegionSet.hpp"
  33 #include "logging/logStream.hpp"
  34 #include "utilities/debug.hpp"
  35 
  36 G1CollectorState* G1CollectionSet::collector_state() {
  37   return _g1->collector_state();
  38 }
  39 
  40 G1GCPhaseTimes* G1CollectionSet::phase_times() {
  41   return _policy->phase_times();
  42 }
  43 
  44 CollectionSetChooser* G1CollectionSet::cset_chooser() {
  45   return _cset_chooser;
  46 }
  47 
  48 double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
  49   return _policy->predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
  50 }
  51 
  52 G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  53   _g1(g1h),
  54   _policy(policy),
  55   _cset_chooser(new CollectionSetChooser()),
  56   _eden_region_length(0),
  57   _survivor_region_length(0),
  58   _old_region_length(0),

  59   _bytes_used_before(0),
  60   _recorded_rs_lengths(0),
  61   _collection_set_regions(NULL),
  62   _collection_set_cur_length(0),
  63   _collection_set_max_length(0),
  64   // Incremental CSet attributes
  65   _inc_build_state(Inactive),
  66   _inc_bytes_used_before(0),
  67   _inc_recorded_rs_lengths(0),
  68   _inc_recorded_rs_lengths_diffs(0),
  69   _inc_predicted_elapsed_time_ms(0.0),
  70   _inc_predicted_elapsed_time_ms_diffs(0.0) {
  71 }
  72 
  73 G1CollectionSet::~G1CollectionSet() {
  74   if (_collection_set_regions != NULL) {
  75     FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
  76   }
  77   delete _cset_chooser;
  78 }
  79 
  80 void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
  81                                           uint survivor_cset_region_length) {
  82   assert_at_safepoint(true);
  83 
  84   _eden_region_length     = eden_cset_region_length;
  85   _survivor_region_length = survivor_cset_region_length;
  86 
  87   assert((size_t) young_region_length() == _collection_set_cur_length,
  88          "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
  89 
  90   _old_region_length      = 0;
  91 }
  92 
  93 void G1CollectionSet::set_max_length(uint max_region_length) {
  94   guarantee(_collection_set_regions == NULL, "Must only initialize once.");
  95   _collection_set_max_length = max_region_length;
  96   _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
  97 }
  98 
  99 void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
 100   _recorded_rs_lengths = rs_lengths;
 101 }
 102 
 103 // Add the heap region at the head of the non-incremental collection set
 104 void G1CollectionSet::add_old_region(HeapRegion* hr) {
 105   assert_at_safepoint(true);
 106 
 107   assert(_inc_build_state == Active, "Precondition");
 108   assert(hr->is_old(), "the region should be old");
 109 
 110   assert(!hr->in_collection_set(), "should not already be in the CSet");
 111   _g1->register_old_region_with_cset(hr);
 112 
 113   _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
 114   assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
 115   
 116   _bytes_used_before += hr->used();
 117   size_t rs_length = hr->rem_set()->occupied();
 118   _recorded_rs_lengths += rs_length;
 119   _old_region_length += 1;
 120 }
 121 
 122 // Initialize the per-collection-set information
 123 void G1CollectionSet::start_incremental_building() {
 124   assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
 125   assert(_inc_build_state == Inactive, "Precondition");
 126 


 127   _inc_bytes_used_before = 0;
 128 
 129   _inc_recorded_rs_lengths = 0;
 130   _inc_recorded_rs_lengths_diffs = 0;
 131   _inc_predicted_elapsed_time_ms = 0.0;
 132   _inc_predicted_elapsed_time_ms_diffs = 0.0;
 133   _inc_build_state = Active;
 134 }
 135 
 136 void G1CollectionSet::finalize_incremental_building() {
 137   assert(_inc_build_state == Active, "Precondition");
 138   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
 139 
 140   // The two "main" fields, _inc_recorded_rs_lengths and
 141   // _inc_predicted_elapsed_time_ms, are updated by the thread
 142   // that adds a new region to the CSet. Further updates by the
 143   // concurrent refinement thread that samples the young RSet lengths
 144   // are accumulated in the *_diffs fields. Here we add the diffs to
 145   // the "main" fields.
 146 
 147   if (_inc_recorded_rs_lengths_diffs >= 0) {
 148     _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
 149   } else {
 150     // This is defensive. The diff should in theory be always positive
 151     // as RSets can only grow between GCs. However, given that we
 152     // sample their size concurrently with other threads updating them
 153     // it's possible that we might get the wrong size back, which
 154     // could make the calculations somewhat inaccurate.
 155     size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
 156     if (_inc_recorded_rs_lengths >= diffs) {
 157       _inc_recorded_rs_lengths -= diffs;
 158     } else {
 159       _inc_recorded_rs_lengths = 0;
 160     }
 161   }
 162   _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;
 163 
 164   _inc_recorded_rs_lengths_diffs = 0;
 165   _inc_predicted_elapsed_time_ms_diffs = 0.0;
 166 }
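
A minimal standalone sketch (plain C++, not part of the patch) of the folding performed above: the signed *_diffs delta is added to an unsigned total at the safepoint, clamping at zero because a stale concurrent sample can drive the delta too far negative.

#include <cstddef>
#include <cstdio>

// Fold a signed, concurrently sampled delta into an unsigned total, clamping
// at zero, mirroring the defensive branch in finalize_incremental_building().
static size_t fold_diff(size_t total, ptrdiff_t diff) {
  if (diff >= 0) {
    return total + (size_t)diff;
  }
  size_t dec = (size_t)(-diff);
  return (total >= dec) ? total - dec : 0;
}

int main() {
  printf("%zu\n", fold_diff(100, 25));   // prints 125
  printf("%zu\n", fold_diff(100, -40));  // prints 60
  printf("%zu\n", fold_diff(10, -40));   // clamped to 0
  return 0;
}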
 167 
 168 void G1CollectionSet::clear() {
 169   assert_at_safepoint(true);
 170   _collection_set_cur_length = 0;
 171 }
 172 
 173 void G1CollectionSet::iterate(HeapRegionClosure* cl, bool may_be_aborted) {
 174   iterate_from(cl, 0, 1, may_be_aborted);
 175 }
 176 
 177 void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers, bool may_be_aborted) {
 178   size_t len = _collection_set_cur_length;
 179   OrderAccess::loadload();
 180   if (len == 0) {
 181     return;
 182   }
 183   size_t start_pos = (worker_id * len) / total_workers;
 184   size_t cur_pos = start_pos;
 185 
 186   do {
 187     HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
 188     bool result = cl->doHeapRegion(r);
 189     guarantee(may_be_aborted || !result, "This iteration should not abort.");
 190     if (result) {
 191       cl->incomplete();
 192       return;
 193     }
 194     cur_pos++;
 195     if (cur_pos == len) {
 196       cur_pos = 0;
 197     }
 198   } while (cur_pos != start_pos);
 199 }
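
A minimal standalone sketch (plain C++, with a hypothetical region list) of the start-offset partitioning used by iterate_from() above: each worker begins at (worker_id * len) / total_workers and walks the whole array with wrap-around, so workers start on disjoint chunks while still being able to visit every entry.

#include <cstddef>
#include <cstdio>
#include <vector>

// Each worker starts at its own offset and wraps around, visiting all entries.
static void iterate_from(const std::vector<unsigned>& regions,
                         unsigned worker_id, unsigned total_workers) {
  size_t len = regions.size();
  if (len == 0) {
    return;
  }
  size_t start_pos = (size_t)worker_id * len / total_workers;
  size_t cur_pos = start_pos;
  do {
    printf("worker %u visits region %u\n", worker_id, regions[cur_pos]);
    cur_pos++;
    if (cur_pos == len) {
      cur_pos = 0;  // wrap around to cover the entries before start_pos
    }
  } while (cur_pos != start_pos);
}

int main() {
  std::vector<unsigned> cset = {7, 11, 13, 42, 99};
  for (unsigned w = 0; w < 2; w++) {
    iterate_from(cset, w, 2);  // worker 0 starts at index 0, worker 1 at index 2
  }
  return 0;
}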
 200 
 201 void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
 202                                                      size_t new_rs_length) {
 203   // Update the CSet information that is dependent on the new RS length
 204   assert(hr->is_young(), "Precondition");
 205   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");
 206 
 207   // We could have updated _inc_recorded_rs_lengths and
 208   // _inc_predicted_elapsed_time_ms directly but we'd need to do
 209   // that atomically, as this code is executed by a concurrent
 210   // refinement thread, potentially concurrently with a mutator thread
 211   // allocating a new region and also updating the same fields. To
 212   // avoid the atomic operation we accumulate these updates on two
 213   // separate fields (*_diffs) and we'll just add them to the "main"
 214   // fields at the start of a GC.
 215 
 216   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
 217   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
 218   _inc_recorded_rs_lengths_diffs += rs_lengths_diff;
 219 
 220   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
 221   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
 222   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
 223   _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
 224 
 225   hr->set_recorded_rs_length(new_rs_length);
 226   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
 227 }
 228 
 229 void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
 230   assert(hr->is_young(), "invariant");
 231   assert(_inc_build_state == Active, "Precondition");
 232 
 233   size_t collection_set_length = _collection_set_cur_length;
 234   assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
 235   hr->set_young_index_in_cset((int)collection_set_length);
 236 
 237   _collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
 238   // Concurrent readers must observe the store of the value in the array before an
 239   // update to the length field.
 240   OrderAccess::storestore();
 241   _collection_set_cur_length++;
 242   assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
 243 
 244   // This routine is used when:
 245   // * adding survivor regions to the incremental cset at the end of an
 246   //   evacuation pause or
 247   // * adding the current allocation region to the incremental cset
 248   //   when it is retired.
 249   // Therefore this routine may be called at a safepoint by the
 250   // VM thread, or in-between safepoints by mutator threads (when
 251   // retiring the current allocation region)
 252   // We need to clear and set the cached recorded/cached collection set
 253   // information in the heap region here (before the region gets added
 254   // to the collection set). An individual heap region's cached values
 255   // are calculated, aggregated with the policy collection set info,
 256   // and cached in the heap region here (initially) and (subsequently)
 257   // by the Young List sampling code.
 258 
 259   size_t rs_length = hr->rem_set()->occupied();
 260   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
 261 
 262   // Cache the values we have added to the aggregated information
 263   // in the heap region in case we have to remove this region from
 264   // the incremental collection set, or it is updated by the
 265   // rset sampling code
 266   hr->set_recorded_rs_length(rs_length);
 267   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
 268 
 269   size_t used_bytes = hr->used();
 270   _inc_recorded_rs_lengths += rs_length;
 271   _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
 272   _inc_bytes_used_before += used_bytes;
 273 
 274   assert(!hr->in_collection_set(), "invariant");
 275   _g1->register_young_region_with_cset(hr);
 276 }
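
A minimal standalone sketch of the single-writer publication pattern above, using std::atomic release/acquire in place of HotSpot's OrderAccess::storestore()/loadload(): the element is stored before the new length is published, and readers load the length before touching the slots below it. The names push/read_all and the fixed capacity are illustrative only.

#include <atomic>
#include <cstdio>

static unsigned            regions[16];    // fixed-capacity backing array (capacity assumed sufficient)
static std::atomic<size_t> cur_length{0};  // published element count

void push(unsigned region_index) {          // single writer
  size_t len = cur_length.load(std::memory_order_relaxed);
  regions[len] = region_index;              // store the element first
  cur_length.store(len + 1, std::memory_order_release);  // then publish the new length
}

void read_all() {                           // any number of concurrent readers
  size_t len = cur_length.load(std::memory_order_acquire);
  for (size_t i = 0; i < len; i++) {
    printf("region %u\n", regions[i]);      // safe: slot i was stored before the length was published
  }
}

int main() {
  push(7);
  push(42);
  read_all();
  return 0;
}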
 277 
 278 void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
 279   assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
 280   add_young_region_common(hr);
 281 }
 282 
 283 void G1CollectionSet::add_eden_region(HeapRegion* hr) {
 284   assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
 285   add_young_region_common(hr);
 286 }
 287 
 288 #ifndef PRODUCT
 289 bool G1CollectionSet::verify_young_ages() {
 290   assert_at_safepoint(true);
 291 
 292   bool ret = true;
 293 
 294   size_t length = _collection_set_cur_length;
 295   for (size_t i = 0; i < length; i++) {
 296     HeapRegion* curr = G1CollectedHeap::heap()->region_at(_collection_set_regions[i]);
 297 
 298     guarantee(curr->is_young(), "Region must be young but is %s", curr->get_type_str());
 299 
 300     SurvRateGroup* group = curr->surv_rate_group();
 301 
 302     if (group == NULL) {
 303       log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
 304       ret = false;
 305     }
 306 
 307     if (curr->age_in_surv_rate_group() < 0) {
 308       log_error(gc, verify)("## encountered negative age in young region");
 309       ret = false;
 310     }
 311   }
 312 
 313   if (!ret) {
 314     LogStreamHandle(Error, gc, verify) log;
 315     print(&log);
 316   }


 472       // avoid generating output per region.
 473       log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
 474                                 "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
 475                                 old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
 476     }
 477 
 478     cset_chooser()->verify();
 479   }
 480 
 481   stop_incremental_building();
 482 
 483   log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
 484                             old_region_length(), predicted_old_time_ms, time_remaining_ms);
 485 
 486   double non_young_end_time_sec = os::elapsedTime();
 487   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
 488 }
 489 
 490 #ifdef ASSERT
 491 void G1CollectionSet::verify_young_cset_indices() const {
 492   assert_at_safepoint(true);
 493 
 494   ResourceMark rm;
 495   uint* heap_region_indices = NEW_RESOURCE_ARRAY(uint, young_region_length());
 496   for (uint i = 0; i < young_region_length(); ++i) {
 497     heap_region_indices[i] = (uint)-1;
 498   }
 499 
 500   size_t length = _collection_set_cur_length;
 501   for (size_t i = 0; i < length; i++) {
 502     HeapRegion* hr = G1CollectedHeap::heap()->region_at(_collection_set_regions[i]);
 503 
 504     const int idx = hr->young_index_in_cset();
 505     assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", hr->hrm_index());
 506     assert((uint)idx < young_region_length(), "Young cset index too large for region %u", hr->hrm_index());
 507 
 508     assert(heap_region_indices[idx] == (uint)-1,
 509            "Index %d used by multiple regions, first use by region %u, second by region %u",
 510            idx, heap_region_indices[idx], hr->hrm_index());
 511 
 512     heap_region_indices[idx] = hr->hrm_index();
 513   }
 514 }
 515 #endif