/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "logging/logStream.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/quickSort.hpp"

G1CollectorState* G1CollectionSet::collector_state() {
  return _g1h->collector_state();
}

G1GCPhaseTimes* G1CollectionSet::phase_times() {
  return _policy->phase_times();
}

double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->in_young_only_phase());
}

G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  _g1h(g1h),
  _policy(policy),
  _candidates(NULL),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),
  _collection_set_regions(NULL),
  _collection_set_cur_length(0),
  _collection_set_max_length(0),
  _num_optional_regions(0),
  _bytes_used_before(0),
  _recorded_rs_length(0),
  _inc_build_state(Inactive),
  _inc_part_start(0),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_length(0),
  _inc_recorded_rs_length_diff(0),
  _inc_predicted_elapsed_time_ms(0.0),
  _inc_predicted_elapsed_time_ms_diff(0.0) {
}

G1CollectionSet::~G1CollectionSet() {
  FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
  free_optional_regions();
  clear_candidates();
}

void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                          uint survivor_cset_region_length) {
  assert_at_safepoint_on_vm_thread();

  _eden_region_length     = eden_cset_region_length;
  _survivor_region_length = survivor_cset_region_length;

  assert((size_t) young_region_length() == _collection_set_cur_length,
         "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);

  _old_region_length = 0;
  free_optional_regions();
}

void G1CollectionSet::initialize(uint max_region_length) {
  guarantee(_collection_set_regions == NULL, "Must only initialize once.");
  _collection_set_max_length = max_region_length;
  _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
}

void G1CollectionSet::free_optional_regions() {
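  // The optional regions themselves live in the candidates list; only the
  // count of regions currently treated as optional needs to be reset here.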
  _num_optional_regions = 0;
}

void G1CollectionSet::clear_candidates() {
  delete _candidates;
  _candidates = NULL;
}

void G1CollectionSet::set_recorded_rs_length(size_t rs_length) {
  _recorded_rs_length = rs_length;
}

// Add the heap region to the non-incremental (old) part of the collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert_at_safepoint_on_vm_thread();

  assert(_inc_build_state == Active,
         "Precondition, actively building cset or adding optional later on");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the collection set");
  _g1h->register_old_region_with_region_attr(hr);

  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");

  _bytes_used_before += hr->used();
  _recorded_rs_length += hr->rem_set()->occupied();
  _old_region_length++;

  _g1h->old_set_remove(hr);
}

void G1CollectionSet::add_optional_region(HeapRegion* hr) {
  assert(hr->is_old(), "the region should be old");
  assert(!hr->in_collection_set(), "should not already be in the CSet");

  _g1h->register_optional_region_with_region_attr(hr);

  hr->set_index_in_opt_cset(_num_optional_regions++);
}

void G1CollectionSet::start_incremental_building() {
  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
  assert(_inc_build_state == Inactive, "Precondition");

  _inc_bytes_used_before = 0;

  _inc_recorded_rs_length = 0;
  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_elapsed_time_ms = 0.0;
  _inc_predicted_elapsed_time_ms_diff = 0.0;

  update_incremental_marker();
}

void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_length and
  // _inc_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diff fields. Here we add the diffs to
  // the "main" fields.

  _inc_recorded_rs_length += _inc_recorded_rs_length_diff;
  _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diff;

  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_elapsed_time_ms_diff = 0.0;
}

void G1CollectionSet::clear() {
  assert_at_safepoint_on_vm_thread();
  _collection_set_cur_length = 0;
}

void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
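  // The loadload barrier below pairs with the storestore barrier in
  // add_young_region_common(): by reading the length before any of the
  // region indices, a reader that observes a given length is guaranteed
  // to also observe all region indices published before it.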
  size_t len = _collection_set_cur_length;
  OrderAccess::loadload();

  for (uint i = 0; i < len; i++) {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
    bool result = cl->do_heap_region(r);
    if (result) {
      cl->set_incomplete();
      return;
    }
  }
}

void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
  assert_at_safepoint();

  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = _candidates->at(i);
    bool result = cl->do_heap_region(r);
    guarantee(!result, "Must not cancel iteration");
  }
}

void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
                                                    HeapRegionClaimer* hr_claimer,
                                                    uint worker_id,
                                                    uint total_workers) const {
  assert_at_safepoint();

  size_t len = increment_length();
  if (len == 0) {
    return;
  }

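  // Spread the work: each worker starts at its own offset into the current
  // increment and walks the whole increment, wrapping around at the end.
  // The claimer (if provided) makes sure every region is processed by
  // exactly one worker.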
  size_t start_pos = (worker_id * len) / total_workers;
  size_t cur_pos = start_pos;

  do {
    uint region_idx = _collection_set_regions[cur_pos + _inc_part_start];
    if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
      HeapRegion* r = _g1h->region_at(region_idx);
      bool result = cl->do_heap_region(r);
      guarantee(!result, "Must not cancel iteration");
    }

    cur_pos++;
    if (cur_pos == len) {
      cur_pos = 0;
    }
  } while (cur_pos != start_pos);
}

void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");

  // We could have updated _inc_recorded_rs_length and
  // _inc_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diff) and we'll just add them to the "main"
  // fields at the start of a GC.

  size_t old_rs_length = hr->recorded_rs_length();
  assert(old_rs_length <= new_rs_length,
         "Remembered set sizes must increase (changed from " SIZE_FORMAT " to " SIZE_FORMAT " region %u type %s)",
         old_rs_length, new_rs_length, hr->hrm_index(), hr->get_short_type_str());
  size_t rs_length_diff = new_rs_length - old_rs_length;
  _inc_recorded_rs_length_diff += rs_length_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_predicted_elapsed_time_ms_diff += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(_inc_build_state == Active, "Precondition");

  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause or
  // * adding the current allocation region to the incremental cset
  //   when it is retired.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region).
  // We need to clear and set the cached recorded collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.
  // Ignore calls to this due to retirement during full gc.

  if (!_g1h->collector_state()->in_full_gc()) {
    size_t rs_length = hr->rem_set()->occupied();
    double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);

    // Cache the values we have added to the aggregated information
    // in the heap region in case we have to remove this region from
    // the incremental collection set, or it is updated by the
    // rset sampling code
    hr->set_recorded_rs_length(rs_length);
    hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

    _inc_recorded_rs_length += rs_length;
    _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
    _inc_bytes_used_before += hr->used();
  }

  assert(!hr->in_collection_set(), "invariant");
  _g1h->register_young_region_with_region_attr(hr);

  size_t collection_set_length = _collection_set_cur_length;
  // We use UINT_MAX as the "invalid" marker in verification.
  assert(collection_set_length < (UINT_MAX - 1),
         "Collection set is too large with " SIZE_FORMAT " entries", collection_set_length);
  hr->set_young_index_in_cset((uint)collection_set_length + 1);

  _collection_set_regions[collection_set_length] = hr->hrm_index();
  // Concurrent readers must observe the store of the value in the array before an
  // update to the length field.
  OrderAccess::storestore();
  _collection_set_cur_length++;
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
}

void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

void G1CollectionSet::add_eden_region(HeapRegion* hr) {
  assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

#ifndef PRODUCT
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
  bool _valid;
public:
  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }

  virtual bool do_heap_region(HeapRegion* r) {
    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());

    SurvRateGroup* group = r->surv_rate_group();

    if (group == NULL) {
      log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
      _valid = false;
    }

    if (r->age_in_surv_rate_group() < 0) {
      log_error(gc, verify)("## encountered negative age in young region");
      _valid = false;
    }

    return false;
  }

  bool valid() const { return _valid; }
};

bool G1CollectionSet::verify_young_ages() {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungAgesClosure cl;
  iterate(&cl);

  if (!cl.valid()) {
    LogStreamHandle(Error, gc, verify) log;
    print(&log);
  }

  return cl.valid();
}

class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
  outputStream* _st;
public:
  G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }

  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
    _st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
                  HR_FORMAT_PARAMS(r),
                  p2i(r->prev_top_at_mark_start()),
                  p2i(r->next_top_at_mark_start()),
                  r->age_in_surv_rate_group_cond());
    return false;
  }
};

void G1CollectionSet::print(outputStream* st) {
  st->print_cr("\nCollection_set:");

  G1PrintCollectionSetDetailClosure cl(st);
  iterate(&cl);
}
#endif // !PRODUCT

double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors) {
  double young_start_time_sec = os::elapsedTime();

  finalize_incremental_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);

  size_t pending_cards = _policy->pending_cards_at_gc_start() + _g1h->hot_card_cache()->num_entries();
  double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                            pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  // The young list is laid out so that the survivor regions from the
  // previous pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = survivors->length();
  uint eden_region_length = _g1h->eden_regions_count();
  init_region_lengths(eden_region_length, survivor_region_length);

  verify_young_cset_indices();

  // Clear the fields that point to the survivor list - they are all young now.
  survivors->convert_to_eden();

  _bytes_used_before = _inc_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
                            eden_region_length, survivor_region_length, _inc_predicted_elapsed_time_ms, target_pause_time_ms);

  // Use the remembered set lengths accumulated while incrementally building
  // the young collection set as the initial recorded rem set length.
  set_recorded_rs_length(_inc_recorded_rs_length);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}

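// Comparator for sorting the collection set region indices into ascending
// order; used by the QuickSort::sort() call in finalize_old_part().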
static int compare_region_idx(const uint a, const uint b) {
  if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return -1;
  }
}

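// Select old regions for the collection set: in the mixed phase, move the
// initial old regions from the candidates into the collection set and prepare
// any optional old regions, based on the remaining pause time.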
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();

  if (collector_state()->in_mixed_phase()) {
    candidates()->verify();

    uint num_initial_old_regions;
    uint num_optional_old_regions;

    _policy->calculate_old_collection_set_regions(candidates(),
                                                  time_remaining_ms,
                                                  num_initial_old_regions,
                                                  num_optional_old_regions);

    // Prepare initial old regions.
    move_candidates_to_collection_set(num_initial_old_regions);

    // Prepare optional old regions for evacuation.
    uint candidate_idx = candidates()->cur_idx();
    for (uint i = 0; i < num_optional_old_regions; i++) {
      add_optional_region(candidates()->at(candidate_idx + i));
    }

    candidates()->verify();
  }

  stop_incremental_building();

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

  QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}

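// Move the given number of regions from the current front (cur_idx()) of the
// candidates list into the collection set.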
void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) {
  if (num_old_candidate_regions == 0) {
    return;
  }
  uint candidate_idx = candidates()->cur_idx();
  for (uint i = 0; i < num_old_candidate_regions; i++) {
    HeapRegion* r = candidates()->at(candidate_idx + i);
    // This potentially optional candidate region is going to be an actual collection
    // set region. Clear cset marker.
    _g1h->clear_region_attr(r);
    add_old_region(r);
  }
  candidates()->remove(num_old_candidate_regions);

  candidates()->verify();
}

void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
  double time_remaining_ms = finalize_young_part(target_pause_time_ms, survivor);
  finalize_old_part(time_remaining_ms);
}

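// Select more of the current optional regions for evacuation, based on the
// remaining pause time, and move them into the collection set increment.
// Returns true if any regions were selected.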
bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
  update_incremental_marker();

  uint num_selected_regions;
  _policy->calculate_optional_collection_set_regions(candidates(),
                                                     _num_optional_regions,
                                                     remaining_pause_time,
                                                     num_selected_regions);

  move_candidates_to_collection_set(num_selected_regions);

  _num_optional_regions -= num_selected_regions;

  stop_incremental_building();

  _g1h->verify_region_attr_remset_update();

  return num_selected_regions > 0;
}

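// Give up on evacuating the remaining optional regions: restore their region
// attributes and remembered set tracking state, as they stay in the candidates
// list for a future collection.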
void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
    pss->record_unused_optional_region(r);
    // Clear collection set marker and make sure that the remembered set information
    // is correct as we still need it later.
    _g1h->clear_region_attr(r);
    _g1h->register_region_with_region_attr(r);
    r->clear_index_in_opt_cset();
  }
  free_optional_regions();

  _g1h->verify_region_attr_remset_update();
}

#ifdef ASSERT
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
private:
  size_t _young_length;
  uint* _heap_region_indices;
public:
  G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
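    // Young indices in the cset are 1-based (see add_young_region_common()),
    // so allocate young_length + 1 slots; slot 0 stays unused.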
    _heap_region_indices = NEW_C_HEAP_ARRAY(uint, young_length + 1, mtGC);
    for (size_t i = 0; i < young_length + 1; i++) {
      _heap_region_indices[i] = UINT_MAX;
    }
  }
  ~G1VerifyYoungCSetIndicesClosure() {
    FREE_C_HEAP_ARRAY(uint, _heap_region_indices);
  }

  virtual bool do_heap_region(HeapRegion* r) {
    const uint idx = r->young_index_in_cset();

    assert(idx > 0, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
    assert(idx <= _young_length, "Young cset index %u too large for region %u", idx, r->hrm_index());

    assert(_heap_region_indices[idx] == UINT_MAX,
           "Index %u used by multiple regions, first use by region %u, second by region %u",
           idx, _heap_region_indices[idx], r->hrm_index());

    _heap_region_indices[idx] = r->hrm_index();

    return false;
  }
};

void G1CollectionSet::verify_young_cset_indices() const {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
  iterate(&cl);
}
#endif