/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "logging/logStream.hpp"
#include "utilities/debug.hpp"
#include "utilities/quickSort.hpp"

G1CollectorState* G1CollectionSet::collector_state() {
  return _g1h->collector_state();
}

G1GCPhaseTimes* G1CollectionSet::phase_times() {
  return _policy->phase_times();
}

CollectionSetChooser* G1CollectionSet::cset_chooser() {
  return _cset_chooser;
}

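// Predict the time (in ms) it will take to evacuate the given region,
// taking the current collector phase (young-only vs. mixed) into account.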
double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->in_young_only_phase());
}

G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  _g1h(g1h),
  _policy(policy),
  _cset_chooser(new CollectionSetChooser()),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),
  _collection_set_regions(NULL),
  _collection_set_cur_length(0),
  _collection_set_max_length(0),
  _bytes_used_before(0),
  _recorded_rs_lengths(0),
  _inc_build_state(Inactive),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_lengths(0),
  _inc_recorded_rs_lengths_diffs(0),
  _inc_predicted_elapsed_time_ms(0.0),
  _inc_predicted_elapsed_time_ms_diffs(0.0) {
}

G1CollectionSet::~G1CollectionSet() {
  if (_collection_set_regions != NULL) {
    FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
  }
  delete _cset_chooser;
}

void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                          uint survivor_cset_region_length) {
  assert_at_safepoint_on_vm_thread();

  _eden_region_length     = eden_cset_region_length;
  _survivor_region_length = survivor_cset_region_length;

  assert((size_t) young_region_length() == _collection_set_cur_length,
         "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);

  _old_region_length      = 0;
}

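// Allocate the backing array of region indices, sized for the maximum
// possible number of regions. Must only be called once.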
void G1CollectionSet::initialize(uint max_region_length) {
  guarantee(_collection_set_regions == NULL, "Must only initialize once.");
  _collection_set_max_length = max_region_length;
  _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
}

void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

// Add the given heap region to the non-incremental (old) part of the collection set.
void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert_at_safepoint_on_vm_thread();

  assert(_inc_build_state == Active, "Precondition");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  _g1h->register_old_region_with_cset(hr);

  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");

  _bytes_used_before += hr->used();
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_region_length += 1;
}

// Initialize the per-collection-set information
void G1CollectionSet::start_incremental_building() {
  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
  assert(_inc_build_state == Inactive, "Precondition");

  _inc_bytes_used_before = 0;

  _inc_recorded_rs_lengths = 0;
  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms = 0.0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_build_state = Active;
}

void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_lengths and
  // _inc_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_recorded_rs_lengths_diffs >= 0) {
    _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. In theory the diff should always be positive,
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them,
    // it is possible that we get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
    if (_inc_recorded_rs_lengths >= diffs) {
      _inc_recorded_rs_lengths -= diffs;
    } else {
      _inc_recorded_rs_lengths = 0;
    }
  }
  _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;

  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;
}

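// Empty the collection set by resetting its length; the backing array
// of region indices is reused.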
void G1CollectionSet::clear() {
  assert_at_safepoint_on_vm_thread();
  _collection_set_cur_length = 0;
}

void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
  iterate_from(cl, 0, 1);
}

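// Iterate over the collection set, starting each worker at a different
// offset (based on worker_id) and wrapping around at the end, so that
// parallel workers start on disjoint parts of the set. Iteration stops
// early if the closure returns true for a region.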
void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
  size_t len = _collection_set_cur_length;
  OrderAccess::loadload();
  if (len == 0) {
    return;
  }
  size_t start_pos = (worker_id * len) / total_workers;
  size_t cur_pos = start_pos;

  do {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos]);
    bool result = cl->do_heap_region(r);
    if (result) {
      cl->set_incomplete();
      return;
    }
    cur_pos++;
    if (cur_pos == len) {
      cur_pos = 0;
    }
  } while (cur_pos != start_pos);
}

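// Called by the concurrent young RSet sampling thread to update the
// remembered set length and elapsed time prediction cached in a young
// region, accumulating the differences in the *_diffs fields.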
void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");

  // We could have updated _inc_recorded_rs_lengths and
  // _inc_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

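// Common code for adding an eden or survivor region to the incremental
// collection set.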
void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(_inc_build_state == Active, "Precondition");

  size_t collection_set_length = _collection_set_cur_length;
  assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
  hr->set_young_index_in_cset((int)collection_set_length);

  _collection_set_regions[collection_set_length] = hr->hrm_index();
  // Concurrent readers must observe the store of the value in the array before an
  // update to the length field.
  OrderAccess::storestore();
  _collection_set_cur_length++;
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");

  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause, or
  // * adding the current allocation region to the incremental cset
  //   when it is retired.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region).
  // We need to clear and then set the collection set information cached
  // in the heap region here (before the region gets added to the
  // collection set). An individual heap region's cached values are
  // calculated, aggregated with the policy collection set info, and
  // cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.
  // Ignore calls to this due to retirement during full gc.

  if (!_g1h->collector_state()->in_full_gc()) {
    size_t rs_length = hr->rem_set()->occupied();
    double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);

    // Cache the values we have added to the aggregated information
    // in the heap region in case we have to remove this region from
    // the incremental collection set, or it is updated by the
    // RSet sampling code.
    hr->set_recorded_rs_length(rs_length);
    hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

    _inc_recorded_rs_lengths += rs_length;
    _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
    _inc_bytes_used_before += hr->used();
  }

  assert(!hr->in_collection_set(), "invariant");
  _g1h->register_young_region_with_cset(hr);
}

void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

void G1CollectionSet::add_eden_region(HeapRegion* hr) {
  assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}

#ifndef PRODUCT
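// Verification closure: checks that every region in the collection set
// is young, has a survivor rate group and a non-negative age.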
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
public:
  bool _valid;

  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }

  virtual bool do_heap_region(HeapRegion* r) {
    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());

    SurvRateGroup* group = r->surv_rate_group();

    if (group == NULL) {
      log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
      _valid = false;
    }

    if (r->age_in_surv_rate_group() < 0) {
      log_error(gc, verify)("## encountered negative age in young region");
      _valid = false;
    }

    return false;
  }

  bool valid() const { return _valid; }
};

bool G1CollectionSet::verify_young_ages() {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungAgesClosure cl;
  iterate(&cl);

  if (!cl.valid()) {
    LogStreamHandle(Error, gc, verify) log;
    print(&log);
  }

  return cl.valid();
}

class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
  outputStream* _st;
public:
  G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }

  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
    _st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
                  HR_FORMAT_PARAMS(r),
                  p2i(r->prev_top_at_mark_start()),
                  p2i(r->next_top_at_mark_start()),
                  r->age_in_surv_rate_group_cond());
    return false;
  }
};

void G1CollectionSet::print(outputStream* st) {
  st->print_cr("\nCollection_set:");

  G1PrintCollectionSetDetailClosure cl(st);
  iterate(&cl);
}
#endif // !PRODUCT

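// Finalize the young part of the collection set: fold the concurrently
// accumulated RSet length and prediction diffs into the main fields,
// record the eden and survivor region lengths, and return the pause
// time budget remaining for old regions.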
double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors) {
  double young_start_time_sec = os::elapsedTime();

  finalize_incremental_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);

  size_t pending_cards = _policy->pending_cards();
  double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                            pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  // The young list is laid out so that the survivor regions from the previous
  // pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = survivors->length();
  uint eden_region_length = _g1h->eden_regions_count();
  init_region_lengths(eden_region_length, survivor_region_length);

  verify_young_cset_indices();

  // Clear the fields that point to the survivor list - they are all young now.
  survivors->convert_to_eden();

  _bytes_used_before = _inc_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
                            eden_region_length, survivor_region_length, _inc_predicted_elapsed_time_ms, target_pause_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}

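// Comparison function used to sort the collection set region indices
// into ascending order.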
static int compare_region_idx(const uint a, const uint b) {
  if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return -1;
  }
}

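// Finalize the old part of the collection set: during a mixed phase,
// add old candidate regions from the collection set chooser until the
// maximum old region count, the reclaimable space threshold, or the
// remaining time budget is reached. Finally, sort the region indices.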
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();
  double predicted_old_time_ms = 0.0;

  if (collector_state()->in_mixed_phase()) {
    cset_chooser()->verify();
    const uint min_old_cset_length = _policy->calc_min_old_cset_length();
    const uint max_old_cset_length = _policy->calc_max_old_cset_length();

    uint expensive_region_num = 0;
    bool check_time_remaining = _policy->adaptive_young_list_length();

    HeapRegion* hr = cset_chooser()->peek();
    while (hr != NULL) {
      if (old_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
                                  old_region_length(), max_old_cset_length);
        break;
      }

      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
      double reclaimable_percent = _policy->reclaimable_bytes_percent(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_percent <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
                                  "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                  old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr);
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
                                      "predicted time: %1.2fms, remaining time: %1.2fms, old %u regions, min %u regions",
                                      predicted_time_ms, time_remaining_ms, old_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.

          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
                                    old_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_old_time_ms += predicted_time_ms;
      cset_chooser()->pop(); // already have region via peek()
      _g1h->old_set_remove(hr);
      add_old_region(hr);

      hr = cset_chooser()->peek();
    }
    if (hr == NULL) {
      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min). "
                                "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
                                old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
    }

    cset_chooser()->verify();
  }

  stop_incremental_building();

  log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2fms",
                            old_region_length(), predicted_old_time_ms, time_remaining_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

  QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}

#ifdef ASSERT
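// Debug-only closure verifying that each young region in the collection
// set has a young index that is both in range and unique.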
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
private:
  size_t _young_length;
  int* _heap_region_indices;
public:
  G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
    _heap_region_indices = NEW_C_HEAP_ARRAY(int, young_length, mtGC);
    for (size_t i = 0; i < young_length; i++) {
      _heap_region_indices[i] = -1;
    }
  }
  ~G1VerifyYoungCSetIndicesClosure() {
    FREE_C_HEAP_ARRAY(int, _heap_region_indices);
  }

  virtual bool do_heap_region(HeapRegion* r) {
    const int idx = r->young_index_in_cset();

    assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
    assert((size_t)idx < _young_length, "Young cset index too large for region %u", r->hrm_index());

    assert(_heap_region_indices[idx] == -1,
           "Index %d used by multiple regions, first use by region %u, second by region %u",
           idx, _heap_region_indices[idx], r->hrm_index());

    _heap_region_indices[idx] = r->hrm_index();

    return false;
  }
};

void G1CollectionSet::verify_young_cset_indices() const {
  assert_at_safepoint_on_vm_thread();

  G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
  iterate(&cl);
}
#endif // ASSERT