/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "utilities/debug.hpp"

G1CollectorState* G1CollectionSet::collector_state() {
  return _g1->collector_state();
}

G1GCPhaseTimes* G1CollectionSet::phase_times() {
  return _policy->phase_times();
}

CollectionSetChooser* G1CollectionSet::cset_chooser() {
  return _cset_chooser;
}

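// Convenience wrapper around the policy's per-region cost prediction. The
// prediction depends on whether we are currently doing young-only or mixed
// GCs, as reported by collector_state()->gcs_are_young().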
double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
}

G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h) :
  _g1(g1h),
  _policy(NULL),
  _cset_chooser(new CollectionSetChooser()),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),

  _head(NULL),
  _bytes_used_before(0),
  _bytes_live_before(0),
  _recorded_rs_lengths(0),
  // Incremental CSet attributes
  _inc_build_state(Inactive),
  _inc_head(NULL),
  _inc_tail(NULL),
  _inc_bytes_used_before(0),
  _inc_bytes_live_before(0),
  _inc_recorded_rs_lengths(0),
  _inc_recorded_rs_lengths_diffs(0),
  _inc_predicted_elapsed_time_ms(0.0),
  _inc_predicted_elapsed_time_ms_diffs(0.0) {}

G1CollectionSet::~G1CollectionSet() {
  delete _cset_chooser;
}

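// Sets the eden and survivor region counts for the collection set about to be
// collected and resets the old region count; the old count is incremented
// later, as old regions are picked via add_old_region().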
void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                          uint survivor_cset_region_length) {
  _eden_region_length     = eden_cset_region_length;
  _survivor_region_length = survivor_cset_region_length;
  _old_region_length      = 0;
}

void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert(_inc_build_state == Active, "Precondition");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  _g1->register_old_region_with_cset(hr);
  hr->set_next_in_collection_set(_head);
  _head = hr;
  _bytes_used_before += hr->used();
  _bytes_live_before += hr->live_bytes();
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_region_length += 1;
}

// Initialize the per-collection-set information
void G1CollectionSet::start_incremental_building() {
  assert(_inc_build_state == Inactive, "Precondition");

  _inc_head = NULL;
  _inc_tail = NULL;
  _inc_bytes_used_before = 0;
  _inc_bytes_live_before = 0;

  _inc_recorded_rs_lengths = 0;
  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms = 0.0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_build_state = Active;
}

void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_lengths and
  // _inc_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.
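  // For example (hypothetical numbers): if young regions were added with a
  // total recorded RS length of 1000 cards, and concurrent sampling later
  // observed those regions growing by another 50 cards in total, then
  // _inc_recorded_rs_lengths is 1000, _inc_recorded_rs_lengths_diffs is 50,
  // and the value used for prediction below becomes 1050.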

  if (_inc_recorded_rs_lengths_diffs >= 0) {
    _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. In theory the diff should always be positive, as
    // RSets can only grow between GCs. However, given that we sample their
    // size concurrently with other threads updating them, it is possible
    // that we read back a wrong (stale) size, which could make the
    // calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
    if (_inc_recorded_rs_lengths >= diffs) {
      _inc_recorded_rs_lengths -= diffs;
    } else {
      _inc_recorded_rs_lengths = 0;
    }
  }
  _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;

  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;
}

void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");

  // We could have updated _inc_recorded_rs_lengths and
  // _inc_predicted_elapsed_time_ms directly, but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates in two
  // separate fields (*_diffs) and just add them to the "main"
  // fields at the start of a GC.
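  // (The *_diffs fields are folded back into the "main" fields by
  // finalize_incremental_building(), which runs at a safepoint at the start
  // of a GC.)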

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_build_state == Active, "Precondition");

  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause or
  // * adding the current allocation region to the incremental cset
  //   when it is retired.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region).
  // We need to clear and set the cached recorded/predicted collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code.
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

  size_t used_bytes = hr->used();
  _inc_recorded_rs_lengths += rs_length;
  _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
  _inc_bytes_used_before += used_bytes;
  _inc_bytes_live_before += hr->live_bytes();

  assert(!hr->in_collection_set(), "invariant");
  _g1->register_young_region_with_cset(hr);
  assert(hr->next_in_collection_set() == NULL, "invariant");
}

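// Note on ordering for the two functions below: add_eden_region() pushes
// newly retired eden regions onto the head of the incremental cset, while
// add_survivor_regions() appends survivors at the tail at the end of a
// pause, so the incremental cset ends up laid out as
//   [newly young regions ++ survivors from last pause].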
// Add the region at the RHS of the incremental cset
void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_young_region_common(hr);

  // Now add the region at the right hand side
  if (_inc_tail == NULL) {
    assert(_inc_head == NULL, "invariant");
    _inc_head = hr;
  } else {
    _inc_tail->set_next_in_collection_set(hr);
  }
  _inc_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectionSet::add_eden_region(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(hr->is_eden(), "Logic");

  // Do the 'common' stuff
  add_young_region_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_head);
  if (_inc_head == NULL) {
    assert(_inc_tail == NULL, "Invariant");
    _inc_tail = hr;
  }
  _inc_head = hr;
}

#ifndef PRODUCT
void G1CollectionSet::print(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_head() || list_head == head(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
                 HR_FORMAT_PARAMS(csr),
                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
                 csr->age_in_surv_rate_group_cond());
    csr = next;
  }
}
#endif // !PRODUCT

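// Finalize the young part of the collection set for this pause: fold in the
// concurrent refinement diffs, tag the survivors from the previous pause as
// eden, adopt the incremental cset as the collection set, and return how much
// of the pause time budget is left for old regions.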
double G1CollectionSet::finalize_young_part(double target_pause_time_ms) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
  guarantee(_head == NULL, "Precondition");

  size_t pending_cards = _policy->pending_cards();
  double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                            pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());

  // The young list is laid out such that the survivor regions from the
  // previous pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->eden_length();
  init_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    // There is a convention that all the young regions in the CSet
    // are tagged as "eden", so we do this for the survivors here. We
    // use the special set_eden_pre_gc() as it doesn't check that the
    // region is free (which is not the case here).
    hr->set_eden_pre_gc();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _head = _inc_head;
  _bytes_used_before = _inc_bytes_used_before;
  _bytes_live_before = _inc_bytes_live_before;
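  // Deduct the predicted time for the (mandatory) young regions from the
  // pause budget; whatever is left is what finalize_old_part() may spend on
  // old regions.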
  time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
                            eden_region_length, survivor_region_length, _inc_predicted_elapsed_time_ms, target_pause_time_ms);

  // The RS lengths recorded for the collection set are the ones accumulated
  // while building the incremental collection set.
  set_recorded_rs_lengths(_inc_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}

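// Finalize the old part of the collection set: during mixed GCs, pull old
// candidate regions from the cset_chooser until one of the cut-off conditions
// below triggers, then close the incremental building phase.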
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();
  double predicted_old_time_ms = 0.0;

  if (!collector_state()->gcs_are_young()) {
    cset_chooser()->verify();
    const uint min_old_cset_length = _policy->calc_min_old_cset_length();
    const uint max_old_cset_length = _policy->calc_max_old_cset_length();
    const size_t estimated_available_bytes = _policy->available_bytes_estimate();

    uint expensive_region_num = 0;
    bool check_time_remaining = _policy->adaptive_young_list_length();

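    // Candidate old regions are considered in the order provided by the
    // cset_chooser. We stop adding regions when one of the conditions below
    // triggers: the maximum old region count is reached, the remaining
    // reclaimable space is no longer above G1HeapWastePercent, the live bytes
    // added would exceed the estimated available bytes, or (with adaptive
    // young list sizing) a region no longer fits the remaining time budget
    // after the minimum old region count has been reached; without adaptive
    // sizing we simply stop at the minimum count.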
    HeapRegion* hr = cset_chooser()->peek();
    while (hr != NULL) {
      if (old_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
                                  old_region_length(), max_old_cset_length);
        break;
      }

      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
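      // (For example, with G1HeapWastePercent set to 5 we stop once the
      // space still reclaimable from the remaining candidates is at or
      // below roughly 5% of the heap.)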
      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
      double reclaimable_perc = _policy->reclaimable_bytes_perc(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_perc <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
                                  "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                  old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
        break;
      }

      // Stop adding regions if the live bytes (according to the last marking)
      // added so far would exceed the estimated free bytes.
      if ((_bytes_live_before + hr->live_bytes()) > estimated_available_bytes) {
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reached estimated free space limit)");
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr);
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
                                      "predicted time: %1.2fms, remaining time: %1.2fms, old %u regions, min %u regions",
                                      predicted_time_ms, time_remaining_ms, old_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.

          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
                                    old_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_old_time_ms += predicted_time_ms;
      cset_chooser()->pop(); // already have region via peek()
      _g1->old_set_remove(hr);
      add_old_region(hr);

      hr = cset_chooser()->peek();
    }
    if (hr == NULL) {
      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min). "
                                "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
                                old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
    }

    cset_chooser()->verify();
  }

  stop_incremental_building();

  log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2fms",
                            old_region_length(), predicted_old_time_ms, time_remaining_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}