
src/hotspot/share/gc/g1/g1Policy.cpp

rev 49511 : imported patch 8200234-g1concurrentmark-refactorings
rev 49522 : [mq]: 8035557-pause-time-predictions
rev 49523 : imported patch 8035557-pause-time-predictions-cleanup
rev 49525 : [mq]: 8200426-sangheon-review


  45 #include "utilities/debug.hpp"
  46 #include "utilities/growableArray.hpp"
  47 #include "utilities/pair.hpp"
  48 
  49 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  50   _predictor(G1ConfidencePercent / 100.0),
  51   _analytics(new G1Analytics(&_predictor)),
  52   _remset_tracker(),
  53   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  54   _ihop_control(create_ihop_control(&_predictor)),
  55   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  56   _young_list_fixed_length(0),
  57   _short_lived_surv_rate_group(new SurvRateGroup()),
  58   _survivor_surv_rate_group(new SurvRateGroup()),
  59   _reserve_factor((double) G1ReservePercent / 100.0),
  60   _reserve_regions(0),
  61   _rs_lengths_prediction(0),
  62   _bytes_allocated_in_old_since_last_gc(0),
  63   _initial_mark_to_mixed(),
  64   _collection_set(NULL),
  65   _g1(NULL),
  66   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  67   _tenuring_threshold(MaxTenuringThreshold),
  68   _max_survivor_regions(0),
  69   _survivors_age_table(true),
  70   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) {
  71 }
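
The predictor is seeded with G1ConfidencePercent / 100.0 as its sigma. As a sketch of how such a confidence-scaled prediction is typically formed (the pad-by-standard-deviations behavior of G1Predictions is an assumption here, not something this page shows):

// Sketch only: a prediction pads the decaying average by sigma
// standard deviations of the sampled sequence.
static double padded_prediction(double davg, double dstddev) {
  const double sigma = G1ConfidencePercent / 100.0;  // e.g. 50 -> 0.5
  return davg + sigma * dstddev;
}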
  72 
  73 G1Policy::~G1Policy() {
  74   delete _ihop_control;
  75 }
  76 
  77 G1CollectorState* G1Policy::collector_state() const { return _g1->collector_state(); }
  78 
  79 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  80   _g1 = g1h;
  81   _collection_set = collection_set;
  82 
  83   assert(Heap_lock->owned_by_self(), "Locking discipline.");
  84 
  85   if (!adaptive_young_list_length()) {
  86     _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
  87   }
  88   _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
  89 
  90   _free_regions_at_end_of_collection = _g1->num_free_regions();
  91 
  92   update_young_list_max_and_target_length();
  93   // We may immediately start allocating regions and placing them on the
  94   // collection set list. Initialize the per-collection-set info.
  95   _collection_set->start_incremental_building();
  96 }
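
For orientation, a minimal sketch of how init() is expected to be called; MutexLocker and Heap_lock are standard HotSpot, the surrounding caller is hypothetical:

// Hypothetical caller: init() asserts that the Heap_lock is held.
{
  MutexLocker ml(Heap_lock);
  policy->init(g1h, collection_set);  // sizes the young gen and starts
                                      // incremental collection set building
}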
  97 
  98 void G1Policy::note_gc_start() {
  99   phase_times()->note_gc_start();
 100 }
 101 
 102 class G1YoungLengthPredictor {
 103   const bool _during_cm;
 104   const double _base_time_ms;
 105   const double _base_free_regions;
 106   const double _target_pause_time_ms;
 107   const G1Policy* const _policy;
 108 
 109  public:
 110   G1YoungLengthPredictor(bool during_cm,


 201 }
 202 
 203 uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) {
 204   uint unbounded_target_length = update_young_list_target_length(rs_lengths);
 205   update_max_gc_locker_expansion();
 206   return unbounded_target_length;
 207 }
 208 
 209 uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
 210   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
 211   _young_list_target_length = young_lengths.first;
 212   return young_lengths.second;
 213 }
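
The pair returned by young_list_target_lengths() is easy to misread: first is the bounded target that gets installed, second the unbounded one handed back to the caller (and eventually to the IHOP update). A worked illustration, with hypothetical numbers:

// Suppose the heap reserve caps the target at 100 regions while the
// pause-time prediction alone would allow 120:
//   young_lengths.first  == 100  -> stored in _young_list_target_length
//   young_lengths.second == 120  -> returned as the "unrestrained" length,
//                                   later fed to update_ihop_prediction()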
 214 
 215 G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengths) const {
 216   YoungTargetLengths result;
 217 
 218   // Calculate the absolute and desired min bounds first.
 219 
 220   // This is how many young regions we already have (currently: the survivors).
 221   const uint base_min_length = _g1->survivor_regions_count();
 222   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 223   // This is the absolute minimum young length. Ensure that we
 224   // will at least have one eden region available for allocation.
 225   uint absolute_min_length = base_min_length + MAX2(_g1->eden_regions_count(), (uint)1);
 226   // If we shrank the young list target, it should not shrink below the current size.
 227   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 228   // Calculate the absolute and desired max bounds.
 229 
 230   uint desired_max_length = calculate_young_list_desired_max_length();
 231 
 232   uint young_list_target_length = 0;
 233   if (adaptive_young_list_length()) {
 234     if (collector_state()->in_young_only_phase()) {
 235       young_list_target_length =
 236                         calculate_young_list_target_length(rs_lengths,
 237                                                            base_min_length,
 238                                                            desired_min_length,
 239                                                            desired_max_length);
 240     } else {
 241       // Don't calculate anything and let the code below bound it to
 242       // the desired_min_length, i.e., do the next GC as soon as
 243       // possible to maximize how many old regions we can add to it.
 244     }
 245   } else {


 364       // These are the post-conditions of the binary search above:
 365       assert(min_young_length < max_young_length,
 366              "otherwise we should have discovered that max_young_length "
 367              "fits into the pause target and not done the binary search");
 368       assert(p.will_fit(min_young_length),
 369              "min_young_length, the result of the binary search, should "
 370              "fit into the pause target");
 371       assert(!p.will_fit(min_young_length + 1),
 372              "min_young_length, the result of the binary search, should be "
 373              "optimal, so no larger length should fit into the pause target");
 374     }
 375   } else {
 376     // Even the minimum length doesn't fit into the pause time
 377     // target, return it as the result nevertheless.
 378   }
 379   return base_min_length + min_young_length;
 380 }
 381 
 382 double G1Policy::predict_survivor_regions_evac_time() const {
 383   double survivor_regions_evac_time = 0.0;
 384   const GrowableArray<HeapRegion*>* survivor_regions = _g1->survivor()->regions();
 385 
 386   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
 387        it != survivor_regions->end();
 388        ++it) {
 389     survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->in_young_only_phase());
 390   }
 391   return survivor_regions_evac_time;
 392 }
 393 
 394 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
 395   guarantee(adaptive_young_list_length(), "should not call this otherwise");
 396 
 397   if (rs_lengths > _rs_lengths_prediction) {
 398     // add 10% to avoid having to recalculate often
 399     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 400     update_rs_lengths_prediction(rs_lengths_prediction);
 401 
 402     update_young_list_max_and_target_length(rs_lengths_prediction);
 403   }
 404 }
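
The integer arithmetic above pads the observed value by 10% before caching it, so minor growth does not force a recalculation on every sample. A worked example:

size_t rs_lengths = 40000;                     // observed remembered set lengths
size_t prediction = rs_lengths * 1100 / 1000;  // 44000, i.e. 10% headroom
// Only a later observation above 44000 triggers another
// update_young_list_max_and_target_length() call.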


 427   double end_sec = os::elapsedTime();
 428   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 429   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 430 
 431   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 432 
 433   collector_state()->set_in_full_gc(false);
 434 
 435   // "Nuke" the heuristics that control the young/mixed GC
 436   // transitions and make sure we start with young GCs after the Full GC.
 437   collector_state()->set_in_young_only_phase(true);
 438   collector_state()->set_in_young_gc_before_mixed(false);
 439   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 440   collector_state()->set_in_initial_mark_gc(false);
 441   collector_state()->set_mark_or_rebuild_in_progress(false);
 442   collector_state()->set_clearing_next_bitmap(false);
 443 
 444   _short_lived_surv_rate_group->start_adding_regions();
 445   // also call this on any additional surv rate groups
 446 
 447   _free_regions_at_end_of_collection = _g1->num_free_regions();
 448   // Reset the survivors' SurvRateGroup.
 449   _survivor_surv_rate_group->reset();
 450   update_young_list_max_and_target_length();
 451   update_rs_lengths_prediction();
 452 
 453   _bytes_allocated_in_old_since_last_gc = 0;
 454 
 455   record_pause(FullGC, _full_collection_start_sec, end_sec);
 456 }
 457 
 458 void G1Policy::record_collection_pause_start(double start_time_sec) {
 459   // We only need to do this here as the policy will only be applied
 460   // to the GC we're about to start. So, there is no point in calculating this
 461   // every time we calculate / recalculate the target young length.
 462   update_survivors_policy();
 463 
 464   assert(_g1->used() == _g1->recalculate_used(),
 465          "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
 466          _g1->used(), _g1->recalculate_used());
 467 
 468   phase_times()->record_cur_collection_start_sec(start_time_sec);
 469   _pending_cards = _g1->pending_card_num();
 470 
 471   _collection_set->reset_bytes_used_before();
 472   _bytes_copied_during_gc = 0;
 473 
 474   // do that for any other surv rate groups
 475   _short_lived_surv_rate_group->stop_adding_regions();
 476   _survivors_age_table.clear();
 477 
 478   assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
 479 }
 480 
 481 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
 482   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 483   collector_state()->set_in_initial_mark_gc(false);
 484 }
 485 
 486 void G1Policy::record_concurrent_mark_remark_start() {
 487   _mark_remark_start_sec = os::elapsedTime();
 488 }
 489 
 490 void G1Policy::record_concurrent_mark_remark_end() {
 491   double end_time_sec = os::elapsedTime();
 492   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 493   _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
 494   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
 495 
 496   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 497 }
 498 


 510 }
 511 
 512 double G1Policy::non_young_other_time_ms() const {
 513   return phase_times()->non_young_cset_choice_time_ms() +
 514          phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
 515 }
 516 
 517 double G1Policy::other_time_ms(double pause_time_ms) const {
 518   return pause_time_ms - phase_times()->cur_collection_par_time_ms();
 519 }
 520 
 521 double G1Policy::constant_other_time_ms(double pause_time_ms) const {
 522   return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
 523 }
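
These helpers decompose a pause by successive subtraction; with hypothetical timings:

// pause_time_ms                 = 50.0  whole pause
// cur_collection_par_time_ms()  = 42.0  parallel phases
// total_free_cset_time_ms()     =  3.0  freeing the collection set
// other_time_ms(50.0)           = 50.0 - 42.0 = 8.0
// constant_other_time_ms(50.0)  =  8.0 -  3.0 = 5.0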
 524 
 525 CollectionSetChooser* G1Policy::cset_chooser() const {
 526   return _collection_set->cset_chooser();
 527 }
 528 
 529 bool G1Policy::about_to_start_mixed_phase() const {
 530   return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
 531 }
 532 
 533 bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 534   if (about_to_start_mixed_phase()) {
 535     return false;
 536   }
 537 
 538   size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
 539 
 540   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 541   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 542   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
 543 
 544   bool result = false;
 545   if (marking_request_bytes > marking_initiating_used_threshold) {
 546     result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
 547     log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
 548                               result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
 549                               cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
 550   }
 551 
 552   return result;
 553 }
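
Concretely, with hypothetical numbers for a 2 GB heap whose adaptive threshold currently sits at 45% of capacity:

// cur_used_bytes        = 1.0 GB            non-young occupancy
// alloc_byte_size       = 64 KB             pending allocation
// threshold             = 0.45 * 2 GB ~ 921.6 MB
// marking_request_bytes = 1.0 GB + 64 KB > threshold
//   -> true, provided we are in the young-only phase and not in the
//      last young GC before a mixed phase.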
 554 
 555 // Anything below this threshold is considered to be zero.
 556 #define MIN_TIMER_GRANULARITY 0.0000001
 557 
 558 void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
 559   double end_time_sec = os::elapsedTime();
 560 
 561   size_t cur_used_bytes = _g1->used();
 562   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 563   bool this_pause_included_initial_mark = false;
 564   bool this_pause_was_young_only = collector_state()->in_young_only_phase();
 565 
 566   bool update_stats = !_g1->evacuation_failed();
 567 
 568   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 569 
 570   _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 571 
 572   this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
 573   if (this_pause_included_initial_mark) {
 574     record_concurrent_mark_init_end(0.0);
 575   } else {
 576     maybe_start_marking();
 577   }
 578 
 579   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
 580   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 581     // This usually happens due to the timer not having the required
 582     // granularity. Some Linuxes are the usual culprits.
 583     // We'll just set it to something (arbitrarily) small.
 584     app_time_ms = 1.0;
 585   }
 586 


 686                                                             _collection_set->old_region_length());
 687     }
 688 
 689     _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
 690 
 691     // Do not update RS lengths with information from mixed gc: it is wildly different
 692     // from that of young-only gc and messes up young gen sizing right after the mixed gc phase.
 693     // During mixed gc we do not use it for young gen sizing anyway.
 694     if (this_pause_was_young_only) {
 695       _analytics->report_pending_cards((double) _pending_cards);
 696       _analytics->report_rs_lengths((double) _max_rs_lengths);
 697     }
 698   }
 699 
 700   assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
 701          "If the last pause has been an initial mark, we should not have been in the marking window");
 702   if (this_pause_included_initial_mark) {
 703     collector_state()->set_mark_or_rebuild_in_progress(true);
 704   }
 705 
 706   _free_regions_at_end_of_collection = _g1->num_free_regions();
 707   // IHOP control wants to know the expected young gen length if it were not
 708   // restrained by the heap reserve. Using the actual length would make the
 709   // prediction too small and thus limit the young gen every time we get to the
 710   // predicted target occupancy.
 711   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
 712   update_rs_lengths_prediction();
 713 
 714   update_ihop_prediction(app_time_ms / 1000.0,
 715                          _bytes_allocated_in_old_since_last_gc,
 716                          last_unrestrained_young_length * HeapRegion::GrainBytes,
 717                          this_pause_was_young_only);
 718   _bytes_allocated_in_old_since_last_gc = 0;
 719 
 720   _ihop_control->send_trace_event(_g1->gc_tracer_stw());
 721 
 722   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 723   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 724 
 725   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
 726     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
 727                                 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
 728                                 update_rs_time_goal_ms, scan_hcc_time_ms);
 729 
 730     update_rs_time_goal_ms = 0;
 731   } else {
 732     update_rs_time_goal_ms -= scan_hcc_time_ms;
 733   }
 734   _g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
 735                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
 736                                    update_rs_time_goal_ms);
 737 
 738   cset_chooser()->verify();
 739 }
 740 
 741 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor) {
 742   if (G1UseAdaptiveIHOP) {
 743     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
 744                                      predictor,
 745                                      G1ReservePercent,
 746                                      G1HeapWastePercent);
 747   } else {
 748     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
 749   }
 750 }
 751 
 752 void G1Policy::update_ihop_prediction(double mutator_time_s,
 753                                       size_t mutator_alloc_bytes,
 754                                       size_t young_gen_size,


 843   // Predicting the number of cards is based on which type of GC
 844   // we're predicting for.
 845   size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
 846   size_t bytes_to_copy = predict_bytes_to_copy(hr);
 847 
 848   double region_elapsed_time_ms =
 849     _analytics->predict_rs_scan_time_ms(card_num, collector_state()->in_young_only_phase()) +
 850     _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
 851 
 852   // The prediction of the "other" time for this region is based
 853   // upon the region type and NOT the GC type.
 854   if (hr->is_young()) {
 855     region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
 856   } else {
 857     region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
 858   }
 859   return region_elapsed_time_ms;
 860 }
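
In short, the per-region estimate is the sum of three analytics predictions:

// region_elapsed_time_ms =
//     predict_rs_scan_time_ms(card_num)           // remembered set scan
//   + predict_object_copy_time_ms(bytes_to_copy)  // evacuation copy
//   + (is_young ? young_other : non_young_other)  // fixed per-region overhead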
 861 
 862 bool G1Policy::should_allocate_mutator_region() const {
 863   uint young_list_length = _g1->young_regions_count();
 864   uint young_list_target_length = _young_list_target_length;
 865   return young_list_length < young_list_target_length;
 866 }
 867 
 868 bool G1Policy::can_expand_young_list() const {
 869   uint young_list_length = _g1->young_regions_count();
 870   uint young_list_max_length = _young_list_max_length;
 871   return young_list_length < young_list_max_length;
 872 }
 873 
 874 bool G1Policy::adaptive_young_list_length() const {
 875   return _young_gen_sizer.adaptive_young_list_length();
 876 }
 877 
 878 size_t G1Policy::desired_survivor_size() const {
 879   size_t const survivor_capacity = HeapRegion::GrainWords * _max_survivor_regions;
 880   return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
 881 }
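
A worked example of this computation, assuming 32 MB regions (GrainWords = 4M words with 8-byte HeapWords) and the default TargetSurvivorRatio of 50:

// GrainWords            = 4 * 1024 * 1024 words
// _max_survivor_regions = 5
// survivor_capacity     = 20M words
// desired size          = 20M * 50 / 100 = 10M words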
 882 
 883 void G1Policy::print_age_table() {
 884   _survivors_age_table.print_age_table(_tenuring_threshold);
 885 }
 886 
 887 void G1Policy::update_max_gc_locker_expansion() {
 888   uint expansion_region_num = 0;
 889   if (GCLockerEdenExpansionPercent > 0) {


 901 
 902 // Calculates survivor space parameters.
 903 void G1Policy::update_survivors_policy() {
 904   double max_survivor_regions_d =
 905                  (double) _young_list_target_length / (double) SurvivorRatio;
 906   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
 907   // smaller than 1.0) we'll get 1.
 908   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
 909 
 910   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
 911   if (UsePerfData) {
 912     _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
 913     _policy_counters->desired_survivor_size()->set_value(desired_survivor_size() * oopSize);
 914   }
 915 }
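
The ceiling mentioned in the comment plays out as follows; the numbers are hypothetical:

// _young_list_target_length = 20, SurvivorRatio = 8:
//   20 / 8.0 = 2.5  -> ceil -> _max_survivor_regions = 3
// Any positive quotient below 1.0 (say 0.2) still yields 1, so a nonzero
// young target always allows at least one survivor region.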
 916 
 917 bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
 918   // We actually check whether we are marking here and not if we are in a
 919   // reclamation phase. This means that we will schedule a concurrent mark
 920   // even while we are still in the process of reclaiming memory.
 921   bool during_cycle = _g1->concurrent_mark()->cm_thread()->during_cycle();
 922   if (!during_cycle) {
 923     log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
 924     collector_state()->set_initiate_conc_mark_if_possible(true);
 925     return true;
 926   } else {
 927     log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
 928     return false;
 929   }
 930 }
 931 
 932 void G1Policy::initiate_conc_mark() {
 933   collector_state()->set_in_initial_mark_gc(true);
 934   collector_state()->set_initiate_conc_mark_if_possible(false);
 935 }
 936 
 937 void G1Policy::decide_on_conc_mark_initiation() {
 938   // We are about to decide on whether this pause will be an
 939   // initial-mark pause.
 940 
 941   // First, collector_state()->in_initial_mark_gc() should not already be set. We
 942   // will set it here if we have to. However, it should be cleared by
 943   // the end of the pause (it's only set for the duration of an
 944   // initial-mark pause).
 945   assert(!collector_state()->in_initial_mark_gc(), "pre-condition");
 946 
 947   if (collector_state()->initiate_conc_mark_if_possible()) {
 948     // We had noticed on a previous pause that the heap occupancy has
 949     // gone over the initiating threshold and we should start a
 950     // concurrent marking cycle. So we might initiate one.
 951 
 952     if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
 953       // Initiate a new initial mark if there is no marking or reclamation going on.
 954       initiate_conc_mark();
 955       log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
 956     } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
 957   // Initiate a user-requested initial mark. An initial mark must be a young-only
 958       // GC, so the collector state must be updated to reflect this.
 959       collector_state()->set_in_young_only_phase(true);
 960       collector_state()->set_in_young_gc_before_mixed(false);
 961 
 962       // We might have ended up coming here about to start a mixed phase with a collection set
 963       // active. The following remark might change the "evacuation efficiency" of
 964       // the regions in this set, leading to failing asserts later.
 965       // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
 966       clear_collection_set_candidates();
 967       abort_time_to_mixed_tracking();
 968       initiate_conc_mark();
 969       log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
 970     } else {
 971       // The concurrent marking thread is still finishing up the
 972       // previous cycle. If we start one right now the two cycles
 973       // overlap. In particular, the concurrent marking thread might
 974       // be in the process of clearing the next marking bitmap (which
 975       // we will use for the next cycle if we start one). Starting a
 976       // cycle now will be bad given that parts of the marking
 977       // information might get cleared by the marking thread. And we
 978       // cannot wait for the marking thread to finish the cycle as it
 979       // periodically yields while clearing the next marking bitmap
 980       // and, if it's in a yield point, it's waiting for us to
 981       // finish. So, at this point we will not start a cycle and we'll
 982       // let the concurrent marking thread complete the last one.
 983       log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
 984     }
 985   }
 986 }
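
The branches above reduce to a small decision table (a paraphrase of the code, not added behavior):

// When initiate_conc_mark_if_possible() is set:
//   no marking/mixed phase pending && young-only  -> initiate_conc_mark()
//   user-requested concurrent full GC             -> force young-only state,
//                                                    drop cset candidates,
//                                                    initiate_conc_mark()
//   otherwise (previous cycle still winding down) -> skip; let it finish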
 987 
 988 void G1Policy::record_concurrent_mark_cleanup_end() {
 989   cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
 990 
 991   bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
 992   if (!mixed_gc_pending) {
 993     clear_collection_set_candidates();
 994     abort_time_to_mixed_tracking();
 995   }
 996   collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
 997   collector_state()->set_mark_or_rebuild_in_progress(false);
 998 
 999   double end_sec = os::elapsedTime();
1000   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1001   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1002   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1003 
1004   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1005 }
1006 
1007 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
1008   return percent_of(reclaimable_bytes, _g1->capacity());
1009 }
1010 
1011 class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
1012   virtual bool do_heap_region(HeapRegion* r) {
1013     r->rem_set()->clear_locked(true /* only_cardset */);
1014     return false;
1015   }
1016 };
1017 
1018 void G1Policy::clear_collection_set_candidates() {
1019   // Clear remembered sets of remaining candidate regions and the actual candidate
1020   // list.
1021   G1ClearCollectionSetCandidateRemSets cl;
1022   cset_chooser()->iterate(&cl);
1023   cset_chooser()->clear();
1024 }
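
One subtlety of the closure above: do_heap_region() returning false means "keep iterating"; returning true ends the iteration early (the usual HeapRegionClosure convention, stated here from memory). A hypothetical early-exit variant for contrast:

// Sketch: clear at most a fixed budget of candidate remembered sets
// (assumes n > 0).
class ClearAtMostN : public HeapRegionClosure {
  uint _budget;
 public:
  ClearAtMostN(uint n) : _budget(n) { }
  virtual bool do_heap_region(HeapRegion* r) {
    r->rem_set()->clear_locked(true /* only_cardset */);
    return --_budget == 0;  // true terminates the iteration
  }
};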
1025 
1026 void G1Policy::maybe_start_marking() {
1027   if (need_to_start_conc_mark("end of GC")) {
1028     // Note: this might have already been set, if during the last




  45 #include "utilities/debug.hpp"
  46 #include "utilities/growableArray.hpp"
  47 #include "utilities/pair.hpp"
  48 
  49 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  50   _predictor(G1ConfidencePercent / 100.0),
  51   _analytics(new G1Analytics(&_predictor)),
  52   _remset_tracker(),
  53   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  54   _ihop_control(create_ihop_control(&_predictor)),
  55   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  56   _young_list_fixed_length(0),
  57   _short_lived_surv_rate_group(new SurvRateGroup()),
  58   _survivor_surv_rate_group(new SurvRateGroup()),
  59   _reserve_factor((double) G1ReservePercent / 100.0),
  60   _reserve_regions(0),
  61   _rs_lengths_prediction(0),
  62   _bytes_allocated_in_old_since_last_gc(0),
  63   _initial_mark_to_mixed(),
  64   _collection_set(NULL),
  65   _g1h(NULL),
  66   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  67   _tenuring_threshold(MaxTenuringThreshold),
  68   _max_survivor_regions(0),
  69   _survivors_age_table(true),
  70   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) {
  71 }
  72 
  73 G1Policy::~G1Policy() {
  74   delete _ihop_control;
  75 }
  76 
  77 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
  78 
  79 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
  80   _g1h = g1h;
  81   _collection_set = collection_set;
  82 
  83   assert(Heap_lock->owned_by_self(), "Locking discipline.");
  84 
  85   if (!adaptive_young_list_length()) {
  86     _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
  87   }
  88   _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
  89 
  90   _free_regions_at_end_of_collection = _g1h->num_free_regions();
  91 
  92   update_young_list_max_and_target_length();
  93   // We may immediately start allocating regions and placing them on the
  94   // collection set list. Initialize the per-collection-set info.
  95   _collection_set->start_incremental_building();
  96 }
  97 
  98 void G1Policy::note_gc_start() {
  99   phase_times()->note_gc_start();
 100 }
 101 
 102 class G1YoungLengthPredictor {
 103   const bool _during_cm;
 104   const double _base_time_ms;
 105   const double _base_free_regions;
 106   const double _target_pause_time_ms;
 107   const G1Policy* const _policy;
 108 
 109  public:
 110   G1YoungLengthPredictor(bool during_cm,


 201 }
 202 
 203 uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) {
 204   uint unbounded_target_length = update_young_list_target_length(rs_lengths);
 205   update_max_gc_locker_expansion();
 206   return unbounded_target_length;
 207 }
 208 
 209 uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
 210   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
 211   _young_list_target_length = young_lengths.first;
 212   return young_lengths.second;
 213 }
 214 
 215 G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengths) const {
 216   YoungTargetLengths result;
 217 
 218   // Calculate the absolute and desired min bounds first.
 219 
 220   // This is how many young regions we already have (currently: the survivors).
 221   const uint base_min_length = _g1h->survivor_regions_count();
 222   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 223   // This is the absolute minimum young length. Ensure that we
 224   // will at least have one eden region available for allocation.
 225   uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1);
 226   // If we shrank the young list target, it should not shrink below the current size.
 227   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 228   // Calculate the absolute and desired max bounds.
 229 
 230   uint desired_max_length = calculate_young_list_desired_max_length();
 231 
 232   uint young_list_target_length = 0;
 233   if (adaptive_young_list_length()) {
 234     if (collector_state()->in_young_only_phase()) {
 235       young_list_target_length =
 236                         calculate_young_list_target_length(rs_lengths,
 237                                                            base_min_length,
 238                                                            desired_min_length,
 239                                                            desired_max_length);
 240     } else {
 241       // Don't calculate anything and let the code below bound it to
 242       // the desired_min_length, i.e., do the next GC as soon as
 243       // possible to maximize how many old regions we can add to it.
 244     }
 245   } else {


 364       // These are the post-conditions of the binary search above:
 365       assert(min_young_length < max_young_length,
 366              "otherwise we should have discovered that max_young_length "
 367              "fits into the pause target and not done the binary search");
 368       assert(p.will_fit(min_young_length),
 369              "min_young_length, the result of the binary search, should "
 370              "fit into the pause target");
 371       assert(!p.will_fit(min_young_length + 1),
 372              "min_young_length, the result of the binary search, should be "
 373              "optimal, so no larger length should fit into the pause target");
 374     }
 375   } else {
 376     // Even the minimum length doesn't fit into the pause time
 377     // target, return it as the result nevertheless.
 378   }
 379   return base_min_length + min_young_length;
 380 }
 381 
 382 double G1Policy::predict_survivor_regions_evac_time() const {
 383   double survivor_regions_evac_time = 0.0;
 384   const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
 385 
 386   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
 387        it != survivor_regions->end();
 388        ++it) {
 389     survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->in_young_only_phase());
 390   }
 391   return survivor_regions_evac_time;
 392 }
 393 
 394 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
 395   guarantee(adaptive_young_list_length(), "should not call this otherwise");
 396 
 397   if (rs_lengths > _rs_lengths_prediction) {
 398     // add 10% to avoid having to recalculate often
 399     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 400     update_rs_lengths_prediction(rs_lengths_prediction);
 401 
 402     update_young_list_max_and_target_length(rs_lengths_prediction);
 403   }
 404 }


 427   double end_sec = os::elapsedTime();
 428   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 429   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 430 
 431   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 432 
 433   collector_state()->set_in_full_gc(false);
 434 
 435   // "Nuke" the heuristics that control the young/mixed GC
 436   // transitions and make sure we start with young GCs after the Full GC.
 437   collector_state()->set_in_young_only_phase(true);
 438   collector_state()->set_in_young_gc_before_mixed(false);
 439   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 440   collector_state()->set_in_initial_mark_gc(false);
 441   collector_state()->set_mark_or_rebuild_in_progress(false);
 442   collector_state()->set_clearing_next_bitmap(false);
 443 
 444   _short_lived_surv_rate_group->start_adding_regions();
 445   // also call this on any additional surv rate groups
 446 
 447   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 448   // Reset the survivors' SurvRateGroup.
 449   _survivor_surv_rate_group->reset();
 450   update_young_list_max_and_target_length();
 451   update_rs_lengths_prediction();
 452 
 453   _bytes_allocated_in_old_since_last_gc = 0;
 454 
 455   record_pause(FullGC, _full_collection_start_sec, end_sec);
 456 }
 457 
 458 void G1Policy::record_collection_pause_start(double start_time_sec) {
 459   // We only need to do this here as the policy will only be applied
 460   // to the GC we're about to start. So, there is no point in calculating this
 461   // every time we calculate / recalculate the target young length.
 462   update_survivors_policy();
 463 
 464   assert(_g1h->used() == _g1h->recalculate_used(),
 465          "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
 466          _g1h->used(), _g1h->recalculate_used());
 467 
 468   phase_times()->record_cur_collection_start_sec(start_time_sec);
 469   _pending_cards = _g1h->pending_card_num();
 470 
 471   _collection_set->reset_bytes_used_before();
 472   _bytes_copied_during_gc = 0;
 473 
 474   // do that for any other surv rate groups
 475   _short_lived_surv_rate_group->stop_adding_regions();
 476   _survivors_age_table.clear();
 477 
 478   assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
 479 }
 480 
 481 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
 482   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 483   collector_state()->set_in_initial_mark_gc(false);
 484 }
 485 
 486 void G1Policy::record_concurrent_mark_remark_start() {
 487   _mark_remark_start_sec = os::elapsedTime();
 488 }
 489 
 490 void G1Policy::record_concurrent_mark_remark_end() {
 491   double end_time_sec = os::elapsedTime();
 492   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 493   _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
 494   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
 495 
 496   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 497 }
 498 


 510 }
 511 
 512 double G1Policy::non_young_other_time_ms() const {
 513   return phase_times()->non_young_cset_choice_time_ms() +
 514          phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
 515 }
 516 
 517 double G1Policy::other_time_ms(double pause_time_ms) const {
 518   return pause_time_ms - phase_times()->cur_collection_par_time_ms();
 519 }
 520 
 521 double G1Policy::constant_other_time_ms(double pause_time_ms) const {
 522   return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
 523 }
 524 
 525 CollectionSetChooser* G1Policy::cset_chooser() const {
 526   return _collection_set->cset_chooser();
 527 }
 528 
 529 bool G1Policy::about_to_start_mixed_phase() const {
 530   return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
 531 }
 532 
 533 bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 534   if (about_to_start_mixed_phase()) {
 535     return false;
 536   }
 537 
 538   size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
 539 
 540   size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
 541   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 542   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
 543 
 544   bool result = false;
 545   if (marking_request_bytes > marking_initiating_used_threshold) {
 546     result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
 547     log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
 548                               result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
 549                               cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
 550   }
 551 
 552   return result;
 553 }
 554 
 555 // Anything below this threshold is considered to be zero.
 556 #define MIN_TIMER_GRANULARITY 0.0000001
 557 
 558 void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
 559   double end_time_sec = os::elapsedTime();
 560 
 561   size_t cur_used_bytes = _g1h->used();
 562   assert(cur_used_bytes == _g1h->recalculate_used(), "It should!");
 563   bool this_pause_included_initial_mark = false;
 564   bool this_pause_was_young_only = collector_state()->in_young_only_phase();
 565 
 566   bool update_stats = !_g1h->evacuation_failed();
 567 
 568   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 569 
 570   _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 571 
 572   this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
 573   if (this_pause_included_initial_mark) {
 574     record_concurrent_mark_init_end(0.0);
 575   } else {
 576     maybe_start_marking();
 577   }
 578 
 579   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
 580   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 581     // This usually happens due to the timer not having the required
 582     // granularity. Some Linuxes are the usual culprits.
 583     // We'll just set it to something (arbitrarily) small.
 584     app_time_ms = 1.0;
 585   }
 586 


 686                                                             _collection_set->old_region_length());
 687     }
 688 
 689     _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
 690 
 691     // Do not update RS lengths with information from mixed gc: it is wildly different
 692     // from that of young-only gc and messes up young gen sizing right after the mixed gc phase.
 693     // During mixed gc we do not use it for young gen sizing anyway.
 694     if (this_pause_was_young_only) {
 695       _analytics->report_pending_cards((double) _pending_cards);
 696       _analytics->report_rs_lengths((double) _max_rs_lengths);
 697     }
 698   }
 699 
 700   assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
 701          "If the last pause has been an initial mark, we should not have been in the marking window");
 702   if (this_pause_included_initial_mark) {
 703     collector_state()->set_mark_or_rebuild_in_progress(true);
 704   }
 705 
 706   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 707   // IHOP control wants to know the expected young gen length if it were not
 708   // restrained by the heap reserve. Using the actual length would make the
 709   // prediction too small and thus limit the young gen every time we get to the
 710   // predicted target occupancy.
 711   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
 712   update_rs_lengths_prediction();
 713 
 714   update_ihop_prediction(app_time_ms / 1000.0,
 715                          _bytes_allocated_in_old_since_last_gc,
 716                          last_unrestrained_young_length * HeapRegion::GrainBytes,
 717                          this_pause_was_young_only);
 718   _bytes_allocated_in_old_since_last_gc = 0;
 719 
 720   _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 721 
 722   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 723   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 724 
 725   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
 726     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
 727                                 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
 728                                 update_rs_time_goal_ms, scan_hcc_time_ms);
 729 
 730     update_rs_time_goal_ms = 0;
 731   } else {
 732     update_rs_time_goal_ms -= scan_hcc_time_ms;
 733   }
 734   _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
 735                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
 736                                    update_rs_time_goal_ms);
 737 
 738   cset_chooser()->verify();
 739 }
 740 
 741 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor) {
 742   if (G1UseAdaptiveIHOP) {
 743     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
 744                                      predictor,
 745                                      G1ReservePercent,
 746                                      G1HeapWastePercent);
 747   } else {
 748     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
 749   }
 750 }
 751 
 752 void G1Policy::update_ihop_prediction(double mutator_time_s,
 753                                       size_t mutator_alloc_bytes,
 754                                       size_t young_gen_size,


 843   // Predicting the number of cards is based on which type of GC
 844   // we're predicting for.
 845   size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
 846   size_t bytes_to_copy = predict_bytes_to_copy(hr);
 847 
 848   double region_elapsed_time_ms =
 849     _analytics->predict_rs_scan_time_ms(card_num, collector_state()->in_young_only_phase()) +
 850     _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
 851 
 852   // The prediction of the "other" time for this region is based
 853   // upon the region type and NOT the GC type.
 854   if (hr->is_young()) {
 855     region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
 856   } else {
 857     region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
 858   }
 859   return region_elapsed_time_ms;
 860 }
 861 
 862 bool G1Policy::should_allocate_mutator_region() const {
 863   uint young_list_length = _g1h->young_regions_count();
 864   uint young_list_target_length = _young_list_target_length;
 865   return young_list_length < young_list_target_length;
 866 }
 867 
 868 bool G1Policy::can_expand_young_list() const {
 869   uint young_list_length = _g1h->young_regions_count();
 870   uint young_list_max_length = _young_list_max_length;
 871   return young_list_length < young_list_max_length;
 872 }
 873 
 874 bool G1Policy::adaptive_young_list_length() const {
 875   return _young_gen_sizer.adaptive_young_list_length();
 876 }
 877 
 878 size_t G1Policy::desired_survivor_size() const {
 879   size_t const survivor_capacity = HeapRegion::GrainWords * _max_survivor_regions;
 880   return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
 881 }
 882 
 883 void G1Policy::print_age_table() {
 884   _survivors_age_table.print_age_table(_tenuring_threshold);
 885 }
 886 
 887 void G1Policy::update_max_gc_locker_expansion() {
 888   uint expansion_region_num = 0;
 889   if (GCLockerEdenExpansionPercent > 0) {


 901 
 902 // Calculates survivor space parameters.
 903 void G1Policy::update_survivors_policy() {
 904   double max_survivor_regions_d =
 905                  (double) _young_list_target_length / (double) SurvivorRatio;
 906   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
 907   // smaller than 1.0) we'll get 1.
 908   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
 909 
 910   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
 911   if (UsePerfData) {
 912     _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
 913     _policy_counters->desired_survivor_size()->set_value(desired_survivor_size() * oopSize);
 914   }
 915 }
 916 
 917 bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
 918   // We actually check whether we are marking here and not if we are in a
 919   // reclamation phase. This means that we will schedule a concurrent mark
 920   // even while we are still in the process of reclaiming memory.
 921   bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
 922   if (!during_cycle) {
 923     log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
 924     collector_state()->set_initiate_conc_mark_if_possible(true);
 925     return true;
 926   } else {
 927     log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
 928     return false;
 929   }
 930 }
 931 
 932 void G1Policy::initiate_conc_mark() {
 933   collector_state()->set_in_initial_mark_gc(true);
 934   collector_state()->set_initiate_conc_mark_if_possible(false);
 935 }
 936 
 937 void G1Policy::decide_on_conc_mark_initiation() {
 938   // We are about to decide on whether this pause will be an
 939   // initial-mark pause.
 940 
 941   // First, collector_state()->in_initial_mark_gc() should not already be set. We
 942   // will set it here if we have to. However, it should be cleared by
 943   // the end of the pause (it's only set for the duration of an
 944   // initial-mark pause).
 945   assert(!collector_state()->in_initial_mark_gc(), "pre-condition");
 946 
 947   if (collector_state()->initiate_conc_mark_if_possible()) {
 948     // We had noticed on a previous pause that the heap occupancy has
 949     // gone over the initiating threshold and we should start a
 950     // concurrent marking cycle. So we might initiate one.
 951 
 952     if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
 953       // Initiate a new initial mark if there is no marking or reclamation going on.
 954       initiate_conc_mark();
 955       log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
 956     } else if (_g1h->is_user_requested_concurrent_full_gc(_g1h->gc_cause())) {
 957   // Initiate a user-requested initial mark. An initial mark must be a young-only
 958       // GC, so the collector state must be updated to reflect this.
 959       collector_state()->set_in_young_only_phase(true);
 960       collector_state()->set_in_young_gc_before_mixed(false);
 961 
 962       // We might have ended up coming here about to start a mixed phase with a collection set
 963       // active. The following remark might change the "evacuation efficiency" of
 964       // the regions in this set, leading to failing asserts later.
 965       // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
 966       clear_collection_set_candidates();
 967       abort_time_to_mixed_tracking();
 968       initiate_conc_mark();
 969       log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
 970     } else {
 971       // The concurrent marking thread is still finishing up the
 972       // previous cycle. If we start one right now the two cycles
 973       // overlap. In particular, the concurrent marking thread might
 974       // be in the process of clearing the next marking bitmap (which
 975       // we will use for the next cycle if we start one). Starting a
 976       // cycle now will be bad given that parts of the marking
 977       // information might get cleared by the marking thread. And we
 978       // cannot wait for the marking thread to finish the cycle as it
 979       // periodically yields while clearing the next marking bitmap
 980       // and, if it's in a yield point, it's waiting for us to
 981       // finish. So, at this point we will not start a cycle and we'll
 982       // let the concurrent marking thread complete the last one.
 983       log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
 984     }
 985   }
 986 }
 987 
 988 void G1Policy::record_concurrent_mark_cleanup_end() {
 989   cset_chooser()->rebuild(_g1h->workers(), _g1h->num_regions());
 990 
 991   bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
 992   if (!mixed_gc_pending) {
 993     clear_collection_set_candidates();
 994     abort_time_to_mixed_tracking();
 995   }
 996   collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
 997   collector_state()->set_mark_or_rebuild_in_progress(false);
 998 
 999   double end_sec = os::elapsedTime();
1000   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1001   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1002   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1003 
1004   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1005 }
1006 
1007 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
1008   return percent_of(reclaimable_bytes, _g1h->capacity());
1009 }
1010 
1011 class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
1012   virtual bool do_heap_region(HeapRegion* r) {
1013     r->rem_set()->clear_locked(true /* only_cardset */);
1014     return false;
1015   }
1016 };
1017 
1018 void G1Policy::clear_collection_set_candidates() {
1019   // Clear remembered sets of remaining candidate regions and the actual candidate
1020   // list.
1021   G1ClearCollectionSetCandidateRemSets cl;
1022   cset_chooser()->iterate(&cl);
1023   cset_chooser()->clear();
1024 }
1025 
1026 void G1Policy::maybe_start_marking() {
1027   if (need_to_start_conc_mark("end of GC")) {
1028     // Note: this might have already been set, if during the last

