
src/hotspot/share/gc/g1/g1Policy.cpp

rev 57223 : imported patch 8225484-changes-to-survivor-calculation


  46 #include "runtime/arguments.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/mutexLocker.hpp"
  49 #include "utilities/debug.hpp"
  50 #include "utilities/growableArray.hpp"
  51 #include "utilities/pair.hpp"
  52 
  53 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  54   _predictor(G1ConfidencePercent / 100.0),
  55   _analytics(new G1Analytics(&_predictor)),
  56   _remset_tracker(),
  57   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  58   _ihop_control(create_ihop_control(&_predictor)),
  59   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  60   _full_collection_start_sec(0.0),
  61   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
  62   _young_list_target_length(0),
  63   _young_list_fixed_length(0),
  64   _young_list_max_length(0),
  65   _eden_surv_rate_group(new G1SurvRateGroup()),
  66   _survivor_surv_rate_group(new G1SurvRateGroup()),

  67   _reserve_factor((double) G1ReservePercent / 100.0),
  68   _reserve_regions(0),
  69   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  70   _free_regions_at_end_of_collection(0),
  71   _rs_length(0),
  72   _rs_length_prediction(0),
  73   _pending_cards_at_gc_start(0),
  74   _pending_cards_at_prev_gc_end(0),
  75   _total_mutator_refined_cards(0),
  76   _total_concurrent_refined_cards(0),
  77   _total_concurrent_refinement_time(),
  78   _bytes_allocated_in_old_since_last_gc(0),
  79   _initial_mark_to_mixed(),
  80   _collection_set(NULL),
  81   _g1h(NULL),
  82   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  83   _mark_remark_start_sec(0),
  84   _mark_cleanup_start_sec(0),
  85   _tenuring_threshold(MaxTenuringThreshold),
  86   _max_survivor_regions(0),
  87   _survivors_age_table(true)
  88 {
  89 }
  90 
  91 G1Policy::~G1Policy() {
  92   delete _ihop_control;
  93   delete _young_gen_sizer;
  94 }
  95 
  96 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  97   if (G1Arguments::is_heterogeneous_heap()) {
  98     return new G1HeterogeneousHeapPolicy(gc_timer_stw);
  99   } else {
 100     return new G1Policy(gc_timer_stw);
 101   }
 102 }
 103 
 104 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
 105 
 106 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
 107   _g1h = g1h;


 381       assert(p.will_fit(min_young_length),
 382              "min_young_length, the result of the binary search, should "
 383              "fit into the pause target");
 384       assert(!p.will_fit(min_young_length + 1),
 385              "min_young_length, the result of the binary search, should be "
 386              "optimal, so no larger length should fit into the pause target");
 387     }
 388   } else {
 389     // Even the minimum length doesn't fit into the pause time
 390     // target, return it as the result nevertheless.
 391   }
 392   return base_min_length + min_young_length;
 393 }
 394 
 395 double G1Policy::predict_survivor_regions_evac_time() const {
 396   double survivor_regions_evac_time = 0.0;
 397   const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
 398   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
 399        it != survivor_regions->end();
 400        ++it) {



 401     survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
 402   }
 403   return survivor_regions_evac_time;
 404 }
 405 
 406 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
 407   guarantee(use_adaptive_young_list_length(), "should not call this otherwise");
 408 
 409   if (rs_length > _rs_length_prediction) {
 410     // add 10% to avoid having to recalculate often
 411     size_t rs_length_prediction = rs_length * 1100 / 1000;
 412     update_rs_length_prediction(rs_length_prediction);
 413 
 414     update_young_list_max_and_target_length(rs_length_prediction);
 415   }
 416 }
 417 
 418 void G1Policy::update_rs_length_prediction() {
 419   update_rs_length_prediction(_analytics->predict_rs_length());
 420 }


 438   // Consider this like a collection pause for the purposes of allocation
 439   // since last pause.
 440   double end_sec = os::elapsedTime();
 441   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 442   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 443 
 444   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 445 
 446   collector_state()->set_in_full_gc(false);
 447 
 448   // "Nuke" the heuristics that control the young/mixed GC
 449   // transitions and make sure we start with young GCs after the Full GC.
 450   collector_state()->set_in_young_only_phase(true);
 451   collector_state()->set_in_young_gc_before_mixed(false);
 452   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 453   collector_state()->set_in_initial_mark_gc(false);
 454   collector_state()->set_mark_or_rebuild_in_progress(false);
 455   collector_state()->set_clearing_next_bitmap(false);
 456 
 457   _eden_surv_rate_group->start_adding_regions();
 458   // also call this on any additional surv rate groups
 459 
 460   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 461   _survivor_surv_rate_group->reset();
 462   update_young_list_max_and_target_length();
 463   update_rs_length_prediction();
 464   _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
 465 
 466   _bytes_allocated_in_old_since_last_gc = 0;
 467 
 468   record_pause(FullGC, _full_collection_start_sec, end_sec);
 469 }
 470 
 471 void G1Policy::record_concurrent_refinement_data(bool is_full_collection) {
 472   _pending_cards_at_gc_start = _g1h->pending_card_num();
 473 
 474   // Record info about concurrent refinement thread processing.
 475   G1ConcurrentRefine* cr = _g1h->concurrent_refine();
 476   G1ConcurrentRefine::RefinementStats cr_stats = cr->total_refinement_stats();
 477 
 478   Tickspan cr_time = cr_stats._time - _total_concurrent_refinement_time;
 479   _total_concurrent_refinement_time = cr_stats._time;
 480 
 481   size_t cr_cards = cr_stats._cards - _total_concurrent_refined_cards;


 510            cr_cards, mut_cards);
 511     size_t logged_cards = total_cards - _pending_cards_at_prev_gc_end;
 512     double logging_start_time = _analytics->prev_collection_pause_end_ms();
 513     double logging_end_time = Ticks::now().seconds() * MILLIUNITS;
 514     double logging_time = logging_end_time - logging_start_time;
 515     // Unlike above for conc-refine rate, here we should not require a
 516     // non-empty sample, since an application could go some time with only
 517     // young-gen or filtered out writes.  But we'll ignore unusually short
 518     // sample periods, as they may just pollute the predictions.
 519     if (logging_time > 1.0) {   // Require > 1ms sample time.
 520       _analytics->report_logged_cards_rate_ms(logged_cards / logging_time);
 521     }
 522   }
 523 }
 524 
 525 void G1Policy::record_collection_pause_start(double start_time_sec) {
 526   // We only need to do this here as the policy will only be applied
 527   // to the GC we're about to start. So, there is no point in calculating this
 528   // every time we calculate / recalculate the target young length.
 529   update_survivors_policy();


 530 
 531   assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
 532          "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
 533          max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
 534   assert_used_and_recalculate_used_equal(_g1h);
 535 
 536   phase_times()->record_cur_collection_start_sec(start_time_sec);
 537 
 538   record_concurrent_refinement_data(false /* is_full_collection */);
 539 
 540   _collection_set->reset_bytes_used_before();
 541 
 542   // Do this for any other surv rate groups as well.
 543   _eden_surv_rate_group->stop_adding_regions();
 544   _survivors_age_table.clear();

 545 
 546   assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
 547 }
 548 
 549 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
 550   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 551   collector_state()->set_in_initial_mark_gc(false);
 552 }
 553 
 554 void G1Policy::record_concurrent_mark_remark_start() {
 555   _mark_remark_start_sec = os::elapsedTime();
 556 }
 557 
 558 void G1Policy::record_concurrent_mark_remark_end() {
 559   double end_time_sec = os::elapsedTime();
 560   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 561   _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
 562   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
 563 
 564   record_pause(Remark, _mark_remark_start_sec, end_time_sec);


 635   G1GCPhaseTimes* p = phase_times();
 636 
 637   double end_time_sec = os::elapsedTime();
 638 
 639   bool this_pause_included_initial_mark = false;
 640   bool this_pause_was_young_only = collector_state()->in_young_only_phase();
 641 
 642   bool update_stats = !_g1h->evacuation_failed();
 643 
 644   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 645 
 646   _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 647 
 648   this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
 649   if (this_pause_included_initial_mark) {
 650     record_concurrent_mark_init_end(0.0);
 651   } else {
 652     maybe_start_marking();
 653   }
 654 











 655   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
 656   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 657     // This usually happens due to the timer not having the required
 658     // granularity. Some Linuxes are the usual culprits.
 659     // We'll just set it to something (arbitrarily) small.
 660     app_time_ms = 1.0;
 661   }
 662 
 663   if (update_stats) {
 664     // We maintain the invariant that all objects allocated by mutator
 665     // threads will be allocated out of eden regions. So, we can use
 666     // the eden region number allocated since the previous GC to
 667   // calculate the application's allocation rate. The only exception
 668   // to that is humongous objects, which are allocated separately. But
 669   // given that humongous object allocations do not really affect
 670   // either the pause's duration or when the next pause will take
 671   // place, we can safely ignore them here.
 672     uint regions_allocated = _collection_set->eden_region_length();
 673     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
 674     _analytics->report_alloc_rate_ms(alloc_rate_ms);


 897 void G1Policy::print_phases() {
 898   phase_times()->print();
 899 }
 900 
 901 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
 902                                               size_t rs_length) const {
 903   size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
 904   return
 905     _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) +
 906     _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) +
 907     _analytics->predict_constant_other_time_ms() +
 908     predict_survivor_regions_evac_time();
 909 }
 910 
 911 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
 912   size_t rs_length = _analytics->predict_rs_length();
 913   return predict_base_elapsed_time_ms(pending_cards, rs_length);
 914 }
 915 
 916 size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
 917   size_t bytes_to_copy;
 918   if (!hr->is_young()) {
 919     bytes_to_copy = hr->max_live_bytes();
 920   } else {
 921     bytes_to_copy = (size_t) (hr->used() * hr->surv_rate_prediction(_predictor));















 922   }
 923   return bytes_to_copy;
 924 }
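
For context, the prediction above applies a single age-based survival rate to everything in a young region, survivors included. A minimal standalone sketch of that model, with all values invented for illustration:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t used           = 8 * 1024 * 1024; // bytes used in a young region (hypothetical)
      double surv_rate_pred = 0.25;            // predicted survival rate for its age group

      // One rate covers the whole region, no matter how old its objects are.
      size_t bytes_to_copy = (size_t) (used * surv_rate_pred);
      printf("predicted bytes to copy: %zu\n", bytes_to_copy); // 2097152
      return 0;
    }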
 925 
 926 double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const {
 927   if (count == 0) {
 928     return 0.0;
 929   }
 930   size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes;
 931   if (bytes_to_copy != NULL) {
 932     *bytes_to_copy = expected_bytes;
 933   }
 934   return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->mark_or_rebuild_in_progress());
 935 }
 936 
 937 double G1Policy::predict_region_copy_time_ms(HeapRegion* hr) const {
 938   size_t const bytes_to_copy = predict_bytes_to_copy(hr);
 939   return _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
 940 }
 941 


1371     assert(r != NULL, "Region must exist");
1372     prediction_ms += predict_region_total_time_ms(r, false);
1373 
1374     if (prediction_ms > time_remaining_ms) {
1375       log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
1376                                 prediction_ms, r->hrm_index(), time_remaining_ms);
1377       break;
1378     }
1379     // This region will be included in the next optional evacuation.
1380 
1381     time_remaining_ms -= prediction_ms;
1382     num_optional_regions++;
1383     r = candidates->at(++candidate_idx);
1384   }
1385 
1386   log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
1387                             num_optional_regions, max_optional_regions, prediction_ms);
1388 }
1389 
1390 void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
1391   note_start_adding_survivor_regions();
1392 
1393   HeapRegion* last = NULL;
1394   for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
1395        it != survivors->regions()->end();
1396        ++it) {
1397     HeapRegion* curr = *it;
1398     set_region_survivor(curr);
1399 
1400     // The region is a non-empty survivor so let's add it to
1401     // the incremental collection set for the next evacuation
1402     // pause.
1403     _collection_set->add_survivor_regions(curr);
1404 
1405     last = curr;
1406   }
1407   note_stop_adding_survivor_regions();
1408 
1409   // Don't clear the survivor list handles until the start of
1410   // the next evacuation pause - we need it in order to re-tag
1411   // the survivor regions from this evacuation pause as 'young'
1412   // at the start of the next.
1413 }


  46 #include "runtime/arguments.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/mutexLocker.hpp"
  49 #include "utilities/debug.hpp"
  50 #include "utilities/growableArray.hpp"
  51 #include "utilities/pair.hpp"
  52 
  53 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  54   _predictor(G1ConfidencePercent / 100.0),
  55   _analytics(new G1Analytics(&_predictor)),
  56   _remset_tracker(),
  57   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  58   _ihop_control(create_ihop_control(&_predictor)),
  59   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  60   _full_collection_start_sec(0.0),
  61   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
  62   _young_list_target_length(0),
  63   _young_list_fixed_length(0),
  64   _young_list_max_length(0),
  65   _eden_surv_rate_group(new G1SurvRateGroup()),
  66   _survivor_used_bytes_at_start(0),
  67   _survivor_used_bytes_at_end(0),
  68   _reserve_factor((double) G1ReservePercent / 100.0),
  69   _reserve_regions(0),
  70   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  71   _free_regions_at_end_of_collection(0),
  72   _rs_length(0),
  73   _rs_length_prediction(0),
  74   _pending_cards_at_gc_start(0),
  75   _pending_cards_at_prev_gc_end(0),
  76   _total_mutator_refined_cards(0),
  77   _total_concurrent_refined_cards(0),
  78   _total_concurrent_refinement_time(),
  79   _bytes_allocated_in_old_since_last_gc(0),
  80   _initial_mark_to_mixed(),
  81   _collection_set(NULL),
  82   _g1h(NULL),
  83   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  84   _mark_remark_start_sec(0),
  85   _mark_cleanup_start_sec(0),
  86   _tenuring_threshold(MaxTenuringThreshold),
  87   _max_survivor_regions(0),
  88   _surviving_survivor_words(0)
  89 {
  90 }
  91 
  92 G1Policy::~G1Policy() {
  93   delete _ihop_control;
  94   delete _young_gen_sizer;
  95 }
  96 
  97 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  98   if (G1Arguments::is_heterogeneous_heap()) {
  99     return new G1HeterogeneousHeapPolicy(gc_timer_stw);
 100   } else {
 101     return new G1Policy(gc_timer_stw);
 102   }
 103 }
 104 
 105 G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
 106 
 107 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
 108   _g1h = g1h;


 382       assert(p.will_fit(min_young_length),
 383              "min_young_length, the result of the binary search, should "
 384              "fit into the pause target");
 385       assert(!p.will_fit(min_young_length + 1),
 386              "min_young_length, the result of the binary search, should be "
 387              "optimal, so no larger length should fit into the pause target");
 388     }
 389   } else {
 390     // Even the minimum length doesn't fit into the pause time
 391     // target, return it as the result nevertheless.
 392   }
 393   return base_min_length + min_young_length;
 394 }
 395 
 396 double G1Policy::predict_survivor_regions_evac_time() const {
 397   double survivor_regions_evac_time = 0.0;
 398   const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
 399   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
 400        it != survivor_regions->end();
 401        ++it) {
 402     // We could split out copy_time from the total time here and calculate it based on
 403     // the number of survivor regions. But since we need to iterate over the regions
 404     // for the non-copy time anyway, keep the combined prediction.
 405     survivor_regions_evac_time += predict_region_total_time_ms(*it, collector_state()->in_young_only_phase());
 406   }
 407   return survivor_regions_evac_time;
 408 }
 409 
 410 void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_length) {
 411   guarantee(use_adaptive_young_list_length(), "should not call this otherwise");
 412 
 413   if (rs_length > _rs_length_prediction) {
 414     // add 10% to avoid having to recalculate often
 415     size_t rs_length_prediction = rs_length * 1100 / 1000;
 416     update_rs_length_prediction(rs_length_prediction);
 417 
 418     update_young_list_max_and_target_length(rs_length_prediction);
 419   }
 420 }
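
The 10% head room above is done in integer arithmetic: multiplying by 1100 before dividing by 1000 keeps the margin exact without floating point. A minimal sketch of the same trick (helper name is illustrative, not from G1):

    #include <cstddef>
    #include <cstdio>

    // Multiply first, then divide, so the 10% margin survives integer division.
    // For realistic remembered set lengths the multiplication cannot overflow size_t.
    static size_t with_ten_percent_headroom(size_t length) {
      return length * 1100 / 1000;
    }

    int main() {
      size_t observed_rs_length = 12000;
      size_t padded = with_ten_percent_headroom(observed_rs_length); // 13200
      printf("observed %zu -> padded prediction %zu\n", observed_rs_length, padded);
      return 0;
    }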
 421 
 422 void G1Policy::update_rs_length_prediction() {
 423   update_rs_length_prediction(_analytics->predict_rs_length());
 424 }


 442   // Consider this like a collection pause for the purposes of allocation
 443   // since last pause.
 444   double end_sec = os::elapsedTime();
 445   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 446   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 447 
 448   _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
 449 
 450   collector_state()->set_in_full_gc(false);
 451 
 452   // "Nuke" the heuristics that control the young/mixed GC
 453   // transitions and make sure we start with young GCs after the Full GC.
 454   collector_state()->set_in_young_only_phase(true);
 455   collector_state()->set_in_young_gc_before_mixed(false);
 456   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 457   collector_state()->set_in_initial_mark_gc(false);
 458   collector_state()->set_mark_or_rebuild_in_progress(false);
 459   collector_state()->set_clearing_next_bitmap(false);
 460 
 461   _eden_surv_rate_group->start_adding_regions();

 462 
 463   _free_regions_at_end_of_collection = _g1h->num_free_regions();

 464   update_young_list_max_and_target_length();
 465   update_rs_length_prediction();
 466   _pending_cards_at_prev_gc_end = _g1h->pending_card_num();
 467 
 468   _bytes_allocated_in_old_since_last_gc = 0;
 469 
 470   record_pause(FullGC, _full_collection_start_sec, end_sec);
 471 }
 472 
 473 void G1Policy::record_concurrent_refinement_data(bool is_full_collection) {
 474   _pending_cards_at_gc_start = _g1h->pending_card_num();
 475 
 476   // Record info about concurrent refinement thread processing.
 477   G1ConcurrentRefine* cr = _g1h->concurrent_refine();
 478   G1ConcurrentRefine::RefinementStats cr_stats = cr->total_refinement_stats();
 479 
 480   Tickspan cr_time = cr_stats._time - _total_concurrent_refinement_time;
 481   _total_concurrent_refinement_time = cr_stats._time;
 482 
 483   size_t cr_cards = cr_stats._cards - _total_concurrent_refined_cards;


 512            cr_cards, mut_cards);
 513     size_t logged_cards = total_cards - _pending_cards_at_prev_gc_end;
 514     double logging_start_time = _analytics->prev_collection_pause_end_ms();
 515     double logging_end_time = Ticks::now().seconds() * MILLIUNITS;
 516     double logging_time = logging_end_time - logging_start_time;
 517     // Unlike above for conc-refine rate, here we should not require a
 518     // non-empty sample, since an application could go some time with only
 519     // young-gen or filtered out writes.  But we'll ignore unusually short
 520     // sample periods, as they may just pollute the predictions.
 521     if (logging_time > 1.0) {   // Require > 1ms sample time.
 522       _analytics->report_logged_cards_rate_ms(logged_cards / logging_time);
 523     }
 524   }
 525 }
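
The logged-cards rate sample above divides the cards dirtied since the previous pause by the elapsed wall time, skipping periods of 1ms or less where the near-zero divisor would make the rate meaningless. A self-contained sketch of that guard, with hypothetical numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      double logging_start_ms = 1000.0;  // previous pause end (hypothetical)
      double logging_end_ms   = 1250.0;  // now (hypothetical)
      size_t logged_cards     = 50000;   // cards logged since then (hypothetical)

      double logging_time = logging_end_ms - logging_start_ms;
      if (logging_time > 1.0) {          // require > 1ms sample time
        double rate = logged_cards / logging_time; // cards per ms
        printf("logged cards rate: %.1f cards/ms\n", rate); // 200.0
      }
      return 0;
    }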
 526 
 527 void G1Policy::record_collection_pause_start(double start_time_sec) {
 528   // We only need to do this here as the policy will only be applied
 529   // to the GC we're about to start. So, there is no point in calculating this
 530   // every time we calculate / recalculate the target young length.
 531   update_survivors_policy();
 532   _survivor_used_bytes_at_start = _g1h->survivor()->used_bytes();
 533   _survivor_used_bytes_at_end = 0;
 534
 535   assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
 536          "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
 537          max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
 538   assert_used_and_recalculate_used_equal(_g1h);
 539 
 540   phase_times()->record_cur_collection_start_sec(start_time_sec);
 541 
 542   record_concurrent_refinement_data(false /* is_full_collection */);
 543 
 544   _collection_set->reset_bytes_used_before();
 545 
 546   // Do this for any other surv rate groups as well.
 547   _eden_surv_rate_group->stop_adding_regions();
 548   _survivors_age_table.clear();
 549   _surviving_survivor_words = 0;
 550 
 551   assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
 552 }
 553 
 554 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
 555   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 556   collector_state()->set_in_initial_mark_gc(false);
 557 }
 558 
 559 void G1Policy::record_concurrent_mark_remark_start() {
 560   _mark_remark_start_sec = os::elapsedTime();
 561 }
 562 
 563 void G1Policy::record_concurrent_mark_remark_end() {
 564   double end_time_sec = os::elapsedTime();
 565   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 566   _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
 567   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
 568 
 569   record_pause(Remark, _mark_remark_start_sec, end_time_sec);


 640   G1GCPhaseTimes* p = phase_times();
 641 
 642   double end_time_sec = os::elapsedTime();
 643 
 644   bool this_pause_included_initial_mark = false;
 645   bool this_pause_was_young_only = collector_state()->in_young_only_phase();
 646 
 647   bool update_stats = !_g1h->evacuation_failed();
 648 
 649   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 650 
 651   _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 652 
 653   this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
 654   if (this_pause_included_initial_mark) {
 655     record_concurrent_mark_init_end(0.0);
 656   } else {
 657     maybe_start_marking();
 658   }
 659 
 660   size_t survived = _surviving_survivor_words * HeapWordSize;
 661 
 662   if (_survivor_used_bytes_at_start != 0) {
 663     double ratio = (double)survived / _survivor_used_bytes_at_start;
 664     guarantee(ratio >= 0.0 && ratio <= 1.0, "Survivor ratio %.3f must be in [0.0, 1.0]", ratio);
 665     _analytics->report_survivor_ratio(ratio);
 666   } else {
 667     _analytics->report_survivor_ratio(0.0);
 668   }
 669   _survivor_used_bytes_at_end = survived;
 670
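
The ratio above measures how much of the data sitting in survivor regions at pause start survived again, which later feeds the per-region copy prediction. A standalone sketch of the arithmetic, assuming an 8-byte heap word as on typical 64-bit builds (all other values invented):

    #include <cstddef>
    #include <cstdio>

    static const size_t kHeapWordSize = 8; // assumption: 64-bit HotSpot heap word

    int main() {
      size_t surviving_survivor_words = 3 * 1024 * 1024;  // words copied out of survivors (hypothetical)
      size_t survivor_used_at_start   = 64 * 1024 * 1024; // bytes in survivor regions at pause start

      size_t survived = surviving_survivor_words * kHeapWordSize; // 24 MiB
      if (survivor_used_at_start != 0) {
        double ratio = (double) survived / survivor_used_at_start;
        printf("survivor ratio %.3f\n", ratio); // 0.375
      }
      return 0;
    }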
 671   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
 672   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 673     // This usually happens due to the timer not having the required
 674     // granularity. Some Linuxes are the usual culprits.
 675     // We'll just set it to something (arbitrarily) small.
 676     app_time_ms = 1.0;
 677   }
 678 
 679   if (update_stats) {
 680     // We maintain the invariant that all objects allocated by mutator
 681     // threads will be allocated out of eden regions. So, we can use
 682     // the eden region number allocated since the previous GC to
 683   // calculate the application's allocation rate. The only exception
 684   // to that is humongous objects, which are allocated separately. But
 685   // given that humongous object allocations do not really affect
 686   // either the pause's duration or when the next pause will take
 687   // place, we can safely ignore them here.
 688     uint regions_allocated = _collection_set->eden_region_length();
 689     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
 690     _analytics->report_alloc_rate_ms(alloc_rate_ms);


 913 void G1Policy::print_phases() {
 914   phase_times()->print();
 915 }
 916 
 917 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
 918                                               size_t rs_length) const {
 919   size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
 920   return
 921     _analytics->predict_card_merge_time_ms(pending_cards + rs_length, collector_state()->in_young_only_phase()) +
 922     _analytics->predict_card_scan_time_ms(effective_scanned_cards, collector_state()->in_young_only_phase()) +
 923     _analytics->predict_constant_other_time_ms() +
 924     predict_survivor_regions_evac_time();
 925 }
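
The base-time prediction is purely additive: card merge time over pending plus remembered-set cards, card scan time over the predicted scanned cards, a constant term, and the survivor evacuation time. A toy model of that shape, with linear coefficients invented purely for illustration:

    #include <cstddef>
    #include <cstdio>

    // Toy stand-ins for the G1Analytics predictors; the coefficients are
    // invented and carry no relation to real measured rates.
    static double predict_card_merge_time_ms(size_t cards) { return 0.0002 * cards; }
    static double predict_card_scan_time_ms(size_t cards)  { return 0.0005 * cards; }
    static double predict_constant_other_time_ms()         { return 1.5; }
    static double predict_survivor_evac_time_ms()          { return 12.0; }

    static double predict_base_elapsed_time_ms(size_t pending_cards, size_t rs_length,
                                               size_t scanned_cards) {
      return predict_card_merge_time_ms(pending_cards + rs_length) +
             predict_card_scan_time_ms(scanned_cards) +
             predict_constant_other_time_ms() +
             predict_survivor_evac_time_ms();
    }

    int main() {
      // 60000 merged cards, 40000 scanned cards -> 12.0 + 20.0 + 1.5 + 12.0 ms.
      printf("base time %.3f ms\n", predict_base_elapsed_time_ms(10000, 50000, 40000));
      return 0;
    }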
 926 
 927 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
 928   size_t rs_length = _analytics->predict_rs_length();
 929   return predict_base_elapsed_time_ms(pending_cards, rs_length);
 930 }
 931 
 932 size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
 933   size_t bytes_to_copy = 0;
 934   if (!hr->is_young()) {
 935     bytes_to_copy = hr->max_live_bytes();
 936   } else {
 937     size_t survived = hr->survivor_bytes();
 938 
 939     guarantee(hr->used() >= survived, "Used " SIZE_FORMAT " must be at least survived " SIZE_FORMAT, hr->used(), survived);
 940 
 941     if (survived > 0) {
 942       bytes_to_copy += (size_t) (survived * _analytics->predict_survivor_ratio());
 943     }
 944 
 945     if (hr->used() > survived) {
 946       bytes_to_copy += (size_t) ((hr->used() - survived) * hr->surv_rate_prediction(_predictor));
 947     }
 948     /*
 949     log_debug(gc)("straggler region %u type %s old survived " SIZE_FORMAT " exp survived " SIZE_FORMAT " eden used " SIZE_FORMAT " exp eden survived " SIZE_FORMAT " total " SIZE_FORMAT,
 950                   hr->hrm_index(), hr->get_short_type_str(), survived, (size_t)(survived * _analytics->predict_survivor_ratio()), hr->used() - survived,
 951                   ((hr->used() - survived) > 0) ? (size_t)((hr->used() - survived) * hr->surv_rate_prediction(_predictor)) : 0, bytes_to_copy);
 952      */
 953   }
 954   return bytes_to_copy;
 955 }
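
The reworked prediction above splits a young region's live data into two populations: bytes that already survived a previous pause (aged objects, charged at the measured survivor ratio) and the remaining freshly allocated bytes (charged at the region's age-based survival rate). A standalone sketch of that split, with both predictor values invented:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t used           = 8 * 1024 * 1024; // bytes used in the region (hypothetical)
      size_t survived       = 2 * 1024 * 1024; // bytes that already survived a pause
      double survivor_ratio = 0.4;             // predicted fraction of survivor bytes copied again
      double surv_rate_pred = 0.1;             // predicted survival rate of the fresh bytes

      size_t bytes_to_copy = 0;
      // Aged objects tend to keep surviving, so they get their own, higher ratio.
      bytes_to_copy += (size_t) (survived * survivor_ratio);
      // Fresh allocations mostly die young; the age-based rate covers the rest.
      bytes_to_copy += (size_t) ((used - survived) * surv_rate_pred);

      printf("predicted bytes to copy: %zu\n", bytes_to_copy); // 0.4*2MiB + 0.1*6MiB
      return 0;
    }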
 956 
 957 double G1Policy::predict_eden_copy_time_ms(uint count, size_t* bytes_to_copy) const {
 958   if (count == 0) {
 959     return 0.0;
 960   }
 961   size_t const expected_bytes = _eden_surv_rate_group->accum_surv_rate_pred(count) * HeapRegion::GrainBytes;
 962   if (bytes_to_copy != NULL) {
 963     *bytes_to_copy = expected_bytes;
 964   }
 965   return _analytics->predict_object_copy_time_ms(expected_bytes, collector_state()->mark_or_rebuild_in_progress());
 966 }
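
predict_eden_copy_time_ms turns the accumulated survival-rate prediction for the first count eden regions into an expected byte count by scaling with the region size, then prices those bytes with the copy-time predictor. A sketch with an invented accumulated rate and copy cost:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kGrainBytes = 1024 * 1024; // assumption: 1 MiB heap regions
      // Accumulated survival rate over the eden regions, i.e. the sum of the
      // per-age predictions (invented value; roughly "1.8 regions survive").
      double accum_surv_rate = 1.8;
      double copy_cost_ms_per_byte = 1.0e-8;  // invented copy cost

      size_t expected_bytes = (size_t) (accum_surv_rate * kGrainBytes);
      printf("expect %zu bytes, ~%.3f ms to copy\n",
             expected_bytes, expected_bytes * copy_cost_ms_per_byte);
      return 0;
    }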
 967 
 968 double G1Policy::predict_region_copy_time_ms(HeapRegion* hr) const {
 969   size_t const bytes_to_copy = predict_bytes_to_copy(hr);
 970   return _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->mark_or_rebuild_in_progress());
 971 }
 972 


1402     assert(r != NULL, "Region must exist");
1403     prediction_ms += predict_region_total_time_ms(r, false);
1404 
1405     if (prediction_ms > time_remaining_ms) {
1406       log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
1407                                 prediction_ms, r->hrm_index(), time_remaining_ms);
1408       break;
1409     }
1410     // This region will be included in the next optional evacuation.
1411 
1412     time_remaining_ms -= prediction_ms;
1413     num_optional_regions++;
1414     r = candidates->at(++candidate_idx);
1415   }
1416 
1417   log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
1418                             num_optional_regions, max_optional_regions, prediction_ms);
1419 }
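
The selection above is a time-budget loop: regions are taken from the candidate list until the next prediction no longer fits the remaining pause budget. A simplified per-region variant (the loop in the file accumulates prediction_ms across regions before comparing; this sketch charges each region individually, with invented predictions):

    #include <cstdio>
    #include <vector>

    int main() {
      // Predicted evacuation times for candidate regions, in order (hypothetical).
      std::vector<double> predicted_ms = {2.5, 3.0, 4.5, 6.0, 1.0};
      double time_remaining_ms = 10.0;
      unsigned num_optional_regions = 0;

      for (double p : predicted_ms) {
        if (p > time_remaining_ms) {
          printf("prediction %.3f ms does not fit remaining %.3f ms\n", p, time_remaining_ms);
          break; // later candidates are not considered once the budget is blown
        }
        time_remaining_ms -= p;
        num_optional_regions++;
      }
      printf("prepared %u regions, %.3f ms budget left\n",
             num_optional_regions, time_remaining_ms);
      return 0;
    }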
1420 
1421 void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {


1422   HeapRegion* last = NULL;
1423   for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
1424        it != survivors->regions()->end();
1425        ++it) {
1426     HeapRegion* curr = *it;

1427 
1428     // The region is a non-empty survivor so let's add it to
1429     // the incremental collection set for the next evacuation
1430     // pause.
1431     _collection_set->add_survivor_regions(curr);
1432 
1433     last = curr;
1434   }

1435 
1436   // Don't clear the survivor list handles until the start of
1437   // the next evacuation pause - we need it in order to re-tag
1438   // the survivor regions from this evacuation pause as 'young'
1439   // at the start of the next.
1440 }