G1: Use SoftMaxHeapSize to guide GC heuristics
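The updated code below touches G1Policy in two places. First, the pause-end bookkeeping gains a finish_of_mixed_gc flag on the collector state: it is set when next_gc_should_be_mixed() decides to stop doing mixed GCs and cleared at the end of the following pause. Second, a new helper, G1Policy::minimum_desired_bytes_after_concurrent_mark(), derives a minimum desired heap size after concurrent mark from the IHOP control's predicted unstrained buffer size, falling back to the maximum young list size plus the reserve plus current usage when no prediction is available.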


  if (update_stats) {
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use the
    // number of eden regions allocated since the previous GC to calculate
    // the application's allocation rate. The only exception to that is
    // humongous objects, which are allocated separately. But given that
    // humongous object allocations do not really affect either the pause's
    // duration or when the next pause will take place, we can safely ignore
    // them here.
    uint regions_allocated = _collection_set->eden_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _analytics->report_alloc_rate_ms(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
  }

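For context, report_alloc_rate_ms() feeds a sample window that later predictions read back. A minimal sketch of that pattern, assuming a fixed-size window in the spirit of HotSpot's TruncatedSeq; all class and method names below are illustrative, not the patch's code:

// Illustrative sketch only: a bounded sample window in the spirit of
// HotSpot's TruncatedSeq. All names here are assumptions.
class AllocRateTracker {
  static const int WindowSize = 10;
  double _samples[WindowSize];
  int    _next;
  int    _count;
public:
  AllocRateTracker() : _next(0), _count(0) {}

  // Called once per pause with regions-per-millisecond, as above.
  void report_alloc_rate_ms(double rate) {
    _samples[_next] = rate;
    _next = (_next + 1) % WindowSize;
    if (_count < WindowSize) _count++;
  }

  // Predictions read back an average over the recent window.
  double predict_alloc_rate_ms() const {
    if (_count == 0) return 0.0;
    double sum = 0.0;
    for (int i = 0; i < _count; i++) sum += _samples[i];
    return sum / _count;
  }
};
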
  if (collector_state()->finish_of_mixed_gc()) {
    collector_state()->set_finish_of_mixed_gc(false);
  }
  if (collector_state()->in_young_gc_before_mixed()) {
    assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
    // This has been the young GC before we start doing mixed GCs. We already
    // decided to start mixed GCs much earlier, so there is nothing to do except
    // advancing the state.
    collector_state()->set_in_young_only_phase(false);
    collector_state()->set_in_young_gc_before_mixed(false);
  } else if (!this_pause_was_young_only) {
    // This is a mixed GC. Here we decide whether to continue doing more
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      collector_state()->set_in_young_only_phase(true);
      collector_state()->set_finish_of_mixed_gc(true);

      clear_collection_set_candidates();
      maybe_start_marking();
    }
  }

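The finish_of_mixed_gc() calls above imply a matching flag on G1CollectorState. A minimal sketch of the shape that addition presumably takes; only the two accessor names are taken from the code above, while the field name and its initialization are assumptions:

// Sketch (assumption): only set_finish_of_mixed_gc()/finish_of_mixed_gc()
// appear in the excerpt; the field and initializer are illustrative.
class G1CollectorState {
  // ... existing phase flags such as _in_young_only_phase ...
  bool _finish_of_mixed_gc;
public:
  G1CollectorState() : _finish_of_mixed_gc(false) {}
  void set_finish_of_mixed_gc(bool v) { _finish_of_mixed_gc = v; }
  bool finish_of_mixed_gc() const     { return _finish_of_mixed_gc; }
};
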
  _eden_surv_rate_group->start_adding_regions();

  double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
  if (update_stats) {
    size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
                                          p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
    // Update the prediction for card merging; MergeRSDirtyCards includes the
    // cards from the Eager Reclaim phase.
    size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
                                      p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
                                      total_log_buffer_cards;

    // The threshold for the number of cards in a given sample that we consider
    // large enough that the impact of setup and other fixed costs is negligible.
    size_t const CardsNumSamplingThreshold = 10;
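The excerpt is cut off here. Purely for illustration (this is not the patch's continuation), a threshold like CardsNumSamplingThreshold typically gates the prediction update so that small samples, where fixed setup costs dominate, do not skew the per-card cost estimate. A self-contained sketch of that gating, with assumed names:

#include <cstddef>

// Illustrative only; not the code that follows in the patch. A small sample
// is ignored and the previous estimate is kept unchanged.
static double updated_cost_per_card_ms(size_t cards_merged,
                                       double merge_time_ms,
                                       double previous_estimate_ms) {
  const size_t CardsNumSamplingThreshold = 10; // same threshold as above
  if (cards_merged >= CardsNumSamplingThreshold) {
    return merge_time_ms / (double) cards_merged; // enough data: new sample
  }
  return previous_estimate_ms;                    // too little data: keep old
}
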


  HeapRegion* last = NULL;
  for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
       it != survivors->regions()->end();
       ++it) {
    HeapRegion* curr = *it;
    set_region_survivor(curr);

    // The region is a non-empty survivor, so let's add it to the incremental
    // collection set for the next evacuation pause.
    _collection_set->add_survivor_regions(curr);

    last = curr;
  }
  note_stop_adding_survivor_regions();

  // Don't clear the survivor list handles until the start of the next
  // evacuation pause - we need them in order to re-tag the survivor regions
  // from this evacuation pause as 'young' at the start of the next.
}

size_t G1Policy::minimum_desired_bytes_after_concurrent_mark(size_t used_bytes) {
  size_t minimum_desired_buffer_size = _ihop_control->predict_unstrained_buffer_size();
  return minimum_desired_buffer_size != 0 ?
           minimum_desired_buffer_size :
           _young_list_max_length * HeapRegion::GrainBytes +
             _reserve_regions * HeapRegion::GrainBytes + used_bytes;
}
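
To make the fallback arm concrete, here is a standalone worked example with hypothetical numbers; the region size, young list length, reserve count, and usage all vary by heap configuration, and none of these values come from the patch:

#include <cstddef>
#include <cstdio>

// Standalone illustration of the fallback computation above. All values are
// assumptions; in HotSpot they come from HeapRegion::GrainBytes and G1Policy
// fields.
int main() {
  const size_t M = 1024 * 1024;
  size_t grain_bytes           = 4 * M;   // assumed region size
  size_t young_list_max_length = 64;      // assumed max young regions
  size_t reserve_regions       = 10;      // assumed reserve regions
  size_t used_bytes            = 512 * M; // assumed current usage

  size_t minimum_desired = young_list_max_length * grain_bytes +
                           reserve_regions * grain_bytes + used_bytes;
  printf("minimum desired bytes: %zu MB\n", minimum_desired / M); // 808 MB
  return 0;
}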