< prev index next >

src/share/vm/gc/g1/g1CollectorPolicy.cpp

Print this page




 435 
 436   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 437 
 438   initialize_gc_policy_counters();
 439 
 440   if (adaptive_young_list_length()) {
 441     _young_list_fixed_length = 0;
 442   } else {
 443     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
 444   }
 445   _free_regions_at_end_of_collection = _g1->num_free_regions();
 446   update_young_list_target_length();
 447 
 448   // We may immediately start allocating regions and placing them on the
 449   // collection set list. Initialize the per-collection set info
 450   start_incremental_cset_building();
 451 }
 452 
 453 // Create the jstat counters for the policy.
 454 void G1CollectorPolicy::initialize_gc_policy_counters() {
       // GCPolicyCounters(name, collectors, generations): registers the
       // perf/jstat counter namespace for this policy.
       // NOTE(review): here "GarbageFirst" presumably reports 1 collector
       // and 3 generation counters — confirm the argument order and the
       // meaning of the third argument against the GCPolicyCounters ctor.
 455   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 456 }
 457 
 458 bool G1CollectorPolicy::predict_will_fit(uint young_length,
 459                                          double base_time_ms,
 460                                          uint base_free_regions,
 461                                          double target_pause_time_ms) {
 462   if (young_length >= base_free_regions) {
 463     // end condition 1: not enough space for the young regions
 464     return false;
 465   }
 466 
 467   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
 468   size_t bytes_to_copy =
 469                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 470   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
 471   double young_other_time_ms = predict_young_other_time_ms(young_length);
 472   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 473   if (pause_time_ms > target_pause_time_ms) {
 474     // end condition 2: prediction is over the target pause time
 475     return false;




 435 
 436   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 437 
 438   initialize_gc_policy_counters();
 439 
 440   if (adaptive_young_list_length()) {
 441     _young_list_fixed_length = 0;
 442   } else {
 443     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
 444   }
 445   _free_regions_at_end_of_collection = _g1->num_free_regions();
 446   update_young_list_target_length();
 447 
 448   // We may immediately start allocating regions and placing them on the
 449   // collection set list. Initialize the per-collection set info
 450   start_incremental_cset_building();
 451 }
 452 
 453 // Create the jstat counters for the policy.
 454 void G1CollectorPolicy::initialize_gc_policy_counters() {
       // GCPolicyCounters(name, collectors, generations): registers the
       // perf/jstat counter namespace for this policy.
       // NOTE(review): this version reports 2 generation counters for the
       // "GarbageFirst" policy — presumably dropping one generation from
       // the jstat output; confirm jstat consumers tolerate the reduced
       // generation count.
 455   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2);
 456 }
 457 
 458 bool G1CollectorPolicy::predict_will_fit(uint young_length,
 459                                          double base_time_ms,
 460                                          uint base_free_regions,
 461                                          double target_pause_time_ms) {
 462   if (young_length >= base_free_regions) {
 463     // end condition 1: not enough space for the young regions
 464     return false;
 465   }
 466 
 467   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
 468   size_t bytes_to_copy =
 469                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 470   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
 471   double young_other_time_ms = predict_young_other_time_ms(young_length);
 472   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 473   if (pause_time_ms > target_pause_time_ms) {
 474     // end condition 2: prediction is over the target pause time
 475     return false;


< prev index next >