< prev index next >

src/share/vm/gc/g1/g1CollectorPolicy.cpp

Print this page
rev 9408 : 8143215: gcc 4.1.2: fix three issues breaking the build.


 459   size_t bytes_to_copy =
 460                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 461   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
 462   double young_other_time_ms = predict_young_other_time_ms(young_length);
 463   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 464   if (pause_time_ms > target_pause_time_ms) {
 465     // end condition 2: prediction is over the target pause time
 466     return false;
 467   }
 468 
 469   size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
 470 
 471   // When copying, we will likely need more bytes free than is live in the region.
 472   // Add some safety margin to factor in the confidence of our guess, and the
 473   // natural expected waste.
 474   // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
 475   // of the calculation: the lower the confidence, the more headroom.
 476   // (100 + TargetPLABWastePct) represents the increase in expected bytes during
 477   // copying due to anticipated waste in the PLABs.
 478   double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
 479   size_t expected_bytes_to_copy = safety_factor * bytes_to_copy;
 480 
 481   if (expected_bytes_to_copy > free_bytes) {
 482     // end condition 3: out-of-space
 483     return false;
 484   }
 485 
 486   // success!
 487   return true;
 488 }
 489 
 490 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
 491   // re-calculate the necessary reserve
 492   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
 493   // We use ceiling so that if reserve_regions_d is > 0.0 (but
 494   // smaller than 1.0) we'll get 1.
 495   _reserve_regions = (uint) ceil(reserve_regions_d);
 496 
 497   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 498 }
 499 


 506       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 507       double alloc_rate_ms = predict_alloc_rate_ms();
 508       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 509     } else {
 510       // otherwise we don't have enough info to make the prediction
 511     }
 512   }
 513   desired_min_length += base_min_length;
 514   // make sure we don't go below any user-defined minimum bound
 515   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 516 }
 517 
 518 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
 519   // Here, we might want to also take into account any additional
 520   // constraints (i.e., user-defined minimum bound). Currently, we
 521   // effectively don't set this bound.
 522   return _young_gen_sizer->max_desired_young_length();
 523 }
 524 
 525 void G1CollectorPolicy::update_young_list_max_and_target_length() {
 526   update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq));
 527 }
 528 
// Recompute the young list target length for the given remembered set
// lengths, then refresh the GC-locker expansion allowance.
// NOTE(review): update_max_gc_locker_expansion() presumably reads the
// just-updated target length — keep this call order.
void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
}
 533 
 534 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 535   _young_list_target_length = bounded_young_list_target_length(rs_lengths);
 536 }
 537 
 538 void G1CollectorPolicy::update_young_list_target_length() {
 539   update_young_list_target_length(get_new_prediction(_rs_lengths_seq));
 540 }
 541 
 542 uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
 543   // Calculate the absolute and desired min bounds.
 544 
 545   // This is how many young regions we already have (currently: the survivors).
 546   uint base_min_length = recorded_survivor_regions();
 547   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 548   // This is the absolute minimum young length. Ensure that we
 549   // will at least have one eden region available for allocation.
 550   uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
 551   // If we shrank the young list target it should not shrink below the current size.
 552   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 553   // Calculate the absolute and desired max bounds.
 554 
 555   // We will try our best not to "eat" into the reserve.
 556   uint absolute_max_length = 0;
 557   if (_free_regions_at_end_of_collection > _reserve_regions) {
 558     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 559   }


 709        r = r->get_next_young_region()) {
 710     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
 711   }
 712   return survivor_regions_evac_time;
 713 }
 714 
 715 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
 716   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 717 
 718   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
 719   if (rs_lengths > _rs_lengths_prediction) {
 720     // add 10% to avoid having to recalculate often
 721     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 722     update_rs_lengths_prediction(rs_lengths_prediction);
 723 
 724     update_young_list_max_and_target_length(rs_lengths_prediction);
 725   }
 726 }
 727 
 728 void G1CollectorPolicy::update_rs_lengths_prediction() {
 729   update_rs_lengths_prediction(get_new_prediction(_rs_lengths_seq));
 730 }
 731 
 732 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
 733   if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
 734     _rs_lengths_prediction = prediction;
 735   }
 736 }
 737 
// NOTE(review): presumably an inherited CollectorPolicy entry point that
// G1 does not route allocations through — confirm against the base class.
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  // Deliberately unimplemented: reaching this is a bug.
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
 744 
 745 // This method controls how a collector handles one or more
 746 // of its generations being fully allocated.
 747 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
 748                                                        bool is_tlab) {
 749   guarantee(false, "Not using this policy feature yet.");




 459   size_t bytes_to_copy =
 460                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 461   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
 462   double young_other_time_ms = predict_young_other_time_ms(young_length);
 463   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 464   if (pause_time_ms > target_pause_time_ms) {
 465     // end condition 2: prediction is over the target pause time
 466     return false;
 467   }
 468 
 469   size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
 470 
 471   // When copying, we will likely need more bytes free than is live in the region.
 472   // Add some safety margin to factor in the confidence of our guess, and the
 473   // natural expected waste.
 474   // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
 475   // of the calculation: the lower the confidence, the more headroom.
 476   // (100 + TargetPLABWastePct) represents the increase in expected bytes during
 477   // copying due to anticipated waste in the PLABs.
 478   double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
 479   size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
 480 
 481   if (expected_bytes_to_copy > free_bytes) {
 482     // end condition 3: out-of-space
 483     return false;
 484   }
 485 
 486   // success!
 487   return true;
 488 }
 489 
 490 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
 491   // re-calculate the necessary reserve
 492   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
 493   // We use ceiling so that if reserve_regions_d is > 0.0 (but
 494   // smaller than 1.0) we'll get 1.
 495   _reserve_regions = (uint) ceil(reserve_regions_d);
 496 
 497   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 498 }
 499 


 506       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 507       double alloc_rate_ms = predict_alloc_rate_ms();
 508       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 509     } else {
 510       // otherwise we don't have enough info to make the prediction
 511     }
 512   }
 513   desired_min_length += base_min_length;
 514   // make sure we don't go below any user-defined minimum bound
 515   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 516 }
 517 
// Upper bound (in regions) on the desired young list length, taken from
// the young generation sizer.
uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}
 524 
// Convenience overload: derive the RS lengths from the current prediction.
// The explicit size_t cast keeps the double-to-integer conversion from
// being implicit (a build breaker on gcc 4.1.2 with warnings as errors).
void G1CollectorPolicy::update_young_list_max_and_target_length() {
  update_young_list_max_and_target_length((size_t)get_new_prediction(_rs_lengths_seq));
}
 528 
// Refresh the young list target for the given RS lengths, then the
// GC-locker expansion amount. NOTE(review): the expansion update
// presumably depends on the new target length — keep this call order.
void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
  update_young_list_target_length(rs_lengths);
  update_max_gc_locker_expansion();
}
 533 
 534 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 535   _young_list_target_length = bounded_young_list_target_length(rs_lengths);
 536 }
 537 
// Convenience overload using the current RS lengths prediction. The
// size_t cast makes the double-to-integer conversion explicit, which is
// required for the gcc 4.1.2 warnings-as-errors build.
void G1CollectorPolicy::update_young_list_target_length() {
  update_young_list_target_length((size_t)get_new_prediction(_rs_lengths_seq));
}
 541 
 542 uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths) const {
 543   // Calculate the absolute and desired min bounds.
 544 
 545   // This is how many young regions we already have (currently: the survivors).
 546   uint base_min_length = recorded_survivor_regions();
 547   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
 548   // This is the absolute minimum young length. Ensure that we
 549   // will at least have one eden region available for allocation.
 550   uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1);
 551   // If we shrank the young list target it should not shrink below the current size.
 552   desired_min_length = MAX2(desired_min_length, absolute_min_length);
 553   // Calculate the absolute and desired max bounds.
 554 
 555   // We will try our best not to "eat" into the reserve.
 556   uint absolute_max_length = 0;
 557   if (_free_regions_at_end_of_collection > _reserve_regions) {
 558     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 559   }


 709        r = r->get_next_young_region()) {
 710     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
 711   }
 712   return survivor_regions_evac_time;
 713 }
 714 
 715 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
 716   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 717 
 718   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
 719   if (rs_lengths > _rs_lengths_prediction) {
 720     // add 10% to avoid having to recalculate often
 721     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 722     update_rs_lengths_prediction(rs_lengths_prediction);
 723 
 724     update_young_list_max_and_target_length(rs_lengths_prediction);
 725   }
 726 }
 727 
// Refresh the cached RS lengths prediction from the sequence. The
// explicit size_t cast avoids an implicit double-to-integer conversion
// that breaks the gcc 4.1.2 build.
void G1CollectorPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction((size_t)get_new_prediction(_rs_lengths_seq));
}
 731 
 732 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
 733   if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
 734     _rs_lengths_prediction = prediction;
 735   }
 736 }
 737 
// NOTE(review): looks like an inherited CollectorPolicy allocation hook
// that G1 does not use — verify against the base class declaration.
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  // Deliberately unimplemented; guarantee() fails if this is ever reached.
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
 744 
 745 // This method controls how a collector handles one or more
 746 // of its generations being fully allocated.
 747 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
 748                                                        bool is_tlab) {
 749   guarantee(false, "Not using this policy feature yet.");


< prev index next >