< prev index next >

src/share/vm/gc/g1/g1CollectorPolicy.cpp

Print this page




 169   _collection_set = _g1->collection_set();
 170   _collection_set->set_policy(this);
 171 
 172   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 173 
 174   initialize_gc_policy_counters();
 175 
 176   if (adaptive_young_list_length()) {
 177     _young_list_fixed_length = 0;
 178   } else {
 179     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
 180   }
 181   _free_regions_at_end_of_collection = _g1->num_free_regions();
 182 
 183   update_young_list_max_and_target_length();
 184   // We may immediately start allocating regions and placing them on the
 185   // collection set list. Initialize the per-collection set info
 186   _collection_set->start_incremental_building();
 187 }
 188 
 189 void G1CollectorPolicy::note_gc_start(uint num_active_workers) { // called at the start of a GC pause
 190   phase_times()->note_gc_start(num_active_workers); // pause-start bookkeeping only: forward the active worker count to the phase-times tracker
 191 }
 192 
 193 // Create the jstat counters for the policy.
 194 void G1CollectorPolicy::initialize_gc_policy_counters() {
 195   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); // args presumably (name, num collectors, num generations) — confirm against GCPolicyCounters ctor
 196 }
 197 
 198 bool G1CollectorPolicy::predict_will_fit(uint young_length,
 199                                          double base_time_ms,
 200                                          uint base_free_regions,
 201                                          double target_pause_time_ms) const {
 202   if (young_length >= base_free_regions) {
 203     // end condition 1: not enough space for the young regions
 204     return false;
 205   }
 206 
 207   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
 208   size_t bytes_to_copy =
 209                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 210   double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,




 169   _collection_set = _g1->collection_set();
 170   _collection_set->set_policy(this);
 171 
 172   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 173 
 174   initialize_gc_policy_counters();
 175 
 176   if (adaptive_young_list_length()) {
 177     _young_list_fixed_length = 0;
 178   } else {
 179     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
 180   }
 181   _free_regions_at_end_of_collection = _g1->num_free_regions();
 182 
 183   update_young_list_max_and_target_length();
 184   // We may immediately start allocating regions and placing them on the
 185   // collection set list. Initialize the per-collection set info
 186   _collection_set->start_incremental_building();
 187 }
 188 
 189 void G1CollectorPolicy::note_gc_start() { // called at the start of a GC pause; worker-count parameter dropped in this revision
 190   phase_times()->note_gc_start(); // pause-start bookkeeping only: delegate to the phase-times tracker
 191 }
 192 
 193 // Create the jstat counters for the policy.
 194 void G1CollectorPolicy::initialize_gc_policy_counters() {
 195   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); // args presumably (name, num collectors, num generations) — confirm against GCPolicyCounters ctor
 196 }
 197 
 198 bool G1CollectorPolicy::predict_will_fit(uint young_length,
 199                                          double base_time_ms,
 200                                          uint base_free_regions,
 201                                          double target_pause_time_ms) const {
 202   if (young_length >= base_free_regions) {
 203     // end condition 1: not enough space for the young regions
 204     return false;
 205   }
 206 
 207   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
 208   size_t bytes_to_copy =
 209                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 210   double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,


< prev index next >