
src/hotspot/share/gc/parallel/psMarkSweep.cpp

Old version (before the change):

  81 // Note that the all_soft_refs_clear flag in the collector policy
  82 // may be true because this method can be called without intervening
  83 // activity.  For example, when the heap space is tight and full measures
  84 // are being taken to free space.
  85 
  86 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  87   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  88   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  89   assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
  90 
  91   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  92   GCCause::Cause gc_cause = heap->gc_cause();
  93   PSAdaptiveSizePolicy* policy = heap->size_policy();
  94   IsGCActiveMark mark;
  95 
  96   if (ScavengeBeforeFullGC) {
  97     PSScavenge::invoke_no_policy();
  98   }
  99 
 100   const bool clear_all_soft_refs =
 101     heap->collector_policy()->should_clear_all_soft_refs();
 102 
 103   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
 104   UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
 105   PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 106 }
 107 
 108 // This method contains no policy. You should probably
 109 // be calling invoke() instead.
 110 bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 111   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 112   assert(ref_processor() != NULL, "Sanity");
 113 
 114   if (GCLocker::check_active_before_gc()) {
 115     return false;
 116   }
 117 
 118   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 119   GCCause::Cause gc_cause = heap->gc_cause();
 120 
 121   GCIdMark gc_id_mark;
 122   _gc_timer->register_gc_start();
 123   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 124 
 125   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 126 
 127   // The scope of casr should end after code that can change
 128   // CollectorPolicy::_should_clear_all_soft_refs.
 129   ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
 130 
 131   PSYoungGen* young_gen = heap->young_gen();
 132   PSOldGen* old_gen = heap->old_gen();
 133 
 134   // Increment the invocation count
 135   heap->increment_total_collections(true /* full */);
 136 
 137   // Save information needed to minimize mangling
 138   heap->record_gen_tops_before_GC();
 139 
 140   // We need to track unique mark sweep invocations as well.
 141   _total_invocations++;
 142 
 143   heap->print_heap_before_gc();
 144   heap->trace_heap_before_gc(_gc_tracer);
 145 
 146   // Fill in TLABs
 147   heap->accumulate_statistics_all_tlabs();
 148   heap->ensure_parsability(true);  // retire TLABs
 149 
      [... lines 150-302 omitted by the diff ...]
 303           young_gen->from_space()->capacity_in_bytes() -
 304           young_gen->to_space()->capacity_in_bytes();
 305 
 306         // Used for diagnostics
 307         size_policy->clear_generation_free_space_flags();
 308 
 309         size_policy->compute_generations_free_space(young_live,
 310                                                     eden_live,
 311                                                     old_live,
 312                                                     cur_eden,
 313                                                     max_old_gen_size,
 314                                                     max_eden_size,
 315                                                     true /* full gc*/);
 316 
 317         size_policy->check_gc_overhead_limit(young_live,
 318                                              eden_live,
 319                                              max_old_gen_size,
 320                                              max_eden_size,
 321                                              true /* full gc*/,
 322                                              gc_cause,
 323                                              heap->collector_policy());
 324 
 325         size_policy->decay_supplemental_growth(true /* full gc*/);
 326 
 327         heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
 328 
 329         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
 330                                size_policy->calculated_survivor_size_in_bytes());
 331       }
 332       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
 333     }
 334 
 335     if (UsePerfData) {
 336       heap->gc_policy_counters()->update_counters();
 337       heap->gc_policy_counters()->update_old_capacity(
 338         old_gen->capacity_in_bytes());
 339       heap->gc_policy_counters()->update_young_capacity(
 340         young_gen->capacity_in_bytes());
 341     }
 342 
 343     heap->resize_all_tlabs();




New version (after the change):

  81 // Note that the all_soft_refs_clear flag in the soft ref policy
  82 // may be true because this method can be called without intervening
  83 // activity.  For example, when the heap space is tight and full measures
  84 // are being taken to free space.
  85 
  86 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  87   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  88   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  89   assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
  90 
  91   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  92   GCCause::Cause gc_cause = heap->gc_cause();
  93   PSAdaptiveSizePolicy* policy = heap->size_policy();
  94   IsGCActiveMark mark;
  95 
  96   if (ScavengeBeforeFullGC) {
  97     PSScavenge::invoke_no_policy();
  98   }
  99 
 100   const bool clear_all_soft_refs =
 101     heap->soft_ref_policy()->should_clear_all_soft_refs();
 102 
 103   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
 104   UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
 105   PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 106 }
 107 
 108 // This method contains no policy. You should probably
 109 // be calling invoke() instead.
 110 bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 111   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 112   assert(ref_processor() != NULL, "Sanity");
 113 
 114   if (GCLocker::check_active_before_gc()) {
 115     return false;
 116   }
 117 
 118   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 119   GCCause::Cause gc_cause = heap->gc_cause();
 120 
 121   GCIdMark gc_id_mark;
 122   _gc_timer->register_gc_start();
 123   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 124 
 125   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 126 
 127   // The scope of casr should end after code that can change
 128   // SoftRefPolicy::_should_clear_all_soft_refs.
 129   ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());
 130 
 131   PSYoungGen* young_gen = heap->young_gen();
 132   PSOldGen* old_gen = heap->old_gen();
 133 
 134   // Increment the invocation count
 135   heap->increment_total_collections(true /* full */);
 136 
 137   // Save information needed to minimize mangling
 138   heap->record_gen_tops_before_GC();
 139 
 140   // We need to track unique mark sweep invocations as well.
 141   _total_invocations++;
 142 
 143   heap->print_heap_before_gc();
 144   heap->trace_heap_before_gc(_gc_tracer);
 145 
 146   // Fill in TLABs
 147   heap->accumulate_statistics_all_tlabs();
 148   heap->ensure_parsability(true);  // retire TLABs
 149 
      [... lines 150-302 omitted by the diff ...]
 303           young_gen->from_space()->capacity_in_bytes() -
 304           young_gen->to_space()->capacity_in_bytes();
 305 
 306         // Used for diagnostics
 307         size_policy->clear_generation_free_space_flags();
 308 
 309         size_policy->compute_generations_free_space(young_live,
 310                                                     eden_live,
 311                                                     old_live,
 312                                                     cur_eden,
 313                                                     max_old_gen_size,
 314                                                     max_eden_size,
 315                                                     true /* full gc*/);
 316 
 317         size_policy->check_gc_overhead_limit(young_live,
 318                                              eden_live,
 319                                              max_old_gen_size,
 320                                              max_eden_size,
 321                                              true /* full gc*/,
 322                                              gc_cause,
 323                                              heap->soft_ref_policy());
 324 
 325         size_policy->decay_supplemental_growth(true /* full gc*/);
 326 
 327         heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
 328 
 329         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
 330                                size_policy->calculated_survivor_size_in_bytes());
 331       }
 332       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
 333     }
 334 
 335     if (UsePerfData) {
 336       heap->gc_policy_counters()->update_counters();
 337       heap->gc_policy_counters()->update_old_capacity(
 338         old_gen->capacity_in_bytes());
 339       heap->gc_policy_counters()->update_young_capacity(
 340         young_gen->capacity_in_bytes());
 341     }
 342 
 343     heap->resize_all_tlabs();
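
The substance of the change: should_clear_all_soft_refs() and the ClearedAllSoftRefs scope object are now obtained from the heap's dedicated soft_ref_policy() rather than its general collector_policy(), and check_gc_overhead_limit() receives the same soft ref policy, since that is the code that can request soft-reference clearing when the GC overhead limit is close to being exceeded. The hand-off works roughly as in the following sketch; the names SoftRefPolicySketch, ClearedAllSoftRefsSketch, and full_gc_sketch are hypothetical simplifications, not HotSpot's actual declarations:

    // Policy object owning the soft-reference-clearing state.
    class SoftRefPolicySketch {
      bool _should_clear_all_soft_refs = false;  // request: clear on next full GC
      bool _all_soft_refs_clear        = false;  // record: last GC cleared them
     public:
      bool should_clear_all_soft_refs() const { return _should_clear_all_soft_refs; }
      void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
      bool all_soft_refs_clear() const { return _all_soft_refs_clear; }
      void cleared_all_soft_refs() { _all_soft_refs_clear = true; }
    };

    // Scope object mirroring ClearedAllSoftRefs: if this collection was
    // asked to clear soft refs, record on scope exit that it did so.
    class ClearedAllSoftRefsSketch {
      bool _clear_all_soft_refs;
      SoftRefPolicySketch* _policy;
     public:
      ClearedAllSoftRefsSketch(bool clear_all, SoftRefPolicySketch* policy)
        : _clear_all_soft_refs(clear_all), _policy(policy) {}
      ~ClearedAllSoftRefsSketch() {
        if (_clear_all_soft_refs) {
          _policy->cleared_all_soft_refs();
        }
      }
    };

    // Usage shaped like invoke_no_policy(): casr is declared before any
    // code that may change the flag, so its destructor runs after all of it.
    bool full_gc_sketch(SoftRefPolicySketch* policy, bool clear_all_softrefs) {
      ClearedAllSoftRefsSketch casr(clear_all_softrefs, policy);
      // ... marking, reference processing, compaction, overhead-limit check ...
      return true;
    }

This ordering is what the comment above casr protects: the record that all soft refs were cleared must be written only after everything that can still change the request flag, including the overhead-limit check, has run.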

