src/share/vm/gc/parallel/psMarkSweep.cpp

--- old/src/share/vm/gc/parallel/psMarkSweep.cpp

  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/isGCActiveMark.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/referenceProcessor.hpp"
  49 #include "gc/shared/spaceDecorator.hpp"
  50 #include "logging/log.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "runtime/biasedLocking.hpp"
  53 #include "runtime/fprofiler.hpp"
  54 #include "runtime/safepoint.hpp"
  55 #include "runtime/vmThread.hpp"
  56 #include "services/management.hpp"
  57 #include "services/memoryService.hpp"
  58 #include "utilities/events.hpp"
  59 #include "utilities/stack.inline.hpp"
  60 
  61 elapsedTimer        PSMarkSweep::_accumulated_time;
  62 jlong               PSMarkSweep::_time_of_last_gc   = 0;
  63 CollectorCounters*  PSMarkSweep::_counters = NULL;
  64 
  65 void PSMarkSweep::initialize() {
  66   MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  67   set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  68   _counters = new CollectorCounters("PSMarkSweep", 1);
  69 }
  70 
  71 // This method contains all heap specific policy for invoking mark sweep.
  72 // PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
  73 // the heap. It will do nothing further. If we need to bail out for policy
  74 // reasons, scavenge before full gc, or apply any other specialized behavior,
  75 // it needs to be added here.
  76 //
  77 // Note that this method should only be called from the vm_thread while
  78 // at a safepoint!
  79 //
  80 // Note that the all_soft_refs_clear flag in the collector policy
  81 // may be true because this method can be called without intervening
  82 // activity.  For example, when the heap space is tight and full measures
  83 // are being taken to free space.
  84 
  85 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  86   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  87   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  88   assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
  89 
  90   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  91   GCCause::Cause gc_cause = heap->gc_cause();
  92   PSAdaptiveSizePolicy* policy = heap->size_policy();
  93   IsGCActiveMark mark;
  94 
  95   if (ScavengeBeforeFullGC) {
  96     PSScavenge::invoke_no_policy();
  97   }
  98 
  99   const bool clear_all_soft_refs =
 100     heap->collector_policy()->should_clear_all_soft_refs();
 101 
 102   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
 103   UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
 104   PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 105 }
 106 
 107 // This method contains no policy. You should probably
 108 // be calling invoke() instead.
 109 bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 110   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 111   assert(ref_processor() != NULL, "Sanity");
 112 
 113   if (GCLocker::check_active_before_gc()) {
 114     return false;
 115   }
 116 
 117   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 118   GCCause::Cause gc_cause = heap->gc_cause();
  119 
 120   GCIdMark gc_id_mark;
 121   _gc_timer->register_gc_start();
 122   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 123 
 124   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 125 
 126   // The scope of casr should end after code that can change
 127   // CollectorPolicy::_should_clear_all_soft_refs.
 128   ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
 129 
 130   PSYoungGen* young_gen = heap->young_gen();
 131   PSOldGen* old_gen = heap->old_gen();
 132 
 133   // Increment the invocation count
 134   heap->increment_total_collections(true /* full */);
 135 
 136   // Save information needed to minimize mangling
 137   heap->record_gen_tops_before_GC();
 138 
 139   // We need to track unique mark sweep invocations as well.


 603   FlatProfiler::oops_do(adjust_pointer_closure());
 604   Management::oops_do(adjust_pointer_closure());
 605   JvmtiExport::oops_do(adjust_pointer_closure());
 606   SystemDictionary::oops_do(adjust_pointer_closure());
 607   ClassLoaderDataGraph::cld_do(adjust_cld_closure());
 608 
 609   // Now adjust pointers in remaining weak roots.  (All of which should
 610   // have been cleared if they pointed to non-surviving objects.)
 611   // Global (weak) JNI handles
 612   JNIHandles::weak_oops_do(adjust_pointer_closure());
 613 
 614   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
 615   CodeCache::blobs_do(&adjust_from_blobs);
 616   AOTLoader::oops_do(adjust_pointer_closure());
 617   StringTable::oops_do(adjust_pointer_closure());
 618   ref_processor()->weak_oops_do(adjust_pointer_closure());
 619   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 620 
 621   adjust_marks();
 622 
 623   young_gen->adjust_pointers();
 624   old_gen->adjust_pointers();
 625 }
 626 
 627 void PSMarkSweep::mark_sweep_phase4() {
 628   EventMark m("4 compact heap");
 629   GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
 630 
 631   // All pointers are now adjusted, move objects accordingly
 632 
 633   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 634   PSYoungGen* young_gen = heap->young_gen();
 635   PSOldGen* old_gen = heap->old_gen();
 636 
 637   old_gen->compact();
 638   young_gen->compact();
 639 }
 640 
 641 jlong PSMarkSweep::millis_since_last_gc() {
 642   // We need a monotonically non-decreasing time in ms but
 643   // os::javaTimeMillis() does not guarantee monotonicity.
  644 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

+++ new/src/share/vm/gc/parallel/psMarkSweep.cpp

  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/isGCActiveMark.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/referenceProcessor.hpp"
  49 #include "gc/shared/spaceDecorator.hpp"
  50 #include "logging/log.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "runtime/biasedLocking.hpp"
  53 #include "runtime/fprofiler.hpp"
  54 #include "runtime/safepoint.hpp"
  55 #include "runtime/vmThread.hpp"
  56 #include "services/management.hpp"
  57 #include "services/memoryService.hpp"
  58 #include "utilities/events.hpp"
  59 #include "utilities/stack.inline.hpp"
  60 
  61 elapsedTimer        PSMarkSweep::_accumulated_time;
  62 jlong               PSMarkSweep::_time_of_last_gc   = 0;
  63 CollectorCounters*  PSMarkSweep::_counters = NULL;
  64 ReferenceProcessor* PSMarkSweep::_ref_processor_global = NULL;
  65 
  66 void PSMarkSweep::initialize() {
  67   MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  68   _ref_processor_global = new ReferenceProcessor(mr);     // a vanilla ref proc
  69   _counters = new CollectorCounters("PSMarkSweep", 1);
  70 }
  71 
  72 PSMarkSweep::PSMarkSweep() : MarkSweep() {
  73   set_ref_processor(_ref_processor_global);
  74 }
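
This is the substantive change in the new version: the ReferenceProcessor is created once in initialize(), stored in the static _ref_processor_global, and each PSMarkSweep instance then picks it up in its constructor via set_ref_processor(). A minimal sketch of that pattern, with hypothetical names (RefProc, Collector) standing in for the HotSpot types:

    // Minimal sketch only; RefProc and Collector are hypothetical stand-ins
    // for ReferenceProcessor and PSMarkSweep, not HotSpot code.
    class RefProc { /* reference discovery state elided */ };

    class Collector {
      static RefProc* _rp_global;  // created once, at initialization
      RefProc*        _rp;         // per-instance handle to the shared processor
     public:
      static void initialize() { _rp_global = new RefProc(); }
      Collector() : _rp(_rp_global) {}  // every instance shares the global one
    };

    RefProc* Collector::_rp_global = nullptr;

Note that, as the listing below shows, invoke_no_policy() in this version also installs a fresh ReferenceProcessor per collection (new lines 125-126), so as the patch stands the shared instance is replaced on each full GC.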
  75 
  76 // This method contains all heap specific policy for invoking mark sweep.
  77 // PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
  78 // the heap. It will do nothing further. If we need to bail out for policy
  79 // reasons, scavenge before full gc, or apply any other specialized behavior,
  80 // it needs to be added here.
  81 //
  82 // Note that this method should only be called from the vm_thread while
  83 // at a safepoint!
  84 //
  85 // Note that the all_soft_refs_clear flag in the collector policy
  86 // may be true because this method can be called without intervening
  87 // activity.  For example, when the heap space is tight and full measures
  88 // are being taken to free space.
  89 
  90 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  91   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  92   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  93   assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
  94 
  95   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  96   GCCause::Cause gc_cause = heap->gc_cause();
  97   PSAdaptiveSizePolicy* policy = heap->size_policy();
  98   IsGCActiveMark mark;
  99 
 100   if (ScavengeBeforeFullGC) {
 101     PSScavenge::invoke_no_policy();
 102   }
 103 
 104   const bool clear_all_soft_refs =
 105     heap->collector_policy()->should_clear_all_soft_refs();
 106 
 107   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
 108   UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
 109   invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 110 }
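
UIntFlagSetting above is a scoped save/restore guard: when maximum_heap_compaction is requested it pins MarkSweepAlwaysCompactCount to 1 for the remainder of invoke(), and the previous value is restored when the scope exits. A sketch of that RAII idea, assuming a hypothetical ScopedUIntFlag in place of the HotSpot helper:

    #include <cstdint>

    // Hypothetical ScopedUIntFlag sketching what UIntFlagSetting does above:
    // save the flag on entry, override it, restore it on scope exit.
    class ScopedUIntFlag {
      uint32_t& _flag;
      uint32_t  _saved;
     public:
      ScopedUIntFlag(uint32_t& flag, uint32_t value)
          : _flag(flag), _saved(flag) { _flag = value; }
      ~ScopedUIntFlag() { _flag = _saved; }
    };

    uint32_t CompactCountFlag = 4;  // stand-in for MarkSweepAlwaysCompactCount

    void invoke_like(bool maximum_heap_compaction) {
      uint32_t count = maximum_heap_compaction ? 1 : CompactCountFlag;
      ScopedUIntFlag guard(CompactCountFlag, count);
      // ... collection runs with the override; the destructor restores it
    }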
 111 
 112 // This method contains no policy. You should probably
 113 // be calling invoke() instead.
 114 bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 115   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 116   assert(ref_processor() != NULL, "Sanity");
 117 
 118   if (GCLocker::check_active_before_gc()) {
 119     return false;
 120   }
 121 
 122   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 123   GCCause::Cause gc_cause = heap->gc_cause();
 124 
 125   MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
 126   set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
 127 
 128   GCIdMark gc_id_mark;
 129   _gc_timer->register_gc_start();
 130   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 131 
 132   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 133 
 134   // The scope of casr should end after code that can change
 135   // CollectorPolicy::_should_clear_all_soft_refs.
 136   ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
 137 
 138   PSYoungGen* young_gen = heap->young_gen();
 139   PSOldGen* old_gen = heap->old_gen();
 140 
 141   // Increment the invocation count
 142   heap->increment_total_collections(true /* full */);
 143 
 144   // Save information needed to minimize mangling
 145   heap->record_gen_tops_before_GC();
 146 
 147   // We need to track unique mark sweep invocations as well.


 611   FlatProfiler::oops_do(adjust_pointer_closure());
 612   Management::oops_do(adjust_pointer_closure());
 613   JvmtiExport::oops_do(adjust_pointer_closure());
 614   SystemDictionary::oops_do(adjust_pointer_closure());
 615   ClassLoaderDataGraph::cld_do(adjust_cld_closure());
 616 
 617   // Now adjust pointers in remaining weak roots.  (All of which should
 618   // have been cleared if they pointed to non-surviving objects.)
 619   // Global (weak) JNI handles
 620   JNIHandles::weak_oops_do(adjust_pointer_closure());
 621 
 622   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
 623   CodeCache::blobs_do(&adjust_from_blobs);
 624   AOTLoader::oops_do(adjust_pointer_closure());
 625   StringTable::oops_do(adjust_pointer_closure());
 626   ref_processor()->weak_oops_do(adjust_pointer_closure());
 627   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 628 
 629   adjust_marks();
 630 
 631   young_gen->adjust_pointers(this);
 632   old_gen->adjust_pointers(this);
 633 }
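
All of the root-set calls in phase 3 above use the same adjust_pointer_closure(). Conceptually, each visited slot is rewritten to the destination computed for its object during the earlier forwarding pass; nothing has moved yet when the slots are updated. A toy model of that step (Obj and the closure shape are illustrative, not the HotSpot types):

    // Toy model of pointer adjustment; not HotSpot code. Each live object
    // already knows where it will be moved (its forwardee); the closure
    // repoints every slot before any object is actually copied.
    struct Obj {
      Obj* forwardee;  // destination chosen in the forwarding phase
    };

    struct AdjustPointerClosure {
      void do_oop(Obj** slot) {
        Obj* obj = *slot;
        if (obj != nullptr && obj->forwardee != nullptr) {
          *slot = obj->forwardee;  // point at the post-compaction address
        }
      }
    };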
 634 
 635 void PSMarkSweep::mark_sweep_phase4() {
 636   EventMark m("4 compact heap");
 637   GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
 638 
 639   // All pointers are now adjusted, move objects accordingly
 640 
 641   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 642   PSYoungGen* young_gen = heap->young_gen();
 643   PSOldGen* old_gen = heap->old_gen();
 644 
 645   old_gen->compact();
 646   young_gen->compact();
 647 }
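
Phase 4 performs the actual move, old generation first. In a sliding compaction, live objects are processed in address order and each destination lies at or below its source, so no copy can clobber live data that has not moved yet. A toy model of that invariant (illustrative only, not the PSOldGen/PSYoungGen code):

    #include <cstring>
    #include <cstddef>

    // Toy model of sliding compaction; not HotSpot code. Live objects,
    // visited in ascending address order, are copied to the forwarding
    // destinations fixed in the earlier phases.
    struct Live { char* from; char* to; size_t size; };

    void compact(Live* objs, size_t n) {  // objs sorted by ascending 'from'
      for (size_t i = 0; i < n; i++) {
        // 'to' <= 'from' in a sliding compaction, so memmove handles any
        // overlap between an object's old and new extent.
        std::memmove(objs[i].to, objs[i].from, objs[i].size);
      }
    }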
 648 
 649 jlong PSMarkSweep::millis_since_last_gc() {
 650   // We need a monotonically non-decreasing time in ms but
 651   // os::javaTimeMillis() does not guarantee monotonicity.
 652   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
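
The listing is truncated here. The rest of the function conventionally subtracts the recorded last-GC timestamp and clamps the delta at zero; the sketch below is an assumption based on the comment above, not a quote of this patch, and uses std::chrono::steady_clock in place of os::javaTimeNanos():

    #include <chrono>
    #include <cstdint>

    // Sketch (assumption, not this patch's code): derive milliseconds from a
    // monotonic nanosecond clock, then clamp the delta at zero. steady_clock
    // stands in for os::javaTimeNanos(); os::javaTimeMillis() is avoided
    // because it offers no monotonicity guarantee.
    static int64_t monotonic_millis() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(
          steady_clock::now().time_since_epoch()).count();
    }

    static int64_t time_of_last_gc_ms = 0;  // models _time_of_last_gc

    int64_t millis_since_last_gc() {
      int64_t ret = monotonic_millis() - time_of_last_gc_ms;
      return ret < 0 ? 0 : ret;  // defensive clamp; should not trigger
    }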