< prev index next >

src/share/vm/gc/parallel/psMarkSweep.cpp

Print this page




  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/parallel/parallelScavengeHeap.hpp"
  30 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  31 #include "gc/parallel/psMarkSweep.hpp"
  32 #include "gc/parallel/psMarkSweepDecorator.hpp"
  33 #include "gc/parallel/psOldGen.hpp"
  34 #include "gc/parallel/psScavenge.hpp"
  35 #include "gc/parallel/psYoungGen.hpp"
  36 #include "gc/serial/markSweep.hpp"
  37 #include "gc/shared/gcCause.hpp"
  38 #include "gc/shared/gcHeapSummary.hpp"

  39 #include "gc/shared/gcLocker.inline.hpp"
  40 #include "gc/shared/gcTimer.hpp"
  41 #include "gc/shared/gcTrace.hpp"
  42 #include "gc/shared/gcTraceTime.hpp"
  43 #include "gc/shared/isGCActiveMark.hpp"
  44 #include "gc/shared/referencePolicy.hpp"
  45 #include "gc/shared/referenceProcessor.hpp"
  46 #include "gc/shared/spaceDecorator.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "runtime/biasedLocking.hpp"
  49 #include "runtime/fprofiler.hpp"
  50 #include "runtime/safepoint.hpp"
  51 #include "runtime/vmThread.hpp"
  52 #include "services/management.hpp"
  53 #include "services/memoryService.hpp"
  54 #include "utilities/events.hpp"
  55 #include "utilities/stack.inline.hpp"
  56 
  57 elapsedTimer        PSMarkSweep::_accumulated_time;
  58 jlong               PSMarkSweep::_time_of_last_gc   = 0;


  96     heap->collector_policy()->should_clear_all_soft_refs();
  97 
  98   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  99   UIntXFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
 100   PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 101 }
 102 
 103 // This method contains no policy. You should probably
 104 // be calling invoke() instead.
 105 bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 106   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 107   assert(ref_processor() != NULL, "Sanity");
 108 
 109   if (GC_locker::check_active_before_gc()) {
 110     return false;
 111   }
 112 
 113   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 114   GCCause::Cause gc_cause = heap->gc_cause();
 115 

 116   _gc_timer->register_gc_start();
 117   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 118 
 119   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 120 
 121   // The scope of casr should end after code that can change
 122   // CollectorPolicy::_should_clear_all_soft_refs.
 123   ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
 124 
 125   PSYoungGen* young_gen = heap->young_gen();
 126   PSOldGen* old_gen = heap->old_gen();
 127 
 128   // Increment the invocation count
 129   heap->increment_total_collections(true /* full */);
 130 
 131   // Save information needed to minimize mangling
 132   heap->record_gen_tops_before_GC();
 133 
 134   // We need to track unique mark sweep invocations as well.
 135   _total_invocations++;


 148     Universe::verify(" VerifyBeforeGC:");
 149   }
 150 
 151   // Verify object start arrays
 152   if (VerifyObjectStartArray &&
 153       VerifyBeforeGC) {
 154     old_gen->verify_object_start_array();
 155   }
 156 
 157   heap->pre_full_gc_dump(_gc_timer);
 158 
 159   // Filled in below to track the state of the young gen after the collection.
 160   bool eden_empty;
 161   bool survivors_empty;
 162   bool young_gen_empty;
 163 
 164   {
 165     HandleMark hm;
 166 
 167     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 168     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
 169     TraceCollectorStats tcs(counters());
 170     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 171 
 172     if (TraceOldGenTime) accumulated_time()->start();
 173 
 174     // Let the size policy know we're starting
 175     size_policy->major_collection_begin();
 176 
 177     CodeCache::gc_prologue();
 178     BiasedLocking::preserve_marks();
 179 
 180     // Capture heap size before collection for printing.
 181     size_t prev_used = heap->used();
 182 
 183     // Capture metadata size before collection for sizing.
 184     size_t metadata_prev_used = MetaspaceAux::used_bytes();
 185 
 186     // For PrintGCDetails
 187     size_t old_gen_prev_used = old_gen->used_in_bytes();
 188     size_t young_gen_prev_used = young_gen->used_in_bytes();


 491   MutableSpace* to_space = young_gen->to_space();
 492   _preserved_marks = (PreservedMark*)to_space->top();
 493   _preserved_count = 0;
 494 
 495   // We want to calculate the size in bytes first.
 496   _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
 497   // Now divide by the size of a PreservedMark
 498   _preserved_count_max /= sizeof(PreservedMark);
 499 }
 500 
 501 
// Release the per-collection mark/sweep work stacks once the GC is done
// with them.
void PSMarkSweep::deallocate_stacks() {
  // NOTE(review): clear(true) presumably also frees the stacks' cached
  // backing segments while clear() keeps them — confirm against Stack::clear.
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}
 508 
// Phase 1 of the full collection: mark everything transitively reachable
// from the strong roots, process discovered (soft/weak/final/phantom)
// references, then unload dead classes/nmethods and prune the weak
// string/symbol tables.
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    // !FixRelocations: marking only visits oops in code blobs, it does not
    // patch relocations (that happens when pointers are adjusted later).
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
 567 
 568 
// Phase 2: with marking complete, compute each live object's
// post-compaction destination address.
void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}
 587 
 588 // This should be moved to the shared markSweep code!
 589 class PSAlwaysTrueClosure: public BoolObjectClosure {
 590 public:
 591   bool do_object_b(oop p) { return true; }
 592 };
 593 static PSAlwaysTrueClosure always_true;
 594 
// Phase 3: update every root and interior pointer to refer to the new
// object locations computed in phase 2. Objects have not moved yet.
void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  // FixRelocations here (unlike marking): oop relocations embedded in
  // code blobs must point at the objects' new addresses.
  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  // NOTE(review): presumably updates the preserved-mark entries (saved in
  // to-space, see allocate_stacks) for moved objects — confirm in markSweep.
  adjust_marks();

  // Walk the heap itself and update interior pointers.
  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}
 634 
// Phase 4: slide live objects to the destinations computed in phase 2.
// Old gen is compacted before young gen (phase 2 set the old gen up as
// the compaction destination, so young-gen objects may move into it).
void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}
 648 
 649 jlong PSMarkSweep::millis_since_last_gc() {
 650   // We need a monotonically non-decreasing time in ms but
 651   // os::javaTimeMillis() does not guarantee monotonicity.
 652   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 653   jlong ret_val = now - _time_of_last_gc;
 654   // XXX See note in genCollectedHeap::millis_since_last_gc().
 655   if (ret_val < 0) {
 656     NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
 657     return 0;


  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/parallel/parallelScavengeHeap.hpp"
  30 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  31 #include "gc/parallel/psMarkSweep.hpp"
  32 #include "gc/parallel/psMarkSweepDecorator.hpp"
  33 #include "gc/parallel/psOldGen.hpp"
  34 #include "gc/parallel/psScavenge.hpp"
  35 #include "gc/parallel/psYoungGen.hpp"
  36 #include "gc/serial/markSweep.hpp"
  37 #include "gc/shared/gcCause.hpp"
  38 #include "gc/shared/gcHeapSummary.hpp"
  39 #include "gc/shared/gcId.hpp"
  40 #include "gc/shared/gcLocker.inline.hpp"
  41 #include "gc/shared/gcTimer.hpp"
  42 #include "gc/shared/gcTrace.hpp"
  43 #include "gc/shared/gcTraceTime.hpp"
  44 #include "gc/shared/isGCActiveMark.hpp"
  45 #include "gc/shared/referencePolicy.hpp"
  46 #include "gc/shared/referenceProcessor.hpp"
  47 #include "gc/shared/spaceDecorator.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "runtime/biasedLocking.hpp"
  50 #include "runtime/fprofiler.hpp"
  51 #include "runtime/safepoint.hpp"
  52 #include "runtime/vmThread.hpp"
  53 #include "services/management.hpp"
  54 #include "services/memoryService.hpp"
  55 #include "utilities/events.hpp"
  56 #include "utilities/stack.inline.hpp"
  57 
  58 elapsedTimer        PSMarkSweep::_accumulated_time;
  59 jlong               PSMarkSweep::_time_of_last_gc   = 0;


  97     heap->collector_policy()->should_clear_all_soft_refs();
  98 
  99   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
 100   UIntXFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
 101   PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 102 }
 103 
 104 // This method contains no policy. You should probably
 105 // be calling invoke() instead.
 106 bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 107   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 108   assert(ref_processor() != NULL, "Sanity");
 109 
 110   if (GC_locker::check_active_before_gc()) {
 111     return false;
 112   }
 113 
 114   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 115   GCCause::Cause gc_cause = heap->gc_cause();
 116 
 117   GCIdMark gc_id_mark;
 118   _gc_timer->register_gc_start();
 119   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 120 
 121   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 122 
 123   // The scope of casr should end after code that can change
 124   // CollectorPolicy::_should_clear_all_soft_refs.
 125   ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
 126 
 127   PSYoungGen* young_gen = heap->young_gen();
 128   PSOldGen* old_gen = heap->old_gen();
 129 
 130   // Increment the invocation count
 131   heap->increment_total_collections(true /* full */);
 132 
 133   // Save information needed to minimize mangling
 134   heap->record_gen_tops_before_GC();
 135 
 136   // We need to track unique mark sweep invocations as well.
 137   _total_invocations++;


 150     Universe::verify(" VerifyBeforeGC:");
 151   }
 152 
 153   // Verify object start arrays
 154   if (VerifyObjectStartArray &&
 155       VerifyBeforeGC) {
 156     old_gen->verify_object_start_array();
 157   }
 158 
 159   heap->pre_full_gc_dump(_gc_timer);
 160 
 161   // Filled in below to track the state of the young gen after the collection.
 162   bool eden_empty;
 163   bool survivors_empty;
 164   bool young_gen_empty;
 165 
 166   {
 167     HandleMark hm;
 168 
 169     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
 170     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
 171     TraceCollectorStats tcs(counters());
 172     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 173 
 174     if (TraceOldGenTime) accumulated_time()->start();
 175 
 176     // Let the size policy know we're starting
 177     size_policy->major_collection_begin();
 178 
 179     CodeCache::gc_prologue();
 180     BiasedLocking::preserve_marks();
 181 
 182     // Capture heap size before collection for printing.
 183     size_t prev_used = heap->used();
 184 
 185     // Capture metadata size before collection for sizing.
 186     size_t metadata_prev_used = MetaspaceAux::used_bytes();
 187 
 188     // For PrintGCDetails
 189     size_t old_gen_prev_used = old_gen->used_in_bytes();
 190     size_t young_gen_prev_used = young_gen->used_in_bytes();


 493   MutableSpace* to_space = young_gen->to_space();
 494   _preserved_marks = (PreservedMark*)to_space->top();
 495   _preserved_count = 0;
 496 
 497   // We want to calculate the size in bytes first.
 498   _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
 499   // Now divide by the size of a PreservedMark
 500   _preserved_count_max /= sizeof(PreservedMark);
 501 }
 502 
 503 
// Release the per-collection mark/sweep work stacks once the GC is done
// with them.
void PSMarkSweep::deallocate_stacks() {
  // NOTE(review): clear(true) presumably also frees the stacks' cached
  // backing segments while clear() keeps them — confirm against Stack::clear.
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}
 510 
// Phase 1 of the full collection: mark everything transitively reachable
// from the strong roots, process discovered (soft/weak/final/phantom)
// references, then unload dead classes/nmethods and prune the weak
// string/symbol tables. (Trace/timer calls pick up the current GC id
// implicitly in this version.)
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    // !FixRelocations: marking only visits oops in code blobs, it does not
    // patch relocations (that happens when pointers are adjusted later).
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
 569 
 570 
// Phase 2: with marking complete, compute each live object's
// post-compaction destination address.
void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}
 589 
 590 // This should be moved to the shared markSweep code!
 591 class PSAlwaysTrueClosure: public BoolObjectClosure {
 592 public:
 593   bool do_object_b(oop p) { return true; }
 594 };
 595 static PSAlwaysTrueClosure always_true;
 596 
// Phase 3: update every root and interior pointer to refer to the new
// object locations computed in phase 2. Objects have not moved yet.
void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  // FixRelocations here (unlike marking): oop relocations embedded in
  // code blobs must point at the objects' new addresses.
  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  // NOTE(review): presumably updates the preserved-mark entries (saved in
  // to-space, see allocate_stacks) for moved objects — confirm in markSweep.
  adjust_marks();

  // Walk the heap itself and update interior pointers.
  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}
 636 
// Phase 4: slide live objects to the destinations computed in phase 2.
// Old gen is compacted before young gen (phase 2 set the old gen up as
// the compaction destination, so young-gen objects may move into it).
void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}
 650 
 651 jlong PSMarkSweep::millis_since_last_gc() {
 652   // We need a monotonically non-decreasing time in ms but
 653   // os::javaTimeMillis() does not guarantee monotonicity.
 654   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 655   jlong ret_val = now - _time_of_last_gc;
 656   // XXX See note in genCollectedHeap::millis_since_last_gc().
 657   if (ret_val < 0) {
 658     NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
 659     return 0;
< prev index next >