src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
rev 8068 : imported patch parallelscavenge_cleanup

Original version:

  42 #include "gc_implementation/shared/spaceDecorator.hpp"
  43 #include "gc_interface/gcCause.hpp"
  44 #include "memory/gcLocker.inline.hpp"
  45 #include "memory/referencePolicy.hpp"
  46 #include "memory/referenceProcessor.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "runtime/biasedLocking.hpp"
  49 #include "runtime/fprofiler.hpp"
  50 #include "runtime/safepoint.hpp"
  51 #include "runtime/vmThread.hpp"
  52 #include "services/management.hpp"
  53 #include "services/memoryService.hpp"
  54 #include "utilities/events.hpp"
  55 #include "utilities/stack.inline.hpp"
  56 
  57 elapsedTimer        PSMarkSweep::_accumulated_time;
  58 jlong               PSMarkSweep::_time_of_last_gc   = 0;
  59 CollectorCounters*  PSMarkSweep::_counters = NULL;
  60 
  61 void PSMarkSweep::initialize() {
  62   MemRegion mr = Universe::heap()->reserved_region();
  63   _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  64   _counters = new CollectorCounters("PSMarkSweep", 1);
  65 }
  66 
  67 // This method contains all heap-specific policy for invoking mark sweep.
  68 // PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
  69 // the heap. It will do nothing further. Any policy-driven bailout,
  70 // scavenge-before-full-gc, or other specialized behavior needs to be
  71 // added here.
  72 //
  73 // Note that this method should only be called from the vm_thread while
  74 // at a safepoint!
  75 //
  76 // Note that the all_soft_refs_clear flag in the collector policy
  77 // may be true because this method can be called without intervening
  78 // activity.  For example, when the heap space is tight and full measures
  79 // are being taken to free space.
  80 
  81 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  82   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  83   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  84   assert(!Universe::heap()->is_gc_active(), "not reentrant");
  85 
  86   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  87   GCCause::Cause gc_cause = heap->gc_cause();
  88   PSAdaptiveSizePolicy* policy = heap->size_policy();
  89   IsGCActiveMark mark;
  90 
  91   if (ScavengeBeforeFullGC) {
  92     PSScavenge::invoke_no_policy();
  93   }
  94 
  95   const bool clear_all_soft_refs =
  96     heap->collector_policy()->should_clear_all_soft_refs();
  97 
  98   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  99   UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
 100   PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 101 }
 102 
 103 // This method contains no policy. You should probably
 104 // be calling invoke() instead.
 105 bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 106   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 107   assert(ref_processor() != NULL, "Sanity");
 108 
 109   if (GC_locker::check_active_before_gc()) {
 110     return false;
 111   }
 112 
 113   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 114   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 115   GCCause::Cause gc_cause = heap->gc_cause();
 116 
 117   _gc_timer->register_gc_start();
 118   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 119 
 120   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 121 
 122   // The scope of casr should end after code that can change
 123   // CollectorPolicy::_should_clear_all_soft_refs.
 124   ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
 125 
 126   PSYoungGen* young_gen = heap->young_gen();
 127   PSOldGen* old_gen = heap->old_gen();
 128 
 129   // Increment the invocation count
 130   heap->increment_total_collections(true /* full */);
 131 
 132   // Save information needed to minimize mangling
 133   heap->record_gen_tops_before_GC();
 134 


 470   old_space->set_top(new_top);
 471   old_space->set_end(new_top);
 472   old_gen->reset_after_change();
 473 
 474   // Update the object start array for the filler object and the data from eden.
 475   ObjectStartArray* const start_array = old_gen->start_array();
 476   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
 477     start_array->allocate_block(p);
 478   }
 479 
 480   // Could update the promoted average here, but it is not typically updated at
 481   // full GCs and the value to use is unclear.  Something like
 482   //
 483   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
 484 
 485   size_policy->set_bytes_absorbed_from_eden(absorb_size);
 486   return true;
 487 }
 488 
 489 void PSMarkSweep::allocate_stacks() {
 490   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 491   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 492 
 493   PSYoungGen* young_gen = heap->young_gen();
 494 
 495   MutableSpace* to_space = young_gen->to_space();
 496   _preserved_marks = (PreservedMark*)to_space->top();
 497   _preserved_count = 0;
 498 
 499   // We want to calculate the size in bytes first.
 500   _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
 501   // Now divide by the size of a PreservedMark
 502   _preserved_count_max /= sizeof(PreservedMark);
 503 }
 504 
 505 
 506 void PSMarkSweep::deallocate_stacks() {
 507   _preserved_mark_stack.clear(true);
 508   _preserved_oop_stack.clear(true);
 509   _marking_stack.clear();
 510   _objarray_stack.clear(true);
 511 }
 512 
 513 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
 514   // Recursively traverse all live objects and mark them
 515   GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 516   trace(" 1");
 517 
 518   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 519   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 520 
 521   // Need to clear claim bits before the tracing starts.
 522   ClassLoaderDataGraph::clear_claimed_marks();
 523 
 524   // General strong roots.
 525   {
 526     ParallelScavengeHeap::ParStrongRootsScope psrs;
 527     Universe::oops_do(mark_and_push_closure());
 528     JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
 529     CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
 530     MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
 531     Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
 532     ObjectSynchronizer::oops_do(mark_and_push_closure());
 533     FlatProfiler::oops_do(mark_and_push_closure());
 534     Management::oops_do(mark_and_push_closure());
 535     JvmtiExport::oops_do(mark_and_push_closure());
 536     SystemDictionary::always_strong_oops_do(mark_and_push_closure());
 537     ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
 538     // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
 539     //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));


 565 
 566   // Delete entries for dead interned strings.
 567   StringTable::unlink(is_alive_closure());
 568 
 569   // Clean up unreferenced symbols in symbol table.
 570   SymbolTable::unlink();
 571   _gc_tracer->report_object_count_after_gc(is_alive_closure());
 572 }
 573 
 574 
 575 void PSMarkSweep::mark_sweep_phase2() {
 576   GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 577   trace("2");
 578 
 579   // Now all live objects are marked, compute the new object addresses.
 580 
 581   // It is not required that we traverse spaces in the same order in
 582   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
 583   // tracking expects us to do so. See comment under phase4.
 584 
 585   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 586   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 587 
 588   PSOldGen* old_gen = heap->old_gen();
 589 
 590   // Begin compacting into the old gen
 591   PSMarkSweepDecorator::set_destination_decorator_tenured();
 592 
 593   // This will also compact the young gen spaces.
 594   old_gen->precompact();
 595 }
 596 
 597 // This should be moved to the shared markSweep code!
 598 class PSAlwaysTrueClosure: public BoolObjectClosure {
 599 public:
 600   bool do_object_b(oop p) { return true; }
 601 };
 602 static PSAlwaysTrueClosure always_true;
 603 
 604 void PSMarkSweep::mark_sweep_phase3() {
 605   // Adjust the pointers to reflect the new locations
 606   GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 607   trace("3");
 608 
 609   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 610   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 611 
 612   PSYoungGen* young_gen = heap->young_gen();
 613   PSOldGen* old_gen = heap->old_gen();
 614 
 615   // Need to clear claim bits before the tracing starts.
 616   ClassLoaderDataGraph::clear_claimed_marks();
 617 
 618   // General strong roots.
 619   Universe::oops_do(adjust_pointer_closure());
 620   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
 621   CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
 622   Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
 623   ObjectSynchronizer::oops_do(adjust_pointer_closure());
 624   FlatProfiler::oops_do(adjust_pointer_closure());
 625   Management::oops_do(adjust_pointer_closure());
 626   JvmtiExport::oops_do(adjust_pointer_closure());
 627   SystemDictionary::oops_do(adjust_pointer_closure());
 628   ClassLoaderDataGraph::cld_do(adjust_cld_closure());
 629 
 630   // Now adjust pointers in remaining weak roots.  (All of which should
 631   // have been cleared if they pointed to non-surviving objects.)


 634 
 635   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
 636   CodeCache::blobs_do(&adjust_from_blobs);
 637   StringTable::oops_do(adjust_pointer_closure());
 638   ref_processor()->weak_oops_do(adjust_pointer_closure());
 639   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 640 
 641   adjust_marks();
 642 
 643   young_gen->adjust_pointers();
 644   old_gen->adjust_pointers();
 645 }
 646 
 647 void PSMarkSweep::mark_sweep_phase4() {
 648   EventMark m("4 compact heap");
 649   GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 650   trace("4");
 651 
 652   // All pointers are now adjusted, move objects accordingly
 653 
 654   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 655   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 656 
 657   PSYoungGen* young_gen = heap->young_gen();
 658   PSOldGen* old_gen = heap->old_gen();
 659 
 660   old_gen->compact();
 661   young_gen->compact();
 662 }
 663 
 664 jlong PSMarkSweep::millis_since_last_gc() {
 665   // We need a monotonically non-decreasing time in ms but
 666   // os::javaTimeMillis() does not guarantee monotonicity.
 667   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 668   jlong ret_val = now - _time_of_last_gc;
 669   // XXX See note in genCollectedHeap::millis_since_last_gc().
 670   if (ret_val < 0) {
 671     NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
 672     return 0;
 673   }
 674   return ret_val;
 675 }
  676 

Patched version (rev 8068, imported patch parallelscavenge_cleanup):

  42 #include "gc_implementation/shared/spaceDecorator.hpp"
  43 #include "gc_interface/gcCause.hpp"
  44 #include "memory/gcLocker.inline.hpp"
  45 #include "memory/referencePolicy.hpp"
  46 #include "memory/referenceProcessor.hpp"
  47 #include "oops/oop.inline.hpp"
  48 #include "runtime/biasedLocking.hpp"
  49 #include "runtime/fprofiler.hpp"
  50 #include "runtime/safepoint.hpp"
  51 #include "runtime/vmThread.hpp"
  52 #include "services/management.hpp"
  53 #include "services/memoryService.hpp"
  54 #include "utilities/events.hpp"
  55 #include "utilities/stack.inline.hpp"
  56 
  57 elapsedTimer        PSMarkSweep::_accumulated_time;
  58 jlong               PSMarkSweep::_time_of_last_gc   = 0;
  59 CollectorCounters*  PSMarkSweep::_counters = NULL;
  60 
  61 void PSMarkSweep::initialize() {
  62   MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  63   _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  64   _counters = new CollectorCounters("PSMarkSweep", 1);
  65 }
  66 
  67 // This method contains all heap-specific policy for invoking mark sweep.
  68 // PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
  69 // the heap. It will do nothing further. Any policy-driven bailout,
  70 // scavenge-before-full-gc, or other specialized behavior needs to be
  71 // added here.
  72 //
  73 // Note that this method should only be called from the vm_thread while
  74 // at a safepoint!
  75 //
  76 // Note that the all_soft_refs_clear flag in the collector policy
  77 // may be true because this method can be called without intervening
  78 // activity.  For example, when the heap space is tight and full measures
  79 // are being taken to free space.
  80 
  81 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  82   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  83   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  84   assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
  85 
  86   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  87   GCCause::Cause gc_cause = heap->gc_cause();
  88   PSAdaptiveSizePolicy* policy = heap->size_policy();
  89   IsGCActiveMark mark;
  90 
  91   if (ScavengeBeforeFullGC) {
  92     PSScavenge::invoke_no_policy();
  93   }
  94 
  95   const bool clear_all_soft_refs =
  96     heap->collector_policy()->should_clear_all_soft_refs();
  97 
  98   uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  99   UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
 100   PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 101 }
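
The UIntFlagSetting guard above temporarily forces MarkSweepAlwaysCompactCount
to 1 for a maximum compaction and restores the old value when invoke() returns.
A minimal sketch of that save/restore idiom, assuming a plain unsigned flag
(the class name is a hypothetical stand-in, not HotSpot's definition):

    // RAII guard: override a uint flag for the current scope, then restore it.
    class ScopedUIntSetting {
      unsigned int* _flag;   // the flag being temporarily overridden
      unsigned int  _saved;  // original value, restored on scope exit
    public:
      ScopedUIntSetting(unsigned int& flag, unsigned int value)
        : _flag(&flag), _saved(flag) { flag = value; }
      ~ScopedUIntSetting() { *_flag = _saved; }
    };
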
 102 
 103 // This method contains no policy. You should probably
 104 // be calling invoke() instead.
 105 bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
 106   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 107   assert(ref_processor() != NULL, "Sanity");
 108 
 109   if (GC_locker::check_active_before_gc()) {
 110     return false;
 111   }
 112 
 113   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

 114   GCCause::Cause gc_cause = heap->gc_cause();
 115 
 116   _gc_timer->register_gc_start();
 117   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
 118 
 119   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 120 
 121   // The scope of casr should end after code that can change
 122   // CollectorPolicy::_should_clear_all_soft_refs.
 123   ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
 124 
 125   PSYoungGen* young_gen = heap->young_gen();
 126   PSOldGen* old_gen = heap->old_gen();
 127 
 128   // Increment the invocation count
 129   heap->increment_total_collections(true /* full */);
 130 
 131   // Save information needed to minimize mangling
 132   heap->record_gen_tops_before_GC();
 133 


 469   old_space->set_top(new_top);
 470   old_space->set_end(new_top);
 471   old_gen->reset_after_change();
 472 
 473   // Update the object start array for the filler object and the data from eden.
 474   ObjectStartArray* const start_array = old_gen->start_array();
 475   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
 476     start_array->allocate_block(p);
 477   }
 478 
 479   // Could update the promoted average here, but it is not typically updated at
 480   // full GCs and the value to use is unclear.  Something like
 481   //
 482   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
 483 
 484   size_policy->set_bytes_absorbed_from_eden(absorb_size);
 485   return true;
 486 }
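
The start-array update above walks the absorbed region object by object: in a
contiguous, freshly filled space the next object starts exactly oop(p)->size()
words after the current one. A generic sketch of that walk; HeapWordT and the
function-pointer parameters are illustrative stand-ins, not HotSpot types:

    #include <cstddef>
    #include <cstdint>
    typedef uintptr_t* HeapWordT;  // assumed analog of HotSpot's HeapWord*

    // Visit every object in a contiguous, parsable region.
    void walk_region(HeapWordT start, HeapWordT end,
                     size_t (*size_in_words)(HeapWordT),
                     void (*visit)(HeapWordT)) {
      for (HeapWordT p = start; p < end; p += size_in_words(p)) {
        visit(p);  // e.g. start_array->allocate_block(p) above
      }
    }
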
 487 
 488 void PSMarkSweep::allocate_stacks() {
 489   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();


 490   PSYoungGen* young_gen = heap->young_gen();
 491 
 492   MutableSpace* to_space = young_gen->to_space();
 493   _preserved_marks = (PreservedMark*)to_space->top();
 494   _preserved_count = 0;
 495 
 496   // We want to calculate the size in bytes first.
 497   _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
 498   // Now divide by the size of a PreservedMark
 499   _preserved_count_max /= sizeof(PreservedMark);
 500 }
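
allocate_stacks() sizes the preserved-mark area in two steps: pointer_delta
with a jbyte element size yields the free space in to_space in bytes, and
dividing by sizeof(PreservedMark) converts bytes to a count of mark records.
The same arithmetic as a small standalone sketch; PreservedMarkT's two-word
layout is an assumption for illustration:

    #include <cstddef>
    #include <cstdint>
    struct PreservedMarkT { void* obj; uintptr_t mark; };  // assumed layout

    // Free bytes between top and end, then how many records fit there.
    size_t preserved_capacity(char* top, char* end) {
      size_t bytes = (size_t)(end - top);     // size in bytes first...
      return bytes / sizeof(PreservedMarkT);  // ...then in record units
    }
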
 501 
 502 
 503 void PSMarkSweep::deallocate_stacks() {
 504   _preserved_mark_stack.clear(true);
 505   _preserved_oop_stack.clear(true);
 506   _marking_stack.clear();
 507   _objarray_stack.clear(true);
 508 }
 509 
 510 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
 511   // Recursively traverse all live objects and mark them
 512   GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 513   trace(" 1");
 514 
 515   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

 516 
 517   // Need to clear claim bits before the tracing starts.
 518   ClassLoaderDataGraph::clear_claimed_marks();
 519 
 520   // General strong roots.
 521   {
 522     ParallelScavengeHeap::ParStrongRootsScope psrs;
 523     Universe::oops_do(mark_and_push_closure());
 524     JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
 525     CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
 526     MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
 527     Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
 528     ObjectSynchronizer::oops_do(mark_and_push_closure());
 529     FlatProfiler::oops_do(mark_and_push_closure());
 530     Management::oops_do(mark_and_push_closure());
 531     JvmtiExport::oops_do(mark_and_push_closure());
 532     SystemDictionary::always_strong_oops_do(mark_and_push_closure());
 533     ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
 534     // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
 535     //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));


 561 
 562   // Delete entries for dead interned strings.
 563   StringTable::unlink(is_alive_closure());
 564 
 565   // Clean up unreferenced symbols in symbol table.
 566   SymbolTable::unlink();
 567   _gc_tracer->report_object_count_after_gc(is_alive_closure());
 568 }
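
Phase 1 marks transitively: each strong-root closure marks and pushes unmarked
objects onto a marking stack, which is then drained, pushing each object's
unmarked children in turn. A generic sketch of that mark/push/drain pattern
using plain structs rather than HotSpot's closure machinery:

    #include <vector>

    struct ObjT { bool marked = false; std::vector<ObjT*> fields; };

    void mark_from_roots(const std::vector<ObjT*>& roots) {
      std::vector<ObjT*> stack;
      for (ObjT* r : roots) {
        if (r != nullptr && !r->marked) { r->marked = true; stack.push_back(r); }
      }
      while (!stack.empty()) {  // drain the marking stack
        ObjT* o = stack.back(); stack.pop_back();
        for (ObjT* f : o->fields) {
          if (f != nullptr && !f->marked) { f->marked = true; stack.push_back(f); }
        }
      }
    }
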
 569 
 570 
 571 void PSMarkSweep::mark_sweep_phase2() {
 572   GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 573   trace("2");
 574 
 575   // Now all live objects are marked, compute the new object addresses.
 576 
 577   // It is not required that we traverse spaces in the same order in
 578   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
 579   // tracking expects us to do so. See comment under phase4.
 580 
 581   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();


 582   PSOldGen* old_gen = heap->old_gen();
 583 
 584   // Begin compacting into the old gen
 585   PSMarkSweepDecorator::set_destination_decorator_tenured();
 586 
 587   // This will also compact the young gen spaces.
 588   old_gen->precompact();
 589 }
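
"Compute the new object addresses" means assigning each live object a
forwarding address: live objects are laid out consecutively from the bottom of
the destination space, in address order. A simplified sliding-compaction
sketch; the function-pointer parameters stand in for the decorator logic, and
dead objects are assumed to remain parsable:

    #include <cstddef>
    #include <cstdint>
    typedef uintptr_t* HeapWordT;  // assumed analog of HotSpot's HeapWord*

    void compute_forwarding(HeapWordT bottom, HeapWordT top,
                            bool (*is_live)(HeapWordT),
                            size_t (*size_in_words)(HeapWordT),
                            void (*set_forwardee)(HeapWordT, HeapWordT)) {
      HeapWordT compact_top = bottom;  // next free word in the compacted space
      for (HeapWordT p = bottom; p < top; p += size_in_words(p)) {
        if (is_live(p)) {
          set_forwardee(p, compact_top);  // record destination in the header
          compact_top += size_in_words(p);
        }
      }
    }
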
 590 
 591 // This should be moved to the shared markSweep code!
 592 class PSAlwaysTrueClosure: public BoolObjectClosure {
 593 public:
 594   bool do_object_b(oop p) { return true; }
 595 };
 596 static PSAlwaysTrueClosure always_true;
 597 
 598 void PSMarkSweep::mark_sweep_phase3() {
 599   // Adjust the pointers to reflect the new locations
 600   GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 601   trace("3");
 602 
 603   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();


 604   PSYoungGen* young_gen = heap->young_gen();
 605   PSOldGen* old_gen = heap->old_gen();
 606 
 607   // Need to clear claim bits before the tracing starts.
 608   ClassLoaderDataGraph::clear_claimed_marks();
 609 
 610   // General strong roots.
 611   Universe::oops_do(adjust_pointer_closure());
 612   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
 613   CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
 614   Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
 615   ObjectSynchronizer::oops_do(adjust_pointer_closure());
 616   FlatProfiler::oops_do(adjust_pointer_closure());
 617   Management::oops_do(adjust_pointer_closure());
 618   JvmtiExport::oops_do(adjust_pointer_closure());
 619   SystemDictionary::oops_do(adjust_pointer_closure());
 620   ClassLoaderDataGraph::cld_do(adjust_cld_closure());
 621 
 622   // Now adjust pointers in remaining weak roots.  (All of which should
 623   // have been cleared if they pointed to non-surviving objects.)


 626 
 627   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
 628   CodeCache::blobs_do(&adjust_from_blobs);
 629   StringTable::oops_do(adjust_pointer_closure());
 630   ref_processor()->weak_oops_do(adjust_pointer_closure());
 631   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 632 
 633   adjust_marks();
 634 
 635   young_gen->adjust_pointers();
 636   old_gen->adjust_pointers();
 637 }
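
Phase 3 rewrites every reference, strong and weak alike, to the forwarding
address recorded in phase 2; weak roots need no liveness check here because
dead referents were already cleared during reference processing. A minimal
sketch of a single field adjustment, with the predicates as assumed helpers:

    #include <cstdint>
    typedef uintptr_t* HeapWordT;  // assumed analog of HotSpot's HeapWord*

    // Rewrite one reference field to the destination chosen in phase 2.
    void adjust_field(HeapWordT* field,
                      bool (*is_forwarded)(HeapWordT),
                      HeapWordT (*forwardee)(HeapWordT)) {
      HeapWordT obj = *field;
      if (obj != nullptr && is_forwarded(obj)) {
        *field = forwardee(obj);  // now the post-compaction address
      }
    }
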
 638 
 639 void PSMarkSweep::mark_sweep_phase4() {
 640   EventMark m("4 compact heap");
 641   GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
 642   trace("4");
 643 
 644   // All pointers are now adjusted, move objects accordingly
 645 
 646   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();


 647   PSYoungGen* young_gen = heap->young_gen();
 648   PSOldGen* old_gen = heap->old_gen();
 649 
 650   old_gen->compact();
 651   young_gen->compact();
 652 }
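
Phase 4 does the actual sliding: each live object is copied to its forwardee,
which is always at or below its current address, so one ascending pass never
overwrites data it still needs, provided each object's size is read before its
own (possibly overlapping) copy. A simplified sketch with std::memmove standing
in for HotSpot's copy routines:

    #include <cstring>
    #include <cstddef>
    #include <cstdint>
    typedef uintptr_t* HeapWordT;  // assumed analog of HotSpot's HeapWord*

    void compact_region(HeapWordT bottom, HeapWordT top,
                        bool (*is_live)(HeapWordT),
                        size_t (*size_in_words)(HeapWordT),
                        HeapWordT (*forwardee)(HeapWordT)) {
      for (HeapWordT p = bottom; p < top; ) {
        size_t sz = size_in_words(p);  // read before the copy may clobber p
        if (is_live(p)) {
          std::memmove(forwardee(p), p, sz * sizeof(uintptr_t));
        }
        p += sz;
      }
    }
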
 653 
 654 jlong PSMarkSweep::millis_since_last_gc() {
 655   // We need a monotonically non-decreasing time in ms but
 656   // os::javaTimeMillis() does not guarantee monotonicity.
 657   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 658   jlong ret_val = now - _time_of_last_gc;
 659   // XXX See note in genCollectedHeap::millis_since_last_gc().
 660   if (ret_val < 0) {
 661     NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
 662     return 0;
 663   }
 664   return ret_val;
 665 }
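
The guard above clamps a negative delta to zero: _time_of_last_gc may have been
sampled against a different time base (see the XXX note), so the subtraction
can appear to go backwards. The same clamp as a standalone sketch using C++11's
monotonic clock:

    #include <chrono>

    // Milliseconds since an earlier sample of the same monotonic clock,
    // clamped at zero in case of an apparent time warp.
    long long millis_since(long long last_ms) {
      using namespace std::chrono;
      long long now_ms = duration_cast<milliseconds>(
          steady_clock::now().time_since_epoch()).count();
      long long delta = now_ms - last_ms;
      return delta < 0 ? 0 : delta;
    }
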
 666 