src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>


 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;


  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start(os::elapsed_counter());
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
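  // JEP 167 hook-up, as this changeset reads: the timer/tracer pair started
  // above is closed out by register_gc_end()/report_gc_end() at the bottom
  // of this method, which turn the recorded pause and its phases into events.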

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs
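  // Retiring TLABs is what makes the heap walkable here: each thread's
  // unused TLAB tail is filled with a dummy object, so the collector can
  // treat the heap as a contiguous sequence of parseable objects.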

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
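    // Note: t1 passes NULL for the GCTimer because this full-GC pause is
    // registered explicitly via register_gc_start()/register_gc_end() above
    // and below; the per-phase GCTraceTime instances in the phase methods
    // pass _gc_timer instead, so they are recorded as phases of the pause.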
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();


  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end(os::elapsed_counter());

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
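  // report_gc_end() consumes the timer's TimePartitions, i.e. the phase
  // intervals collected by the GCTraceTime instances above, and emits them
  // together with the pause itself as trace events.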

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {


  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
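  // Worked example (hypothetical numbers): with 512 KB free in to-space,
  // pointer_delta(end, top, sizeof(jbyte)) yields 524288 bytes; if a
  // PreservedMark holds an oop plus a mark word (16 bytes on a 64-bit VM),
  // the division leaves room for 32768 preserved entries.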
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}
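// Note on the clear(true) calls above: for these segmented stacks the
// boolean asks clear() to also release the stack's cached segments (see
// utilities/stack.hpp), so the marking memory is returned between
// collections rather than kept cached.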

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
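  // Each oops_do() call below hands the marking closure one class of
  // VM-internal strong roots (universe, global JNI handles, thread stacks,
  // monitors, profiler, management, JVMTI, system dictionary, class loader
  // graph); everything reachable from them is marked and pushed on the
  // marking stack.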
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }
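  // The stats returned by process_discovered_references() above (counts of
  // soft, weak, final and phantom references handled) feed the reference
  // statistics event via report_gc_reference_stats().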

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
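
  // With marking complete, the tracer can attribute liveness to every
  // object; report_object_count_after_gc() uses the is_alive closure to
  // emit per-class object count events (when that event type is enabled).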
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
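  // (precompact() computes each live object's new address and installs a
  // forwarding pointer without moving anything yet; the actual copying is
  // deferred to phase 4.)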
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());


  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().