/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif // INCLUDE_JFR

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before the full gc, or perform any other specialized
// behavior, that logic needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

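  // When maximum compaction is requested, force this collection to compact
  // fully by temporarily setting MarkSweepAlwaysCompactCount to 1; the
  // UIntFlagSetting below restores the original value when it goes out of scope.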
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

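  // Bail out if a thread is inside a JNI critical region; the GC locker
  // will request another collection once the last critical section exits.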
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

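    // Phase 1: recursively mark all objects reachable from the strong roots
    // and process discovered references.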
    mark_sweep_phase1(clear_all_softrefs);

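    // Phase 2: compute the new (compacted) address for every live object.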
    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

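    // Phase 3: adjust all pointers, in roots and in the heap, to refer to the
    // new object locations.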
    mark_sweep_phase3();

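    // Phase 4: move the live objects to their new locations.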
    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

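    // If the young gen is completely empty there can be no old-to-young
    // pointers, so the old gen's portion of the card table can simply be
    // cleared; otherwise every old gen card must be dirtied, since compaction
    // may have moved objects and the precise card marks can no longer be trusted.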
    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      if (young_gen_empty) {
        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
      }
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

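    // Move references that were discovered during marking but not cleared
    // onto the pending list, to be handled by the reference handler thread.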
    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated, but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

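// Attempt to move the boundary between the young and old generations so that
// the live data left in eden becomes part of the old gen in place, avoiding
// the need to copy it.  Only possible when UseAdaptiveGCBoundary is enabled
// and both generations are fully committed.  Returns true if eden was absorbed.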
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

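  // The unused portion of the to-space (from top() to end()) is borrowed as
  // scratch space for preserved marks; anything beyond its capacity overflows
  // into the preserved mark/oop stacks.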
  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
  JFR_ONLY(Jfr::weak_oops_do(&always_true, adjust_pointer_closure()));

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: "INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}