/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the soft ref policy
// may be true because this method can be called without intervening
// activity.  For example when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->soft_ref_policy()->should_clear_all_soft_refs();

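  // Forcing MarkSweepAlwaysCompactCount to 1 makes this invocation compact
  // fully instead of leaving dead space behind; the UIntFlagSetting below
  // restores the previous flag value when it goes out of scope.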
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

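  // Bail out early if a JNI critical section is active; the GC locker
  // will request another collection once the last critical section exits.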
  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // SoftRefPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause);

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceUtils::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

#if COMPILER2_OR_JVMCI
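    // The derived pointer table records compiled-code pointers that were
    // derived from an object's base address.  Clear it here; entries are
    // gathered while the roots are scanned and the derived pointers are
    // recomputed by DerivedPointerTable::update_pointers() below, after
    // the objects have been moved.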
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if COMPILER2_OR_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    PSCardTable* card_table = heap->card_table();
    MemRegion old_mr = heap->old_gen()->reserved();
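    // If the young gen is completely empty there can be no old-to-young
    // pointers, so the old gen's cards can simply be cleared.  Otherwise,
    // compaction may have moved objects with young gen references anywhere
    // within the old gen, so all of its cards must be dirtied.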
    if (young_gen_empty) {
      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceUtils::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    assert(!ref_processor()->discovery_enabled(), "Should have been disabled earlier");

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    young_gen->print_used_change(young_gen_prev_used);
    old_gen->print_used_change(old_gen_prev_used);
    MetaspaceUtils::print_metaspace_change(metadata_prev_used);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

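// Attempt to "absorb" the live data in eden into the old gen by moving the
// boundary between the two generations up to eden's top, rather than copying
// that data during compaction.  This is only possible with an adaptive GC
// boundary (UseAdaptiveGCBoundary).  Returns true if the boundary was moved,
// after which eden is logically empty.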
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
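  // The unused area of to_space doubles as scratch storage for preserved
  // marks; marks that do not fit there overflow onto the preserved
  // mark/oop stacks.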
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
    AOTLoader::oops_do(mark_and_push_closure());
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

    ref_processor()->setup_policy(clear_all_softrefs);
    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
    gc_tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
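    // Dead weak roots are cleared here.  Live ones need no processing at
    // this point (their referents are adjusted together with the other
    // roots in phase 3), hence the do-nothing "keep alive" closure.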
    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(is_alive_closure(), _gc_timer);

    // Unload nmethods.
    CodeCache::do_unloading(is_alive_closure(), purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer);
    // Delete entries for dead interned strings.
    StringTable::unlink(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer);
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();
  }

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also precompact (compute new addresses for) the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  WeakProcessor::oops_do(adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  AOTLoader::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}