rev 47223 : [mq]: heapz8

/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap-specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy may be true
// because this method can be called without intervening activity.  For
// example, when the heap space is tight and full measures are being taken to
// free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

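  // Optionally run a young collection first so the full collection has less
  // live data to process (-XX:+ScavengeBeforeFullGC, on by default).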
  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

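  // Forcing MarkSweepAlwaysCompactCount to 1 makes this collection compact
  // maximally; otherwise only every Nth full GC ignores the dead-space
  // allowances and compacts fully.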
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

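  // Bail out if a thread is inside a JNI critical region; GCLocker will
  // request the collection again once the last critical section exits.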
  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

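    // Derived pointers (interior pointers recorded in compiled-code oop
    // maps) are gathered while roots are scanned and updated in a single
    // pass once objects have moved.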
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

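    // The four mark-sweep-compact phases: mark live objects, compute new
    // object addresses, adjust pointers, then move the objects.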
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

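    // The card table only needs to record old->young pointers.  If the
    // young gen is now empty there can be none, so the old-gen cards can
    // simply be cleared; otherwise conservatively dirty (invalidate) them.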
    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

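    // Hand the discovered Reference objects over to the pending list, from
    // which the java.lang.ref reference handler thread will process them.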
    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());

    ref_processor()->enqueue_discovered_references(NULL, &pt);

    pt.print_enqueue_phase();

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc */);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc */,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc */);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    young_gen->print_used_change(young_gen_prev_used);
    old_gen->print_used_change(old_gen_prev_used);
    MetaspaceAux::print_metaspace_change(metadata_prev_used);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

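// With +UseAdaptiveGCBoundary, try to let the old gen absorb eden's live
// data by moving the boundary between the two generations rather than by
// copying objects.  Returns true if the boundary was moved.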
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

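// During a full GC to_space is unused, so it serves as scratch space for the
// buffer of preserved marks (mark words that restore_marks() must put back
// after the collection); overflow goes to the preserved mark/oop stacks.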
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


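// clear(true) also frees the stacks' cached backing segments; the marking
// stack keeps its cache for reuse by the next collection.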
void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
    AOTLoader::oops_do(mark_and_push_closure());
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

    ref_processor()->setup_policy(clear_all_softrefs);
    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
    gc_tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(is_alive_closure(), _gc_timer);

    // Unload nmethods.
    CodeCache::do_unloading(is_alive_closure(), purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer);
    // Delete entries for dead interned strings.
    StringTable::unlink(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer);
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();
  }

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(adjust_pointer_closure());
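  // Weak references held by the heap sampling monitor (see
  // runtime/heapMonitoring.hpp) also need their pointers adjusted.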
  HeapMonitoring::weak_oops_do(adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  AOTLoader::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms, but
  // os::javaTimeMillis() does not guarantee monotonicity, so derive the
  // value from os::javaTimeNanos(), which does.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms, but
  // os::javaTimeMillis() does not guarantee monotonicity, so derive the
  // value from os::javaTimeNanos(), which does.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}