/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap-specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

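  // Optionally run a young collection first (controlled by the
  // ScavengeBeforeFullGC flag); emptying the young gen here is cheap and
  // reduces the work the full collection has to do.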
  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

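  // Forcing MarkSweepAlwaysCompactCount to 1 makes this invocation compact
  // fully; the scoped UIntXFlagSetting restores the original flag value when
  // invoke() returns.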
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntXFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

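  // Bail out if a thread is inside a JNI critical region; GC_locker notes
  // that a GC is needed and it will be requested once the critical region
  // is exited.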
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

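    // Compiled frames may hold derived pointers (interior pointers expressed
    // relative to a base oop). The table records base/derived pairs during
    // root scanning so the derived pointers can be recomputed after objects
    // move; see update_pointers() below.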
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

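    // The four classic mark-compact phases: mark live objects, compute new
    // object addresses, adjust pointers, then move the objects.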
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

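    // Put back the object headers that were saved aside when marking
    // overwrote them.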
    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
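    // If the young gen is now empty there can be no old->young pointers, so
    // the old gen's cards can simply be cleared. Otherwise dirty them all:
    // compaction moved objects, so the existing card marks are stale.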
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

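    // Hand the surviving discovered references over to the pending list so
    // the Java-level reference handler thread can process them.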
    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc */);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc */,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc */);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    young_gen->print_used_change(young_gen_prev_used);
    old_gen->print_used_change(old_gen_prev_used);
    MetaspaceAux::print_metaspace_change(metadata_prev_used);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

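// Attempt to absorb eden's live data into the old gen in place by moving the
// boundary between the generations up (only possible with
// UseAdaptiveGCBoundary), rather than copying the objects out of eden.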
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

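  // Overlay the preserved-mark buffer on the unused part of to_space (top to
  // end); once the buffer fills up, further preserved marks go to the
  // preserved mark/oop stacks (see the shared MarkSweep code).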
  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now that all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

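  // The marks preserved in phase 1 reference oops that may be about to move;
  // adjust those saved references as well.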
  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer);

  // All pointers are now adjusted; move objects accordingly.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}