/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psMarkSweep.cpp.incl"

elapsedTimer        PSMarkSweep::_accumulated_time;
unsigned int        PSMarkSweep::_total_invocations = 0;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr,
                                          true,    // atomic_discovery
                                          false);  // mt_discovery
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap-specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. Any policy-driven bailout,
// scavenge before full gc, or other specialized behavior needs to be
// added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.  The exceptions are
  // for explicitly requested GCs.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

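    // A request for maximum compaction forces full compaction on this cycle
    // by setting the compact count to 1; IntFlagSetting restores
    // MarkSweepAlwaysCompactCount when it goes out of scope.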
    int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output the
    // customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation, methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
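    // C2-compiled frames may contain derived pointers (interior pointers
    // computed from an object base).  Clear the table here; entries recorded
    // during the collection are fixed up by update_pointers() below, after
    // objects have moved.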
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase 3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information, which is used as input
    // to the soft ref clearing policy at the next GC.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

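    // If the entire young gen is empty, no old->young pointers can remain,
    // so the cards covering perm and old gen can simply be cleared;
    // otherwise conservatively dirty them so the next scavenge rescans
    // those regions.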
    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

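    // Hand the references discovered during this collection to the pending
    // list, where the java.lang.ref reference handler thread picks them up.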
    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    bool free_ratio_in_effect = false;
    if (UseFreeRatioForParallelGC ||
        (UseFreeRatioOnlyInSystemGCForParallelGC &&
         gc_cause == GCCause::_java_lang_system_gc)) {
      free_ratio_in_effect = heap->try_to_shrink_by_free_ratio(true);
    }

    if (!free_ratio_in_effect && UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
            " young_gen_capacity: " SIZE_FORMAT
            " perm_gen_capacity: " SIZE_FORMAT " ",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
            perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC, so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    if (PrintGCDetails) {
      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
        if (size_policy->gc_time_limit_exceeded()) {
          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
            "of " UINTX_FORMAT "%%", GCTimeLimit);
        } else {
          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
            "of " UINTX_FORMAT "%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

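  // Overlay the preserved-mark buffer on to_space, which holds no live data
  // during a full collection.  Marks that do not fit in the first
  // _preserved_count_max slots overflow into the growable stacks below.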
  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);

  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // (#klass/k)^2, for k ~ 10, appears to be a better setting, but this will
  // have to do for now until we investigate a more optimal setting.
  _revisit_mdo_stack   = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}


void PSMarkSweep::deallocate_stacks() {
  if (_preserved_oop_stack) {
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  delete _marking_stack;
  delete _revisit_klass_stack;
  delete _revisit_mdo_stack;
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    ReferenceProcessor::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    vmSymbols::oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(is_alive_closure());
  StringTable::unlink(is_alive_closure());

  assert(_marking_stack->is_empty(), "stack should be empty by now");
}


void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last, a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
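// Passed below where a liveness filter is required but every remaining
// weak JNI handle should be visited unconditionally.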
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
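  // adjust_root_pointer_closure() differs from adjust_pointer_closure() in
  // flagging its targets as roots, which matters only to the (debug-only)
  // ValidateMarkSweep live-oop tracking mentioned above.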
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  vmSymbols::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  SymbolTable::oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  _time_of_last_gc = os::javaTimeMillis();
}