/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"


HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
elapsedTimer               PSScavenge::_accumulated_time;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;
bool                       PSScavenge::_promotion_failed = false;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

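// Keep-alive closure handed to the reference processor: a referent that
// still resides in the young gen is copied to survivor space (or promoted)
// through the promotion manager, just like an object found via a strong root.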
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

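// After a promotion failure the young gen is walked and every forwarded
// object has its header reset to the default mark word; the few marks that
// must be preserved are reinstalled later by clean_up_failed_promotion().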
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
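  // A marking task can push follow-up work onto the shared depth-first
  // queues, so add steal tasks to balance that work across the workers and
  // to detect termination.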
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to scavenge
// and clean up after a failed promotion. It will not bail out when policy
// time limits have been exceeded, or apply any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();

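  // AlwaysTenure and NeverTenure pin the tenuring threshold at the extremes
  // established in PSScavenge::initialize().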
  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

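      // Idle workers repeatedly steal unfinished work from the other
      // workers' promotion queues until the terminator sees that every
      // queue has drained.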
      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Unlink any dead interned Strings
    StringTable::unlink(&_is_alive_closure);
    // Process the remaining live ones
    PSScavengeRootsClosure root_closure(promotion_manager);
    StringTable::oops_do(&root_closure);

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

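      // From here on, from_space holds this collection's survivors and
      // to_space is empty, ready for the next scavenge.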
      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                                " young_gen_capacity: " SIZE_FORMAT,
                                old_gen->capacity_in_bytes(),
                                young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that is the case. update() will also cause adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    CodeCache::prune_scavenge_root_nmethods();

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks",
                             _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
    _promotion_failed = false;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  _promotion_failed = true;
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL,                       // header provides liveness info
                           false);                     // next field updates do not need write barrier

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}