/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"


HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

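// Closure applied to reference-object referents during reference
// processing: any referent still in the young generation is copied
// (or promoted) via the promotion manager, just as ordinary roots are.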
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

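// Transitively evacuates everything reachable from objects kept alive
// during reference processing by draining the promotion manager's
// pending work until its stacks are empty.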
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

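// Applied to every object in the young generation after a promotion
// failure: any object whose header was forwarded gets a fresh mark word,
// removing the stale forwarding information.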
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

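// GCTask wrapper that lets a GC worker thread run one slice (_work_id)
// of a reference-processing ProcessTask.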
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

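// GCTask wrapper for one slice of a reference-enqueueing EnqueueTask.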
class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

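// Runs reference processing and enqueueing on the parallel GC worker
// threads; StealTasks are added for work stealing when the task marks
// oops alive and more than one worker is active.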
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap-specific policy for invoking a scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not bail out if policy time limits have been
// exceeded, fall back to a full collection, or apply any other special
// behavior. All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start(os::elapsed_counter());

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

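  // The scavenge proper runs inside the scope below; the ResourceMark and
  // HandleMark ensure that temporary resources allocated during the
  // collection are released when the scope exits.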
  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
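    // Root scanning and evacuation: one GCTask is enqueued per root kind,
    // plus per-stripe old-to-young tasks and per-thread root tasks, and
    // StealTasks let the workers steal from each other until all work is done.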
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
              " young_gen_capacity: " SIZE_FORMAT,
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Compute new generation sizes at minor collections as well.
        // Don't check whether the size_policy is ready at this
        // level; let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining
      // can change the heap layout; make sure eden is reshaped if that is the case.
      // update() will also cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end(os::elapsed_counter());

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

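// Heuristic check made before each scavenge: skip the scavenge if
// to_space is not empty (unless ScavengeWithObjectsInToSpace) or if the
// estimated promotion volume is unlikely to fit in the old gen's free space.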
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
   "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

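// One-time setup called after argument parsing: establishes the initial
// tenuring threshold, the young generation boundary, the reference
// processor used during scavenges, the cached card table, and the counters.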
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL,                       // header provides liveness info
                           false);                     // next field updates do not need write barrier

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}