/*
 * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
CollectorCounters*         PSScavenge::_counters = NULL;

// Define before use
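// An object in the young generation is considered live during a scavenge only
// if it has already been forwarded (copied); anything outside the young
// generation is treated as live.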
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

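// Keep-alive closure handed to the reference processor: referents that must be
// kept alive are copied to to_space or promoted through the promotion manager,
// in the same way as ordinary scavenge roots.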
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    assert((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
           "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

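// Drains the promotion manager's stacks so that everything transitively
// reachable from objects kept alive during reference processing is evacuated
// as well.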
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

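// GCTask wrapper that runs one worker's share of a ReferenceProcessor
// ProcessTask on a GC worker thread, using that worker's promotion manager.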
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

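// GCTask wrapper that runs one worker's share of an EnqueueTask, which links
// the discovered reference objects onto the pending reference list.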
class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

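// Runs the reference processing and enqueueing proxies above on the heap's
// GC task manager, one task per active worker.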
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
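  // A full collection follows if the scavenge failed or if the size policy
  // estimates that the promotions of another scavenge would not fit in the
  // old generation's remaining free space.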
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceYoungGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear(SpaceDecorator::Mangle);

    save_to_space_top_before_gc();

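    // Clear the table of derived pointers (interior pointers created by
    // compiled code). Entries recorded while scanning compiled frames during
    // the scavenge are updated after objects have moved (see
    // DerivedPointerTable::update_pointers() below).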
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    PreGCValues pre_gc_values(heap);

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      // If active_workers can exceed 1, add a StealTask.
      // PSPromotionManager::drain_stacks_depth() does not fully drain its
      // stacks and expects a StealTask to complete the draining if
      // ParallelGCThreads is > 1.
      if (gc_task_manager()->workers() > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart:  collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding on a free ratio for the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them), limit
        // the young generation size using NewRatio so that it follows the old
        // generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
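        // Resize the survivor spaces and pick the tenuring threshold for the
        // next collection, based on whether the survivors overflowed this time.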
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold " UINTX_FORMAT ")",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            (AdaptiveSizePolicy::should_update_eden_stats(gc_cause))) {
          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of the eden. With a NUMA eden, CPU hotplugging or
      // offlining can change the heap layout, so make sure eden is reshaped if
      // that is the case. update() also performs adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceYoungGenTime) accumulated_time()->stop();

    young_gen->print_used_change(pre_gc_values.young_gen_used());
    old_gen->print_used_change(pre_gc_values.old_gen_used());
    MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  scavenge_exit.update();

  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                            scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                            scavenge_exit.ticks());
  gc_task_manager()->print_task_time_stamps();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// removing all forwarding references. It then restores any preserved marks.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  RemoveForwardedPointerClosure remove_fwd_ptr_closure;
  young_gen->object_iterate(&remove_fwd_ptr_closure);

  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Do not attempt to promote unless to_space is empty
  if (!young_gen->to_space()->is_empty()) {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(to_space_not_empty);
    }
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
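  // Attempt the scavenge only if the (padded) average promotion volume, capped
  // by what the young generation currently holds, fits in the old generation's
  // free space.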
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
                  result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  old_gen->free_in_bytes());
  if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
    log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

// Adaptive size policy support.  When the young generation/old generation
// boundary moves, _young_generation_boundary must be reset.
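// The compressed form is cached so that is_obj_in_young() can compare
// narrowOops against the boundary without decoding them.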
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
  }
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
           "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           ParallelGCThreads,          // mt processing degree
                           true,                       // mt discovery
                           ParallelGCThreads,          // mt discovery degree
                           true,                       // atomic_discovery
                           NULL);                      // header provides liveness info

  // Cache the cardtable
  _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());

  _counters = new CollectorCounters("PSScavenge", 0);
}