/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;

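// Reserve the maximum heap, lay the old and young generations out within that
// reservation, and set up the card table, barrier set, adaptive size policy,
// and GC worker threads.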
jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       reserved_heap_size,
                       GenAlignment,
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region(heap_rs);

  PSCardTable* card_table = new PSCardTable(heap_rs.region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Make up the generations
  assert(MinOldSize <= OldSize && OldSize <= MaxOldSize, "Parameter check");
  assert(MinNewSize <= NewSize && NewSize <= MaxNewSize, "Parameter check");

  // Lay out the reserved space for the generations.
  // If OldGen is allocated on NV-DIMM, we need to split the reservation (this is required on Windows).
  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, ParallelArguments::is_heterogeneous_heap() /* split */);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize,
      "old", 1);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(ParallelArguments::is_heterogeneous_heap() ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (!PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  // Set up the WorkGang of GC worker threads.
  _workers.initialize_workers();

  return JNI_OK;
}

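// Create the MemoryPool and GCMemoryManager instances exposed through the
// java.lang.management API. The eden and survivor pools are registered with
// the old-gen manager as well, because a full collection affects them too.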
void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

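// Tells ScavengableNMethods which objects are subject to scavenging: for this
// collector, exactly those in the young generation.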
class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  PSParallelCompact::post_initialize();
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

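// Estimate the maximum usable heap: the reserved size less the space lost to
// one survivor space (to-space is never used for allocation). The estimate is
// never reported as smaller than the current capacity().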
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and a
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %u times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

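// Called from mem_allocate() with the Heap_lock held. Allocate directly in
// the old gen if the request should not go to eden (too large, or GC is
// locked out); during a "death march", allow only a bounded number of old-gen
// allocations before forcing a collection.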
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  // The do_full_collection() parameter clear_all_soft_refs
  // is interpreted here as maximum_compaction, which will
  // cause SoftRefs to be cleared.
  bool maximum_compaction = clear_all_soft_refs;
  PSParallelCompact::invoke(maximum_compaction);
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements policy for the
// allocation flow, NOT collection policy. So we do not check here whether
// the gc time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

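// TLABs are carved out of eden. min_size is not used here: the young gen
// either satisfies the full requested_size or returns NULL.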
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count)) {
    return;
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

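// Return the start of the object containing addr. Only the old gen keeps a
// block-start table (the object start array); for young-gen addresses this
// is unimplemented except when called during error reporting or debugging.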
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return PSParallelCompact::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

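// Snapshot the committed and used parts of each generation and space for GC
// tracing.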
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  PSParallelCompact::print_on_error(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  ParallelScavengeHeap::heap()->workers().print_worker_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}

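// Record the used and capacity sizes of each generation and young-gen space
// before a collection, so print_heap_change() can log before/after values.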
PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

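// Accessor for the singleton heap; asserts that it is initialized and that
// the parallel collector is in use.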
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
  return (ParallelScavengeHeap*)heap;
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
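// Debug-only support for ZapUnusedHeapArea: remember the top of each space
// before a collection, and mangle unused areas afterwards so stale data is
// easy to spot.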
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
  // nothing particular
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

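// Memory managers and pools exposed to the memory management support code.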
GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}