/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Initialize collector policy
  _collector_policy = new GenerationSizer();
  _collector_policy->initialize_all();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
                       heap_size, generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
  barrier_set->initialize();
  set_barrier_set(barrier_set);
  // Set up the generations.
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that _max_gen_size
  // still records the maximum size of the generation with the boundaries
  // as they currently stand, and is used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

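// Estimate the largest heap size available to the application: the reserved
// size minus one survivor space, which never holds application data at the
// point of allocation. With adaptive sizing the policy's maximum survivor
// size is used; the result is never reported as smaller than the current
// capacity.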
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

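// An address is scavengable if the object it points into can be moved by a
// minor collection; for this heap that is exactly the young generation.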
bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
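//
// Simplified call flow between the two levels:
//
//   mutator thread: mem_allocate()                       (basic policy)
//     -> VM_ParallelGCFailedAllocation, run by the VM thread
//          -> failed_mem_allocate()                      (failed policy, at a safepoint)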

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it
  // to false here and reset it to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

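// Attempt an allocation in the old gen while the Heap_lock is held (called
// from mem_allocate above). Allocation goes to the old gen when the request
// is too large for eden, when the GC locker currently prevents a collection,
// or, a limited number of times, while a death march is in progress.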
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation-flow
// policy, NOT collection policy, so we do not check here whether the gc
// time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but applies no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

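// TLAB support. TLABs are carved out of eden only, so all TLAB accounting
// below is delegated to the eden space.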
size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


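// Return the start of the object that contains addr, or NULL if it cannot be
// determined. Only the old gen supports this query (via its start array);
// young gen addresses are only tolerated while debugging or error reporting
// is in progress.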
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceYoungGenTime) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceOldGenTime) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return heap;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif