/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceCounters.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

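  // The card table spans the entire reserved heap.  The CardTableBarrierSet
  // layered on it provides the post-write barrier that dirties cards, which
  // the scavenger later scans to find old-to-young pointers.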
  PSCardTable* card_table = new PSCardTable(reserved_region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow.  This includes
  // growth into the other generation.  Note that _max_gen_size continues
  // to hold the maximum size of each generation as the boundaries
  // currently stand, and is still used as that value.
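  //
  // A sketch of the adjoining layout (low addresses on the left); the
  // boundary between the generations can move when UseAdaptiveGCBoundary
  // is enabled:
  //
  //   +------------------+-------------------+
  //   |     old gen      |     young gen     |
  //   +------------------+-------------------+
  //   ^base()            ^movable boundary   ^base() + size()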
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

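// The estimate below excludes one survivor space (the maximum survivor size
// under adaptive sizing, otherwise the current to-space capacity): one
// survivor space is always kept empty between scavenges and so never holds
// application data.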
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how memory is allocated without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set
  // it to false here and reset it to true only if the gc time limit is
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and a
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection must have
        // exceeded the gc overhead limit (although enough may have been
        // collected to satisfy the allocation).  Exit the loop so that an
        // out-of-memory error will be thrown (return NULL, ignoring the
        // contents of op.result()), but clear gc_overhead_limit_exceeded
        // so that the next collection starts with a clean slate (i.e.,
        // forgets about previous overhead excesses).  Fill op.result()
        // with a filler object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}
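
// Note on the interplay above: once death_march_check() starts a march
// (_death_march_count == 1), mem_allocate_old_gen() satisfies up to 63
// subsequent eden-sized requests directly from the old gen before the
// counter resets and the next failing allocation falls back to a GC again.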

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements policy for
// allocation flow, NOT collection policy, so we do not check here
// whether gc time is over its limit; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
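//
// The escalation below proceeds in five levels:
//   1. scavenge (PSScavenge::invoke(), which may itself decide to run a
//      full collection), then allocate in the young gen;
//   2. if no full collection has happened yet, full collection, then
//      allocate in the young gen;
//   3. allocate in the old gen;
//   4. full collection with maximum compaction (clears SoftRefs), then
//      allocate in the young gen;
//   5. allocate in the old gen; if this also fails, return NULL.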
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

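  // The counts are passed to the VM operation so that its prologue can
  // detect a collection that completed after the Heap_lock was released,
  // and skip a now-redundant GC.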
  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
}


void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
  return (ParallelScavengeHeap*)heap;
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
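    // If the boundary already moved during the preceding collection to let
    // the old gen absorb eden's live data, the generations have in effect
    // been resized; consume the flag and skip this resize.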
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

bool ParallelScavengeHeap::is_scavengable(oop obj) {
  return is_in_young(obj);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  CodeCache::register_scavenge_root_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  CodeCache::verify_scavenge_root_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}