/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

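// Reserve the Java heap, create the card-table barrier set, carve the
// reservation into the adjoining old and young generations, and set up the
// adaptive size policy, the policy counters and the GCTaskManager. Returns
// JNI_ENOMEM if the parallel old GC data structures cannot be allocated.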
jint ParallelScavengeHeap::initialize() {
  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
  barrier_set->initialize();
  set_barrier_set(barrier_set);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that _max_gen_size
  // is kept as the maximum size of the generation as the boundaries
  // currently stand, and is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  // Set up the GCTaskManager.
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

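// Create the memory pools and GC memory managers exposed through the
// java.lang.management API. The old-gen manager covers all three pools
// because a full collection can affect every space; the young-gen manager
// covers only eden and the survivor spaces.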
void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}

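// Complete the initialization that must wait until the heap itself exists:
// the scavenger (which needs the initial tenuring threshold), the full-gc
// collector selected by UseParallelOldGC, and the promotion manager used
// to copy objects during scavenges.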
void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


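// Estimate the maximum usable capacity. One survivor space is excluded
// from the estimate because the to-space is empty except during a
// scavenge, so its capacity is never available to the application.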
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and a
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timing out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method contains policy for the
// allocation flow only, NOT collection policy. In particular, we do not
// check here whether gc time is over the limit; that is the responsibility
// of the heap-specific collection methods. This method decides where to
// attempt allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

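// Make the heap linearly walkable: retire TLABs via the superclass, then
// give eden a chance to fix up any space-local unparsable areas (this
// matters for NUMA-partitioned eden spaces).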
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

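// TLABs are carved out of eden only, so all TLAB queries and allocations
// delegate directly to the eden space.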
size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

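// Build a snapshot of the current space and virtual-space boundaries.
// The summary is consumed by the GC tracing framework (see trace_heap()
// below) when heap events are reported before and after a collection.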
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
}


void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

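// Checked accessor for the singleton heap instance. A minimal usage
// sketch (call sites throughout the parallel GC follow this pattern):
//
//   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
//   PSYoungGen* young = heap->young_gen();
//
// The asserts below fire if the heap has not been initialized yet, or if
// a different collector is in use.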
ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

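// Debug-only (non-product) support: when ZapUnusedHeapArea is set, record
// the top of each space before a GC and mangle unused areas afterwards so
// that stale data is easy to spot in a debugger.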
#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

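// Only objects in the young generation can move during a scavenge, so an
// object is scavengable exactly when it lives in the young gen. nmethods
// with pointers into the young gen are kept on the code cache's
// scavenge-root list so the scavenger can find and update them.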
bool ParallelScavengeHeap::is_scavengable(oop obj) {
  return is_in_young(obj);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  CodeCache::register_scavenge_root_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  CodeCache::verify_scavenge_root_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}