/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

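// Reserve the maximum heap, set up the card-table barrier set, create the
// adjoining young and old generations, and construct the adaptive size
// policy and its counters. Returns JNI_ENOMEM if the parallel compactor's
// data structures cannot be allocated, JNI_OK otherwise.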
jint ParallelScavengeHeap::initialize() {
  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  PSCardTable* card_table = new PSCardTable(reserved_region());
  card_table->initialize();
  CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table);
  barrier_set->initialize();
  set_barrier_set(barrier_set);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow. This includes
  // growth into the other generation. Note that _max_gen_size remains the
  // maximum size of the generation as the boundaries currently stand, and
  // is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

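// Create the memory pools and GC memory managers exposed through the
// java.lang.management (JMX) memory service.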
void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

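  // A major collection affects all pools, so the old manager covers all
  // three; a minor collection affects only the young pools.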
  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}


void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

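// Report an estimate of the maximum usable heap size: the reserved size
// minus the space set aside for a survivor (to) space, never less than
// the current capacity.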
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bailout.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it
  // to false here and set it to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a JNI critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }


    if (result == NULL) {
      // Generate a VM operation.
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // Retry and/or stall as necessary.
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return a NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }


    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %u times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
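    // Allow a bounded number (64) of consecutive old-gen allocations while
    // the death march lasts; after that, reset the count so the next
    // request triggers another collection.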
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements policy for the
// allocation flow, NOT collection policy. So we do not check here whether
// gc time has exceeded its limit; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

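// Make the heap parsable for iteration: the shared CollectedHeap code
// retires TLABs, and eden additionally ensures its unused area is parsable.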
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock.
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

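// Return the start of the block (object) containing addr, if it can be
// determined. Only the old gen supports this query, via its object start
// array; for the young gen it is only reached from debugging and
// error-reporting paths.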
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}


void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}

CardTableModRefBS* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}


// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

bool ParallelScavengeHeap::is_scavengable(oop obj) {
  return is_in_young(obj);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  CodeCache::register_scavenge_root_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  CodeCache::verify_scavenge_root_nmethod(nm);
}

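// The GC memory managers reported to the memory service: the scavenge
// (minor GC) manager and the mark-sweep (major GC) manager.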
GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}