/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepProxy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

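// Set up the heap: reserve and partition the address space, install the
// card table and barrier set, create the adjoining young/old generations,
// and build the adaptive size policy and its counters.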
jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       reserved_heap_size,
                       GenAlignment,
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  PSCardTable* card_table = new PSCardTable(reserved_region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that _max_gen_size
  // is still kept as the maximum size of the generation as the
  // boundaries currently stand.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(ParallelArguments::is_heterogeneous_heap() || !UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  // Set up the GCTaskManager.
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

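// Answers whether an object lives in the young gen; used by
// ScavengableNMethods to identify nmethods that hold oops into the
// young gen and so must be processed during a scavenge.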
class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold.
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweepProxy::initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

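// Estimate the maximum usable capacity: the reserved region minus one
// survivor space, since the to-space never holds live application data
// between collections.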
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set
  // it to false here and set it to true only if the gc time limit is
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and a
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

320 
321     if (result == NULL) {
322       // Generate a VM operation
323       VM_ParallelGCFailedAllocation op(size, gc_count);
324       VMThread::execute(&op);
325 
326       // Did the VM operation execute? If so, return the result directly.
327       // This prevents us from looping until time out on requests that can
328       // not be satisfied.
329       if (op.prologue_succeeded()) {
330         assert(is_in_or_null(op.result()), "result not in heap");
331 
332         // If GC was locked out during VM operation then retry allocation
333         // and/or stall as necessary.
334         if (op.gc_locked()) {
335           assert(op.result() == NULL, "must be NULL if gc_locked() is true");
336           continue;  // retry and/or stall as necessary
337         }
338 
        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection must have
        // exceeded the gc overhead limit (although enough may have been
        // collected to satisfy the allocation).  Exit the loop so that
        // an out-of-memory error will be thrown (return NULL, ignoring
        // the contents of op.result()), but clear
        // gc_overhead_limit_exceeded so that the next collection starts
        // with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // The death march has ended.
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // The death march has started.
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
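  // The cap of 64 consecutive old-gen allocations below is an arbitrary
  // bound; once it is reached, the count resets and NULL is returned so
  // that the caller falls back to a collection.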
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweepProxy::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation-flow
// policy, NOT collection policy. So we do not check here whether the gc
// time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
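  // PSScavenge::invoke() may itself escalate to a full collection;
  // remember whether it did, so the full collection below is not
  // repeated needlessly.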
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

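// TLABs are carved out of eden only. On success, *actual_size is simply
// the requested size; no best-effort downsizing is attempted here.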
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock.
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }
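  // The snapshotted counts let the VM operation detect whether another
  // thread already completed a collection after the snapshot, in which
  // case the operation skips the redundant GC.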

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

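// An address is the start of an object exactly when block_start() maps
// it to itself.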
bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweepProxy::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
  return (ParallelScavengeHeap*)heap;
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
  // nothing particular
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}