/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepProxy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;

jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       reserved_heap_size,
                       GenAlignment,
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  PSCardTable* card_table = new PSCardTable(reserved_region());
  card_table->initialize();
  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);
  // Set up the generations.
  //
  // The maximum size that a generation can grow to includes growth
  // into the other generation.  Note that _max_gen_size remains the
  // maximum size of the generation as the boundaries currently stand,
  // and is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(ParallelArguments::is_heterogeneous_heap() || !UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // Initialize the policy counters: 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  // Set up WorkGang
  _workers.initialize_workers();

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

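// BoolObjectClosure handed to ScavengableNMethods below: an oop is
// considered scavengable iff it is in the young gen, since a scavenge
// moves only young-gen objects.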
class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweepProxy::initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  return young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::used() const {
  return young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

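// The estimate below excludes one survivor space: the to-space is kept
// empty between scavenges, so it never holds allocated objects and does
// not count toward usable capacity.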
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how memory is allocated without
// attempting garbage collection. It is okay to grab locks and expand the
// heap if that can be done without coming to a safepoint. It is likely
// that the basic allocation policy will not be very aggressive.
//
// The failed allocation policy is invoked from the VM thread after the
// basic allocation policy is unable to satisfy a mem_allocate request.
// This policy needs to cover the entire range of collection, heap
// expansion, and out-of-memory conditions. It should make every attempt
// to allocate the requested memory.

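// A simplified sketch of the call flow implemented below (mem_allocate()
// is the basic policy entry point; failed_mem_allocate(), further down,
// is the failed allocation policy):
//
//   mem_allocate(size)                   // requesting thread
//     young_gen()->allocate(size)        // basic policy
//     mem_allocate_old_gen(size)         // basic policy, under Heap_lock
//     VM_ParallelGCFailedAllocation op   // hand off to the VM thread, which
//       -> failed_mem_allocate(size)     //   runs the failed allocation policy
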
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail-out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it
  // to false here and reset it to true only if the GC time limit is
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and a
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above (the "result" guarding
        // this path is NULL) and the most recent collection must have
        // exceeded the gc overhead limit (although enough may have been
        // collected to satisfy the allocation).  Exit the loop so that an
        // out-of-memory error will be thrown (return NULL, ignoring the
        // contents of op.result()), but clear gc_overhead_limit_exceeded
        // so that the next collection starts with a clean slate (i.e.,
        // forgets about previous overhead excesses).  Fill op.result()
        // with a filler object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}
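
// Illustrative life cycle of _death_march_count, pieced together from the
// two routines above: a full gc followed by a failed young-gen allocation
// of an eden-sized request sets the count to 1; each subsequent
// mem_allocate_old_gen() call increments it and satisfies the request from
// the old gen, until the count reaches 64 and is reset, sending the next
// request back onto the normal collection path.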

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction, which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweepProxy::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation-flow
// policy, NOT collection policy. We do not check here whether the gc
// time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but applies no
// collection-specific policy.
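//
// In outline, the cascade below is:
//   scavenge -> young alloc -> full gc (unless the scavenge already
//   invoked one) -> young alloc -> old alloc -> maximally compacting
//   full gc -> young alloc -> old alloc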
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  HeapWord* result = young_gen()->allocate(requested_size);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}
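
// A minimal sketch of a caller, assuming the usual System.gc() path in
// which the JVM_GC() entry point invokes collect() on the global heap:
//
//   ParallelScavengeHeap::heap()->collect(GCCause::_java_lang_system_gc);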

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return NULL;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweepProxy::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  ParallelScavengeHeap::heap()->workers().print_worker_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::Parallel, "Invalid name");
  return (ParallelScavengeHeap*)heap;
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void ParallelScavengeHeap::flush_nmethod(nmethod* nm) {
  // nothing particular
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}