/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/vmPSOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

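// Set up the heap: reserve the maximum heap size, install the card-table
// barrier set, carve the reservation into adjoining young and old
// generations, and create the adaptive size policy, the policy counters and
// the GCTaskManager used by the parallel collectors.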
jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

  os::trace_page_sizes("Heap",
                       _collector_policy->min_heap_byte_size(),
                       heap_size,
                       generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
  barrier_set->initialize();
  set_barrier_set(barrier_set);

  // Set up the generations.
  //
  // The maximum size to which a generation can grow includes growth into the
  // other generation. Note that _max_gen_size still holds the maximum size of
  // the generation as the boundaries currently stand, and is used as such.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {

  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge", "end of minor GC");
  _old_manager = new GCMemoryManager("PS MarkSweep", "end of major GC");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);

}

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


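// The largest usable capacity: the reserved size minus one survivor space,
// since the to-space is never available for ordinary allocation. With
// adaptive sizing the survivor size comes from the size policy's current
// maximum; otherwise the current to-space capacity is used.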
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

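// An illustrative sketch of the resulting control flow (not literal code):
//
//   mutator thread                            VM thread
//   --------------                            ---------
//   mem_allocate()
//     young_gen()->allocate()                 (fast path, no lock)
//     Heap_lock: retry young gen, old gen     (basic policy, no safepoint)
//     VM_ParallelGCFailedAllocation   ==>     failed_mem_allocate()
//                                               collect, then retry the
//                                               allocation (failed policy)
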
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it to
  // false here, and reset it to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value and, if
    // the count has changed, does not start a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period in which it
    // reads the total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection must have exceeded
        // the gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (returning NULL and ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the next
        // collection starts with a clean slate (i.e., forgets about previous
        // overhead excesses). Fill op.result() with a filler object so that
        // the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}

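// For illustration, a caller of mem_allocate() above would typically use it
// as in the following sketch (the surrounding code is hypothetical):
//
//   bool gc_overhead_limit_was_exceeded = false;
//   HeapWord* obj = heap->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
//   if (obj == NULL) {
//     // Out of memory; the flag tells the caller whether to report
//     // "GC overhead limit exceeded" rather than "Java heap space".
//   }
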
// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

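// Attempt to satisfy an allocation from the old gen without collecting: used
// for requests too large for eden, when a GC is locked out, and for a bounded
// number of allocations while a death march is in progress.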
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation policy,
// NOT collection policy. We therefore do not check here whether the gc time
// limit has been exceeded; that is the responsibility of the heap-specific
// collection methods. This method decides where to attempt allocations and
// when to attempt collections, but contains no collection-specific policy.
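//
// In outline, the fallback ladder below is: scavenge, then allocate in the
// young gen; full collection, then young gen; old gen; maximally compacting
// full collection, then young gen; and finally old gen.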
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
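// For example, a Java-level System.gc() call reaches this method with
// cause == GCCause::_java_lang_system_gc (unless explicit GC is disabled).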
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

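// An address is the start of an object exactly when block_start() maps it to
// itself; note that block_start() above is only fully implemented for the
// old gen.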
bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
}


void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
  return (ParallelScavengeHeap*)heap;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif

bool ParallelScavengeHeap::is_scavengable(oop obj) {
  return is_in_young(obj);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  CodeCache::register_scavenge_root_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  CodeCache::verify_scavenge_root_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}